From 542e42d42ebdddb9db0f033b4e9849108c309425 Mon Sep 17 00:00:00 2001
From: Mengwei Liu
Date: Fri, 12 Dec 2025 15:33:22 -0800
Subject: [PATCH] Avoid copying output from GPU to CPU

---
 .../ci_commit_pins/optimum-executorch.txt |   2 +-
 backends/aoti/aoti_delegate_handle.h      |   2 +
 backends/cuda/runtime/cuda_backend.cpp    | 134 ++++++++++++++++--
 extension/asr/runner/CMakeLists.txt       |  16 +++
 extension/asr/runner/runner.cpp           |  17 ++-
 5 files changed, 155 insertions(+), 16 deletions(-)

diff --git a/.ci/docker/ci_commit_pins/optimum-executorch.txt b/.ci/docker/ci_commit_pins/optimum-executorch.txt
index 156ff2f3c82..2aea6eef8d6 100644
--- a/.ci/docker/ci_commit_pins/optimum-executorch.txt
+++ b/.ci/docker/ci_commit_pins/optimum-executorch.txt
@@ -1 +1 @@
-0123293118efb08ac4ffc4fefe9d330201465c93
+de4f3c4978b4d36cc0bb8f87c6877a4a040d7ae7
diff --git a/backends/aoti/aoti_delegate_handle.h b/backends/aoti/aoti_delegate_handle.h
index 82ce2521750..b14e02da9ef 100644
--- a/backends/aoti/aoti_delegate_handle.h
+++ b/backends/aoti/aoti_delegate_handle.h
@@ -10,6 +10,7 @@
 #include
 #include
+#include <string>
 
 namespace executorch {
 namespace backends {
@@ -85,6 +86,7 @@ struct AOTIDelegateHandle {
   AOTInductorModelContainerHandle container_handle;
   void* cuda_stream; // cudaStream_t stored as void* to avoid CUDA header
                      // dependency
+  std::string method_name;
 
   // Function pointers specific to this handle's shared library
   AOTInductorModelContainerCreateWithDeviceFunc create_with_device;
diff --git a/backends/cuda/runtime/cuda_backend.cpp b/backends/cuda/runtime/cuda_backend.cpp
index 0cef859ddfb..5e6bc3d3252 100644
--- a/backends/cuda/runtime/cuda_backend.cpp
+++ b/backends/cuda/runtime/cuda_backend.cpp
@@ -13,8 +13,10 @@
 #include
 #include
+#include <cstring>
 #include
 #include
+#include <mutex>
 #include
 #include
@@ -35,20 +37,55 @@ using executorch::runtime::ArrayRef;
 using executorch::runtime::Backend;
 using executorch::runtime::BackendExecutionContext;
 using executorch::runtime::BackendInitContext;
+using executorch::runtime::BackendOption;
+using executorch::runtime::BackendOptionContext;
 using executorch::runtime::CompileSpec;
 using executorch::runtime::DelegateHandle;
 using executorch::runtime::Error;
 using executorch::runtime::EValue;
 using executorch::runtime::FreeableBuffer;
+using executorch::runtime::kMaxOptionValueLength;
 using executorch::runtime::MemoryAllocator;
 using executorch::runtime::NamedDataMap;
 using executorch::runtime::Result;
 using executorch::runtime::Span;
 using executorch::runtime::etensor::Tensor;
+
+namespace {
+constexpr char kSkipCopyOutputToCpuForMethod[] =
+    "skip_copy_output_to_cpu_for_method";
+}
+
 class ET_EXPERIMENTAL CudaBackend final
     : public ::executorch::runtime::BackendInterface {
  private:
+  void set_skip_copy_method(
+      const std::array<char, kMaxOptionValueLength>& raw) {
+    std::lock_guard guard(skip_copy_method_mutex_);
+    skip_copy_method_ = std::string(raw.data());
+  }
+
+  std::array<char, kMaxOptionValueLength> get_skip_copy_method_as_option()
+      const {
+    std::array<char, kMaxOptionValueLength> out{};
+    std::string value;
+    {
+      std::lock_guard guard(skip_copy_method_mutex_);
+      value = skip_copy_method_;
+    }
+    std::snprintf(out.data(), out.size(), "%s", value.c_str());
+    return out;
+  }
+
+  bool should_skip_copy_for_method(const std::string& method_name) const {
+    if (method_name.empty()) {
+      return false;
+    }
+    std::lock_guard guard(skip_copy_method_mutex_);
+    return method_name == skip_copy_method_;
+  }
+
   Error load_function_pointers_into_handle(
       void* so_handle,
       AOTIDelegateHandle* handle) const {
@@ -91,6 +128,38 @@ class ET_EXPERIMENTAL CudaBackend final
     return 1;
   }
 
+  Error set_option(
+      ET_UNUSED BackendOptionContext& context,
+      const executorch::runtime::Span<executorch::runtime::BackendOption>&
+          backend_options) override {
+    for (const auto& option : backend_options) {
+      if (std::strcmp(option.key, kSkipCopyOutputToCpuForMethod) == 0) {
+        if (auto* val = std::get_if<std::array<char, kMaxOptionValueLength>>(
+                &option.value)) {
+          set_skip_copy_method(*val);
+        } else {
+          ET_LOG(
+              Error,
+              "Option %s must be a method name string.",
+              kSkipCopyOutputToCpuForMethod);
+          return Error::InvalidArgument;
+        }
+      }
+    }
+    return Error::Ok;
+  }
+
+  Error get_option(
+      ET_UNUSED BackendOptionContext& context,
+      executorch::runtime::Span<executorch::runtime::BackendOption>&
+          backend_options) override {
+    for (auto& option : backend_options) {
+      if (std::strcmp(option.key, kSkipCopyOutputToCpuForMethod) == 0) {
+        option.value = get_skip_copy_method_as_option();
+      }
+    }
+    return Error::Ok;
+  }
   // Once per loaded binary blob
   Result<DelegateHandle*> init(
       BackendInitContext& context,
@@ -159,6 +228,7 @@
     AOTIDelegateHandle* handle = new AOTIDelegateHandle();
     handle->so_handle = lib_handle;
     handle->so_path = so_path.string();
+    handle->method_name = method_name;
 
     // Load function pointers specific to this handle's shared library
     ET_CHECK_OK_OR_RETURN_ERROR(
@@ -222,9 +292,33 @@
     std::vector<AOTITensorHandle> gpu_outputs(
         n_outputs); // GPU tensors for kernel output
 
+    // RAII helper to ensure GPU tensors are cleaned up on all exit paths.
+    // Prevents memory leaks when errors occur during execute().
+    // TODO(larryliu0820): revisit this after SlimTensor migration, to see
+    // if this is still needed.
+    struct TensorCleanup {
+      std::vector<AOTITensorHandle>& inputs;
+      std::vector<AOTITensorHandle>& outputs;
+
+      ~TensorCleanup() {
+        // Clean up input tensors
+        for (auto* handle : inputs) {
+          if (handle != nullptr) {
+            aoti_torch_delete_tensor_object(handle);
+          }
+        }
+        // Clean up output tensors
+        for (auto* handle : outputs) {
+          if (handle != nullptr) {
+            aoti_torch_delete_tensor_object(handle);
+          }
+        }
+      }
+    };
+    TensorCleanup cleanup{gpu_inputs, gpu_outputs};
     // Process input tensors: ExecuTorch provides CPU tensors, create GPU
     // copies
-    for (int i = 0; i < n_inputs; i++) {
+    for (size_t i = 0; i < n_inputs; i++) {
       // Get tensor dimensions and properties from ExecuTorch CPU tensor
       auto cpu_tensor = &(args[i]->toTensor());
       auto sizes = cpu_tensor->sizes();
@@ -260,7 +354,7 @@
     }
     // Process output tensors: create GPU counterparts for ExecuTorch CPU
     // tensors
-    for (int i = 0; i < n_outputs; i++) {
+    for (size_t i = 0; i < n_outputs; i++) {
       // Get output tensor dimensions from ExecuTorch CPU tensor
       auto cpu_output_tensor = &(args[i + n_inputs]->toTensor());
       auto sizes = cpu_output_tensor->sizes();
@@ -303,18 +397,26 @@
         "AOTInductorModelContainerRun failed with error code %d",
         error);
-    // Copy GPU output results back to CPU output tensors
-    for (int i = 0; i < n_outputs; i++) {
-      auto cpu_output_tensor = &(args[i + n_inputs]->toTensor());
-      // For DYNAMIC_BOUND tensors we try to resize
-      ET_CHECK_OK_OR_RETURN_ERROR(
-          resize_tensor(*cpu_output_tensor, gpu_outputs[i]->sizes()),
-          "Error resizing tensor at output index %d",
-          i);
-      ET_CHECK_OK_OR_RETURN_ERROR(
-          aoti_torch_copy_(cpu_output_tensor, gpu_outputs[i], 0),
-          "Failed to copy GPU output %d back to CPU",
-          i);
+    const bool copy_outputs = !should_skip_copy_for_method(handle->method_name);
+
+    if (copy_outputs) {
+      // Copy GPU output results back to CPU output tensors
+      for (int i = 0; i < n_outputs; i++) {
+        auto cpu_output_tensor = &(args[i + n_inputs]->toTensor());
+        // For DYNAMIC_BOUND tensors we try to resize
+        ET_CHECK_OK_OR_RETURN_ERROR(
+            resize_tensor(*cpu_output_tensor, gpu_outputs[i]->sizes()),
+            "Error resizing tensor at output index %d",
+            i);
+        ET_CHECK_OK_OR_RETURN_ERROR(
+            aoti_torch_copy_(cpu_output_tensor, gpu_outputs[i], 0),
+            "Failed to copy GPU output %d back to CPU",
+            i);
+      }
+    } else {
+      for (int i = 0; i < n_outputs; i++) {
+        args[i + n_inputs]->toTensor() = *gpu_outputs[i];
+      }
     }
 
     return Error::Ok;
   }
@@ -365,6 +467,10 @@ class ET_EXPERIMENTAL CudaBackend final
     delete handle;
     clear_all_tensors();
   }
+
+ private:
+  mutable std::mutex skip_copy_method_mutex_;
+  std::string skip_copy_method_;
 };
 
 } // namespace executorch::backends::cuda
diff --git a/extension/asr/runner/CMakeLists.txt b/extension/asr/runner/CMakeLists.txt
index cc9ba01596a..c3d77712017 100644
--- a/extension/asr/runner/CMakeLists.txt
+++ b/extension/asr/runner/CMakeLists.txt
@@ -35,6 +35,22 @@ set_target_properties(
   extension_asr_runner PROPERTIES POSITION_INDEPENDENT_CODE ON
 )
 
+# If the project is configured to build with CUDA support, try to find a CUDA
+# runtime (prefer the CUDAToolkit package). If found, expose a compile-time
+# macro so sources can conditionally compile CUDA-aware code.
+if(EXECUTORCH_BUILD_CUDA)
+  find_package(CUDAToolkit QUIET)
+  if(CUDAToolkit_FOUND)
+    target_compile_definitions(extension_asr_runner PUBLIC CUDA_AVAILABLE)
+    message(STATUS "CUDAToolkit found; defining CUDA_AVAILABLE for ASR runner")
+  else()
+    message(
+      STATUS
+        "CUDA requested (EXECUTORCH_BUILD_CUDA=ON) but no CUDA runtime found"
+    )
+  endif()
+endif()
+
 install(
   TARGETS extension_asr_runner
   EXPORT ExecuTorchTargets
diff --git a/extension/asr/runner/runner.cpp b/extension/asr/runner/runner.cpp
index 4f2523989c1..61eb7e0366f 100644
--- a/extension/asr/runner/runner.cpp
+++ b/extension/asr/runner/runner.cpp
@@ -107,7 +107,22 @@ Error AsrRunner::load() {
 
   ET_CHECK_OK_OR_RETURN_ERROR(module_->load_method(kDecoderMethodName));
   decoder_method_loaded_ = true;
-
+#ifdef CUDA_AVAILABLE
+  executorch::runtime::BackendOptions<1> backend_options;
+  // For the decoder, still copy output from GPU to CPU for sampling.
+  // TODO: change the sampler to use a CUDA kernel to sample, then skip
+  // copying the decoder output as well.
+  ET_CHECK_OK_OR_RETURN_ERROR(backend_options.set_option(
+      "skip_copy_output_to_cpu_for_method", kEncoderMethodName));
+  const auto opt_err =
+      executorch::runtime::set_option("CudaBackend", backend_options.view());
+  if (opt_err != ::executorch::runtime::Error::Ok) {
+    ET_LOG(
+        Warning,
+        "Failed to set CUDA backend options: %d",
+        static_cast<int>(opt_err));
+  }
+#endif
   ET_CHECK_OK_OR_RETURN_ERROR(load_tokenizer());
   auto eos_ids = get_eos_ids(tokenizer_.get(), module_.get());
   if (!eos_ids.empty()) {
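
Note (not part of the patch): the new "skip_copy_output_to_cpu_for_method" key is an ordinary backend option, so any client, not just the ASR runner, can enable it before running a method on the CUDA backend. A minimal sketch follows, mirroring the runner.cpp hunk above; it assumes the relevant ExecuTorch runtime headers are already included, that the enclosing function returns Error, and that the method whose outputs should stay on the GPU is named "encoder" (a hypothetical name, not taken from the patch):

    // Sketch only: configure the CUDA backend so that outputs of the
    // hypothetical "encoder" method are left on the GPU instead of being
    // copied back to CPU after execute().
    executorch::runtime::BackendOptions<1> backend_options;
    ET_CHECK_OK_OR_RETURN_ERROR(backend_options.set_option(
        "skip_copy_output_to_cpu_for_method", "encoder"));
    // Hand the option to the backend registered under the name "CudaBackend".
    ET_CHECK_OK_OR_RETURN_ERROR(
        executorch::runtime::set_option("CudaBackend", backend_options.view()));

Because the option stores a single method name, only one method per process skips the copy at a time; setting it again with a different name (or an empty string) replaces the previous value, as implemented in set_skip_copy_method() above.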