diff --git a/onnxruntime/core/providers/shared_library/provider_interfaces.h b/onnxruntime/core/providers/shared_library/provider_interfaces.h
index 03f5e69720940..a1bb86598ebc0 100644
--- a/onnxruntime/core/providers/shared_library/provider_interfaces.h
+++ b/onnxruntime/core/providers/shared_library/provider_interfaces.h
@@ -178,7 +178,6 @@ struct ProviderHost {
   virtual std::string demangle(const char* name) = 0;
   virtual std::string demangle(const std::string& name) = 0;
 
-  // #ifdef USE_CUDA
   virtual std::unique_ptr<IAllocator> CreateCUDAAllocator(int16_t device_id, const char* name) = 0;
   virtual std::unique_ptr<IAllocator> CreateCUDAPinnedAllocator(const char* name) = 0;
   virtual std::unique_ptr<IDataTransfer> CreateGPUDataTransfer() = 0;
@@ -190,7 +189,6 @@ struct ProviderHost {
 
   virtual Status CudaCall_false(int retCode, const char* exprString, const char* libName, int successCode, const char* msg, const char* file, const int line) = 0;
   virtual void CudaCall_true(int retCode, const char* exprString, const char* libName, int successCode, const char* msg, const char* file, const int line) = 0;
-  // #endif
 
 #ifdef USE_MIGRAPHX
   virtual std::unique_ptr<IAllocator> CreateMIGraphXAllocator(int16_t device_id, const char* name) = 0;
@@ -1255,9 +1253,7 @@ struct ProviderHost {
   virtual training::DistributedRunContext& GetDistributedRunContextInstance() = 0;
 #endif
 
-  // #if defined(USE_CUDA) || defined(USE_ROCM)
   virtual PhiloxGenerator& PhiloxGenerator__Default() = 0;
-  // #endif
 
 #ifdef ENABLE_TRAINING_TORCH_INTEROP
   virtual void contrib__PythonOpBase__Init(contrib::PythonOpBase* p, const OpKernelInfo& info) = 0;
diff --git a/onnxruntime/core/session/provider_bridge_ort.cc b/onnxruntime/core/session/provider_bridge_ort.cc
index 29fc86226f27c..3a694ac6f8e5e 100644
--- a/onnxruntime/core/session/provider_bridge_ort.cc
+++ b/onnxruntime/core/session/provider_bridge_ort.cc
@@ -1559,9 +1557,7 @@ struct ProviderHostImpl : ProviderHost {
   training::DistributedRunContext& GetDistributedRunContextInstance() override { return training::DistributedRunContext::GetInstance(); }
 #endif
 
-  // #if defined(USE_CUDA) || defined(USE_ROCM)
   PhiloxGenerator& PhiloxGenerator__Default() override { return PhiloxGenerator::Default(); }
-  // #endif
 
 #ifdef ENABLE_TRAINING_TORCH_INTEROP
   void contrib__PythonOpBase__Init(contrib::PythonOpBase* p, const OpKernelInfo& info) override { p->PythonOpBase::Init(info); }