diff --git a/paddle/phi/api/lib/tensor_utils.cc b/paddle/phi/api/lib/tensor_utils.cc
index aa62b2e7300c2c..1403df8ed68eb8 100644
--- a/paddle/phi/api/lib/tensor_utils.cc
+++ b/paddle/phi/api/lib/tensor_utils.cc
@@ -38,8 +38,12 @@ PADDLE_API phi::Place GetPlaceFromPtr(void* data) {
 #ifdef PADDLE_WITH_CUDA
   cudaPointerAttributes attr = {};
   cudaError_t status = cudaPointerGetAttributes(&attr, data);
-  if (status == cudaSuccess && attr.type == cudaMemoryTypeDevice) {
-    return phi::GPUPlace(attr.device);
+  if (status == cudaSuccess) {
+    if (attr.type == cudaMemoryTypeDevice) {
+      return phi::GPUPlace(attr.device);
+    } else if (attr.type == cudaMemoryTypeHost) {
+      // cudaMemoryTypeHost means page-locked (pinned) host memory registered
+      // with the CUDA driver, so report GPUPinnedPlace rather than CPUPlace.
+      return phi::GPUPinnedPlace();
+    }
   }
 #else
   hipPointerAttribute_t attr = {};
diff --git a/test/cpp/phi/api/test_from_blob.cc b/test/cpp/phi/api/test_from_blob.cc
index a4680b1b94a061..ece961532e0ec4 100644
--- a/test/cpp/phi/api/test_from_blob.cc
+++ b/test/cpp/phi/api/test_from_blob.cc
@@ -107,6 +107,15 @@ TEST(GetPlaceFromPtr, GPU) {
     ASSERT_EQ(gpu1_data_place, phi::GPUPlace(1));
     std::cout << "gpu1_data_place: " << gpu1_data_place << std::endl;
   }
+
+  // Test GPUPinnedPlace (cudaMemoryTypeHost)
+  auto pinned_alloc_ptr =
+      paddle::GetAllocator(phi::GPUPinnedPlace())->Allocate(sizeof(cpu_data));
+  float* pinned_data = static_cast<float*>(pinned_alloc_ptr->ptr());
+  auto pinned_data_place = GetPlaceFromPtr(pinned_data);
+  ASSERT_EQ(pinned_data_place, phi::GPUPinnedPlace());
+  std::cout << "pinned_data_place: " << pinned_data_place << std::endl;
+  pinned_alloc_ptr.release();
 }

 TEST(from_blob, GPU) {