76 changes: 76 additions & 0 deletions test/TensorTest.cpp
@@ -2,10 +2,23 @@
#include <ATen/core/Tensor.h>
#include <ATen/ops/ones.h>
#include <gtest/gtest.h>
#if !USE_PADDLE_API
#include <torch/all.h>
#endif

#include <string>
#include <vector>
#if USE_PADDLE_API
#include "paddle/phi/api/include/tensor.h"
Contributor review comment: Remove the extra header files and keep this fully consistent with torch.
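
A minimal sketch of how the include block might look if that suggestion is applied (assuming the tests only need the public tensor API header, and that the place/dense_tensor/malloc includes and the AllocationType stream shim below are the "extra" pieces being referred to):

#if USE_PADDLE_API
#include "paddle/phi/api/include/tensor.h"
#endif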

#include "paddle/phi/common/place.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/memory/malloc.h"
namespace phi {
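// Allow phi::AllocationType values to be streamed (e.g. in gtest failure
// messages) by printing the underlying integer value.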
inline std::ostream& operator<<(std::ostream& os, AllocationType type) {
return os << static_cast<int>(type);
}
} // namespace phi
#endif

#include "../src/file_manager.h"

@@ -213,5 +226,68 @@ TEST_F(TensorTest, Transpose) {
file.saveFile();
}

// Return the result file name for the current test case (used for per-case
// result comparison), e.g. a hypothetical base "result.txt" and test name
// "CudaResult" produce "result_CudaResult.txt".
static std::string GetTestCaseResultFileName() {
std::string base = g_custom_param.get();
std::string test_name =
::testing::UnitTest::GetInstance()->current_test_info()->name();
if (base.size() >= 4 && base.substr(base.size() - 4) == ".txt") {
base.resize(base.size() - 4);
}
return base + "_" + test_name + ".txt";
}

// Test Tensor::cuda(): on success record "1", the device type as an integer,
// the is_cuda flag, and the element count; record "0" if the copy throws.
TEST_F(TensorTest, CudaResult) {
FileManerger file(GetTestCaseResultFileName());
file.createFile();
try {
at::Tensor cuda_tensor = tensor.cuda();
file << "1 ";
file << std::to_string(static_cast<int>(cuda_tensor.device().type()))
<< " ";
file << std::to_string(cuda_tensor.is_cuda() ? 1 : 0) << " ";
file << std::to_string(cuda_tensor.numel()) << " ";
} catch (const std::exception&) {
file << "0 ";
} catch (...) {
file << "0 ";
}
file.saveFile();
}

// Test Tensor::is_pinned(): record the host tensor's pinned state, then whether
// pin_memory() on the CUDA copy reports a pinned tensor (0 if any call throws).
TEST_F(TensorTest, IsPinnedResult) {
FileManerger file(GetTestCaseResultFileName());
file.createFile();
file << std::to_string(tensor.is_pinned() ? 1 : 0) << " ";
int pinned_after_cuda = 0;
try {
at::Tensor cuda_tensor = tensor.cuda();
at::Tensor pinned_tensor = cuda_tensor.pin_memory();
pinned_after_cuda = pinned_tensor.is_pinned() ? 1 : 0;
} catch (...) {
pinned_after_cuda = 0;
}
file << std::to_string(pinned_after_cuda) << " ";
file.saveFile();
}

// Test Tensor::pin_memory(): record whether pinning the CUDA copy succeeds
// (0 if any call throws).
TEST_F(TensorTest, PinMemoryResult) {
FileManerger file(GetTestCaseResultFileName());
file.createFile();
int gpu_pin_ok = 0;
try {
at::Tensor cuda_tensor = tensor.cuda();
at::Tensor pinned_tensor = cuda_tensor.pin_memory();
gpu_pin_ok = pinned_tensor.is_pinned() ? 1 : 0;
} catch (...) {
gpu_pin_ok = 0;
}
file << std::to_string(gpu_pin_ok) << " ";
file.saveFile();
}

} // namespace test
} // namespace at