-
Notifications
You must be signed in to change notification settings - Fork 17
Expand file tree
/
Copy pathget_backend.cpp
More file actions
64 lines (53 loc) · 1.69 KB
/
get_backend.cpp
File metadata and controls
64 lines (53 loc) · 1.69 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
#include "agnocast/gpu_transfer_backend.hpp"
#include "cuda_ipc_backend.hpp"
#include "nvscibuf_backend.hpp"
#include "unified_memory_backend.hpp"
#include "vmm_backend.hpp"
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>
#include <memory>
namespace agnocast::cuda
{
namespace
{
std::unique_ptr<GpuTransferBackend> select_backend()
{
int device = 0;
cudaError_t err = cudaGetDevice(&device);
if (err != cudaSuccess) {
std::fprintf(
stderr, "[agnocast_cuda] FATAL: cudaGetDevice failed: %s\n", cudaGetErrorString(err));
std::abort();
}
int is_integrated = 0;
err = cudaDeviceGetAttribute(&is_integrated, cudaDevAttrIntegrated, device);
if (err != cudaSuccess) {
std::fprintf(
stderr, "[agnocast_cuda] FATAL: cudaDeviceGetAttribute failed: %s\n",
cudaGetErrorString(err));
std::abort();
}
if (!is_integrated) {
// Discrete GPU (GeForce, Quadro, Tesla, A/H series) — CUDA IPC is supported.
std::fprintf(stderr, "[agnocast_cuda] Discrete GPU detected, using CudaIpcBackend.\n");
return std::make_unique<CudaIpcBackend>();
}
// Integrated GPU (Jetson Xavier/Orin/Thor, DRIVE).
// TODO(agnocast): Implement and select the appropriate backend.
// - Jetson Thor (CUDA 13.0+): CudaIpcBackend may work via OpenRM.
// - Jetson Xavier/Orin: NvSciBufBackend or UnifiedMemoryBackend.
// - DRIVE: NvSciBufBackend.
std::fprintf(
stderr,
"[agnocast_cuda] FATAL: Integrated GPU detected (Jetson/DRIVE). "
"No backend is implemented yet for this platform.\n");
std::abort();
}
} // namespace
// Return the process-wide GPU transfer backend.
//
// Meyers singleton: the backend is chosen lazily on first call, and C++11
// magic statics guarantee the initialization is thread-safe. Aborts (inside
// select_backend) if no backend is available for this platform.
GpuTransferBackend & get_backend()
{
  static std::unique_ptr<GpuTransferBackend> backend = select_backend();
  return *backend;
}
} // namespace agnocast::cuda