
Commit 55d2064

Merge pull request #53 from makortel/cudauvm
[cudauvm] Move conditions to use managed memory
2 parents fb2653f + db094b5

10 files changed: +127 -188 lines
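
All of the condition-format changes below replace the explicit host staging plus per-device cudaMemcpyAsync pattern (built on cms::cuda::ESProduct) with a single cudaMallocManaged allocation that is advised read-mostly and prefetched to every device at construction time. A minimal, standalone sketch of that pattern; the names CUDA_CHECK, Conditions and scale are illustrative only, the repository uses its own cudaCheck, cms::cuda::deviceCount, ScopedSetDevice and StreamCache helpers:

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

#define CUDA_CHECK(call)                                                          \
  do {                                                                            \
    cudaError_t err_ = (call);                                                    \
    if (err_ != cudaSuccess) {                                                    \
      std::fprintf(stderr, "CUDA error %s at %s:%d\n", cudaGetErrorString(err_),  \
                   __FILE__, __LINE__);                                           \
      std::exit(EXIT_FAILURE);                                                    \
    }                                                                             \
  } while (0)

struct Conditions {  // stand-in for one conditions payload
  float scale;
};

int main() {
  Conditions* cond = nullptr;
  CUDA_CHECK(cudaMallocManaged(&cond, sizeof(Conditions)));  // one allocation, visible on host and all devices
  cond->scale = 1.5f;                                        // filled on the host, no staging copy

  int ndev = 0;
  CUDA_CHECK(cudaGetDeviceCount(&ndev));
  for (int device = 0; device < ndev; ++device) {
    // Conditions are read-only after construction: read-mostly lets every GPU keep
    // its own copy of the pages. In this PR the corresponding calls sit behind the
    // CUDAUVM_DISABLE_ADVICE / CUDAUVM_DISABLE_PREFETCH compile-time guards.
    CUDA_CHECK(cudaMemAdvise(cond, sizeof(Conditions), cudaMemAdviseSetReadMostly, device));
    CUDA_CHECK(cudaSetDevice(device));
    CUDA_CHECK(cudaMemPrefetchAsync(cond, sizeof(Conditions), device, nullptr));  // warm the device copy
    CUDA_CHECK(cudaDeviceSynchronize());  // only so this toy program can free immediately
  }
  CUDA_CHECK(cudaFree(cond));
  return 0;
}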

run-scan.py (+2 -1)

@@ -12,7 +12,8 @@
 n_events_unit = 1000
 n_blocks_per_stream = {
     "fwtest": 1,
-    "cuda": {"": 400, "transfer": 350}
+    "cuda": {"": 400, "transfer": 350},
+    "cudauvm": {"": 400, "transfer": 350},
 }

 result_re = re.compile("Processed (?P<events>\d+) events in (?P<time>\S+) seconds, throughput (?P<throughput>\S+) events/s")

src/cudauvm/CondFormats/PixelCPEFast.cc (+46 -61)

@@ -6,80 +6,65 @@

 #include "Geometry/phase1PixelTopology.h"
 #include "CUDACore/cudaCheck.h"
+#include "CUDACore/deviceCount.h"
+#include "CUDACore/ScopedSetDevice.h"
+#include "CUDACore/StreamCache.h"
 #include "CondFormats/PixelCPEFast.h"

-// Services
-// this is needed to get errors from templates
-
-namespace {
-  constexpr float micronsToCm = 1.0e-4;
-}
-
 //-----------------------------------------------------------------------------
 //! The constructor.
 //-----------------------------------------------------------------------------
 PixelCPEFast::PixelCPEFast(std::string const &path) {
+  unsigned int ndetParams;
+
+  cudaCheck(cudaMallocManaged(&m_params, sizeof(pixelCPEforGPU::ParamsOnGPU)));
+  cudaCheck(cudaMallocManaged(&m_commonParams, sizeof(pixelCPEforGPU::CommonParams)));
+  cudaCheck(cudaMallocManaged(&m_layerGeometry, sizeof(pixelCPEforGPU::LayerGeometry)));
+  cudaCheck(cudaMallocManaged(&m_averageGeometry, sizeof(pixelCPEforGPU::AverageGeometry)));
+
   {
     std::ifstream in(path, std::ios::binary);
     in.exceptions(std::ifstream::badbit | std::ifstream::failbit | std::ifstream::eofbit);
-    in.read(reinterpret_cast<char *>(&m_commonParamsGPU), sizeof(pixelCPEforGPU::CommonParams));
-    unsigned int ndetParams;
+    in.read(reinterpret_cast<char *>(m_commonParams), sizeof(pixelCPEforGPU::CommonParams));
     in.read(reinterpret_cast<char *>(&ndetParams), sizeof(unsigned int));
-    m_detParamsGPU.resize(ndetParams);
-    in.read(reinterpret_cast<char *>(m_detParamsGPU.data()), ndetParams * sizeof(pixelCPEforGPU::DetParams));
-    in.read(reinterpret_cast<char *>(&m_averageGeometry), sizeof(pixelCPEforGPU::AverageGeometry));
-    in.read(reinterpret_cast<char *>(&m_layerGeometry), sizeof(pixelCPEforGPU::LayerGeometry));
+    cudaCheck(cudaMallocManaged(&m_detParams, ndetParams * sizeof(pixelCPEforGPU::DetParams)));
+    in.read(reinterpret_cast<char *>(m_detParams), ndetParams * sizeof(pixelCPEforGPU::DetParams));
+    in.read(reinterpret_cast<char *>(m_averageGeometry), sizeof(pixelCPEforGPU::AverageGeometry));
+    in.read(reinterpret_cast<char *>(m_layerGeometry), sizeof(pixelCPEforGPU::LayerGeometry));
   }

-  cpuData_ = {
-      &m_commonParamsGPU,
-      m_detParamsGPU.data(),
-      &m_layerGeometry,
-      &m_averageGeometry,
-  };
-}
-
-const pixelCPEforGPU::ParamsOnGPU *PixelCPEFast::getGPUProductAsync(cudaStream_t cudaStream) const {
-  const auto &data = gpuData_.dataForCurrentDeviceAsync(cudaStream, [this](GPUData &data, cudaStream_t stream) {
-    // and now copy to device...
-    cudaCheck(cudaMalloc((void **)&data.h_paramsOnGPU.m_commonParams, sizeof(pixelCPEforGPU::CommonParams)));
-    cudaCheck(cudaMalloc((void **)&data.h_paramsOnGPU.m_detParams,
-                         this->m_detParamsGPU.size() * sizeof(pixelCPEforGPU::DetParams)));
-    cudaCheck(cudaMalloc((void **)&data.h_paramsOnGPU.m_averageGeometry, sizeof(pixelCPEforGPU::AverageGeometry)));
-    cudaCheck(cudaMalloc((void **)&data.h_paramsOnGPU.m_layerGeometry, sizeof(pixelCPEforGPU::LayerGeometry)));
-    cudaCheck(cudaMalloc((void **)&data.d_paramsOnGPU, sizeof(pixelCPEforGPU::ParamsOnGPU)));
+  m_params->m_commonParams = m_commonParams;
+  m_params->m_detParams = m_detParams;
+  m_params->m_layerGeometry = m_layerGeometry;
+  m_params->m_averageGeometry = m_averageGeometry;

-    cudaCheck(cudaMemcpyAsync(
-        data.d_paramsOnGPU, &data.h_paramsOnGPU, sizeof(pixelCPEforGPU::ParamsOnGPU), cudaMemcpyDefault, stream));
-    cudaCheck(cudaMemcpyAsync((void *)data.h_paramsOnGPU.m_commonParams,
-                              &this->m_commonParamsGPU,
-                              sizeof(pixelCPEforGPU::CommonParams),
-                              cudaMemcpyDefault,
-                              stream));
-    cudaCheck(cudaMemcpyAsync((void *)data.h_paramsOnGPU.m_averageGeometry,
-                              &this->m_averageGeometry,
-                              sizeof(pixelCPEforGPU::AverageGeometry),
-                              cudaMemcpyDefault,
-                              stream));
-    cudaCheck(cudaMemcpyAsync((void *)data.h_paramsOnGPU.m_layerGeometry,
-                              &this->m_layerGeometry,
-                              sizeof(pixelCPEforGPU::LayerGeometry),
-                              cudaMemcpyDefault,
-                              stream));
-    cudaCheck(cudaMemcpyAsync((void *)data.h_paramsOnGPU.m_detParams,
-                              this->m_detParamsGPU.data(),
-                              this->m_detParamsGPU.size() * sizeof(pixelCPEforGPU::DetParams),
-                              cudaMemcpyDefault,
-                              stream));
-  });
-  return data.d_paramsOnGPU;
+  for (int device = 0, ndev = cms::cuda::deviceCount(); device < ndev; ++device) {
+#ifndef CUDAUVM_DISABLE_ADVICE
+    cudaCheck(cudaMemAdvise(m_params, sizeof(pixelCPEforGPU::ParamsOnGPU), cudaMemAdviseSetReadMostly, device));
+    cudaCheck(cudaMemAdvise(m_commonParams, sizeof(pixelCPEforGPU::CommonParams), cudaMemAdviseSetReadMostly, device));
+    cudaCheck(
+        cudaMemAdvise(m_detParams, ndetParams * sizeof(pixelCPEforGPU::DetParams), cudaMemAdviseSetReadMostly, device));
+    cudaCheck(
+        cudaMemAdvise(m_layerGeometry, sizeof(pixelCPEforGPU::LayerGeometry), cudaMemAdviseSetReadMostly, device));
+    cudaCheck(
+        cudaMemAdvise(m_averageGeometry, sizeof(pixelCPEforGPU::AverageGeometry), cudaMemAdviseSetReadMostly, device));
+#endif
+#ifndef CUDAUVM_DISABLE_PREFETCH
+    cms::cuda::ScopedSetDevice guard{device};
+    auto stream = cms::cuda::getStreamCache().get();
+    cudaCheck(cudaMemPrefetchAsync(m_params, sizeof(pixelCPEforGPU::ParamsOnGPU), device, stream.get()));
+    cudaCheck(cudaMemPrefetchAsync(m_commonParams, sizeof(pixelCPEforGPU::CommonParams), device, stream.get()));
+    cudaCheck(cudaMemPrefetchAsync(m_detParams, ndetParams * sizeof(pixelCPEforGPU::DetParams), device, stream.get()));
+    cudaCheck(cudaMemPrefetchAsync(m_layerGeometry, sizeof(pixelCPEforGPU::LayerGeometry), device, stream.get()));
+    cudaCheck(cudaMemPrefetchAsync(m_averageGeometry, sizeof(pixelCPEforGPU::AverageGeometry), device, stream.get()));
+#endif
+  }
 }

-PixelCPEFast::GPUData::~GPUData() {
-  if (d_paramsOnGPU != nullptr) {
-    cudaFree((void *)h_paramsOnGPU.m_commonParams);
-    cudaFree((void *)h_paramsOnGPU.m_detParams);
-    cudaFree((void *)h_paramsOnGPU.m_averageGeometry);
-    cudaFree(d_paramsOnGPU);
-  }
+PixelCPEFast::~PixelCPEFast() {
+  cudaFree(m_params);
+  cudaFree(m_commonParams);
+  cudaFree(m_detParams);
+  cudaFree(m_layerGeometry);
+  cudaFree(m_averageGeometry);
 }
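
The structural change above: pixelCPEforGPU::ParamsOnGPU and its four sub-structures now live in managed memory, so the constructor wires the nested pointers with plain host assignments and kernels can dereference the same addresses; the lazy per-device ESProduct copies and the GPUData destructor disappear. A standalone sketch of why that works, with simplified stand-in types (CommonParams, Params and theThickness here are placeholders, not the repository's pixelCPEforGPU definitions; error checking omitted for brevity):

#include <cuda_runtime.h>
#include <cstdio>

struct CommonParams { float theThickness; };
struct Params { const CommonParams* commonParams; };

__global__ void readThickness(const Params* p, float* out) {
  *out = p->commonParams->theThickness;  // walks managed pointers directly on the GPU
}

int main() {
  Params* params = nullptr;
  CommonParams* common = nullptr;
  float* result = nullptr;
  cudaMallocManaged(&params, sizeof(Params));
  cudaMallocManaged(&common, sizeof(CommonParams));
  cudaMallocManaged(&result, sizeof(float));

  common->theThickness = 285.f;   // fill on the host
  params->commonParams = common;  // host-side wiring; no cudaMemcpyAsync of pointers

  readThickness<<<1, 1>>>(params, result);
  cudaDeviceSynchronize();
  std::printf("thickness = %f\n", *result);

  cudaFree(result);
  cudaFree(common);
  cudaFree(params);
  return 0;
}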

src/cudauvm/CondFormats/PixelCPEFast.h (+8 -25)

@@ -9,35 +9,18 @@

 class PixelCPEFast {
 public:
-  PixelCPEFast(std::string const &path);
+  PixelCPEFast(std::string const& path);

-  ~PixelCPEFast() = default;
+  ~PixelCPEFast();

-  // The return value can only be used safely in kernels launched on
-  // the same cudaStream, or after cudaStreamSynchronize.
-  const pixelCPEforGPU::ParamsOnGPU *getGPUProductAsync(cudaStream_t cudaStream) const;
-
-  pixelCPEforGPU::ParamsOnGPU const &getCPUProduct() const { return cpuData_; }
+  pixelCPEforGPU::ParamsOnGPU const* get() const { return m_params; }

 private:
-  // allocate it with posix malloc to be ocmpatible with cpu wf
-  std::vector<pixelCPEforGPU::DetParams> m_detParamsGPU;
-  // std::vector<pixelCPEforGPU::DetParams, CUDAHostAllocator<pixelCPEforGPU::DetParams>> m_detParamsGPU;
-  pixelCPEforGPU::CommonParams m_commonParamsGPU;
-  pixelCPEforGPU::LayerGeometry m_layerGeometry;
-  pixelCPEforGPU::AverageGeometry m_averageGeometry;
-
-  pixelCPEforGPU::ParamsOnGPU cpuData_;
-
-  struct GPUData {
-    ~GPUData();
-    // not needed if not used on CPU...
-    pixelCPEforGPU::ParamsOnGPU h_paramsOnGPU;
-    pixelCPEforGPU::ParamsOnGPU *d_paramsOnGPU = nullptr; // copy of the above on the Device
-  };
-  cms::cuda::ESProduct<GPUData> gpuData_;
-
-  void fillParamsForGpu();
+  pixelCPEforGPU::ParamsOnGPU* m_params = nullptr;
+  pixelCPEforGPU::CommonParams* m_commonParams = nullptr;
+  pixelCPEforGPU::DetParams* m_detParams = nullptr;
+  pixelCPEforGPU::LayerGeometry* m_layerGeometry = nullptr;
+  pixelCPEforGPU::AverageGeometry* m_averageGeometry = nullptr;
 };

 #endif // RecoLocalTracker_SiPixelRecHits_PixelCPEFast_h
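
For consumers the accessor shrinks from a stream-bound getGPUProductAsync() plus a separate getCPUProduct() to a single get(); the returned pointer is valid on the host and on every device, so no stream argument or synchronization caveat remains. A hypothetical call site, assuming this repository's headers; produce() and clusterizeKernel() are made-up names for illustration:

__global__ void clusterizeKernel(const pixelCPEforGPU::ParamsOnGPU* params /*, ... */);

void produce(PixelCPEFast const& cpe, cudaStream_t stream) {
  // before: const auto* params = cpe.getGPUProductAsync(stream);
  //         (only safe in kernels launched on `stream`, or after cudaStreamSynchronize)
  pixelCPEforGPU::ParamsOnGPU const* params = cpe.get();  // valid on the host and on every device
  clusterizeKernel<<<1, 64, 0, stream>>>(params);
}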

src/cudauvm/CondFormats/SiPixelFedCablingMapGPUWrapper.cc (+24 -37)

@@ -9,46 +9,33 @@

 // CMSSW includes
 #include "CUDACore/cudaCheck.h"
-#include "CUDACore/device_unique_ptr.h"
-#include "CUDACore/host_unique_ptr.h"
+#include "CUDACore/deviceCount.h"
+#include "CUDACore/ScopedSetDevice.h"
+#include "CUDACore/StreamCache.h"
 #include "CondFormats/SiPixelFedCablingMapGPUWrapper.h"

 SiPixelFedCablingMapGPUWrapper::SiPixelFedCablingMapGPUWrapper(SiPixelFedCablingMapGPU const& cablingMap,
-                                                               std::vector<unsigned char> modToUnp)
-    : modToUnpDefault(modToUnp.size()), hasQuality_(true) {
-  cudaCheck(cudaMallocHost(&cablingMapHost, sizeof(SiPixelFedCablingMapGPU)));
-  std::memcpy(cablingMapHost, &cablingMap, sizeof(SiPixelFedCablingMapGPU));
-
-  std::copy(modToUnp.begin(), modToUnp.end(), modToUnpDefault.begin());
+                                                               std::vector<unsigned char> const& modToUnp)
+    : hasQuality_(true) {
+  cudaCheck(cudaMallocManaged(&cablingMap_, sizeof(SiPixelFedCablingMapGPU)));
+  *cablingMap_ = cablingMap;
+  cudaCheck(cudaMallocManaged(&modToUnpDefault_, modToUnp.size()));
+  std::copy(modToUnp.begin(), modToUnp.end(), modToUnpDefault_);
+  for (int device = 0, ndev = cms::cuda::deviceCount(); device < ndev; ++device) {
+#ifndef CUDAUVM_DISABLE_ADVICE
+    cudaCheck(cudaMemAdvise(cablingMap_, sizeof(SiPixelFedCablingMapGPU), cudaMemAdviseSetReadMostly, device));
+    cudaCheck(cudaMemAdvise(modToUnpDefault_, sizeof(modToUnp.size()), cudaMemAdviseSetReadMostly, device));
+#endif
+#ifndef CUDAUVM_DISABLE_PREFETCH
+    cms::cuda::ScopedSetDevice guard{device};
+    auto stream = cms::cuda::getStreamCache().get();
+    cudaCheck(cudaMemPrefetchAsync(cablingMap_, sizeof(SiPixelFedCablingMapGPU), device, stream.get()));
+    cudaCheck(cudaMemPrefetchAsync(modToUnpDefault_, modToUnp.size(), device, stream.get()));
+#endif
+  }
 }

-SiPixelFedCablingMapGPUWrapper::~SiPixelFedCablingMapGPUWrapper() { cudaCheck(cudaFreeHost(cablingMapHost)); }
-
-const SiPixelFedCablingMapGPU* SiPixelFedCablingMapGPUWrapper::getGPUProductAsync(cudaStream_t cudaStream) const {
-  const auto& data = gpuData_.dataForCurrentDeviceAsync(cudaStream, [this](GPUData& data, cudaStream_t stream) {
-    // allocate
-    cudaCheck(cudaMalloc(&data.cablingMapDevice, sizeof(SiPixelFedCablingMapGPU)));
-
-    // transfer
-    cudaCheck(cudaMemcpyAsync(
-        data.cablingMapDevice, this->cablingMapHost, sizeof(SiPixelFedCablingMapGPU), cudaMemcpyDefault, stream));
-  });
-  return data.cablingMapDevice;
+SiPixelFedCablingMapGPUWrapper::~SiPixelFedCablingMapGPUWrapper() {
+  cudaCheck(cudaFree(cablingMap_));
+  cudaCheck(cudaFree(modToUnpDefault_));
 }
-
-const unsigned char* SiPixelFedCablingMapGPUWrapper::getModToUnpAllAsync(cudaStream_t cudaStream) const {
-  const auto& data =
-      modToUnp_.dataForCurrentDeviceAsync(cudaStream, [this](ModulesToUnpack& data, cudaStream_t stream) {
-        cudaCheck(cudaMalloc((void**)&data.modToUnpDefault, pixelgpudetails::MAX_SIZE_BYTE_BOOL));
-        cudaCheck(cudaMemcpyAsync(data.modToUnpDefault,
-                                  this->modToUnpDefault.data(),
-                                  this->modToUnpDefault.size() * sizeof(unsigned char),
-                                  cudaMemcpyDefault,
-                                  stream));
-      });
-  return data.modToUnpDefault;
-}
-
-SiPixelFedCablingMapGPUWrapper::GPUData::~GPUData() { cudaCheck(cudaFree(cablingMapDevice)); }
-
-SiPixelFedCablingMapGPUWrapper::ModulesToUnpack::~ModulesToUnpack() { cudaCheck(cudaFree(modToUnpDefault)); }
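
Same pattern here: the cabling map and the module-unpacking mask become managed buffers that the constructor fills directly from host-side containers, which is what lets the two getXxxAsync() methods and their cudaMemcpyAsync calls go away. A tiny standalone sketch of filling a managed byte buffer from a std::vector, as the constructor does for modToUnpDefault_ (the buffer size and fill value below are arbitrary; error checking omitted):

#include <cuda_runtime.h>
#include <algorithm>
#include <vector>

int main() {
  std::vector<unsigned char> modToUnp(1856, 0);  // host-side source, as passed to the constructor
  unsigned char* modToUnpDefault = nullptr;
  cudaMallocManaged(&modToUnpDefault, modToUnp.size());          // managed, so no separate host copy is kept
  std::copy(modToUnp.begin(), modToUnp.end(), modToUnpDefault);  // plain host copy replaces cudaMemcpyAsync
  // ... pass modToUnpDefault to kernels on any device ...
  cudaFree(modToUnpDefault);
  return 0;
}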

src/cudauvm/CondFormats/SiPixelFedCablingMapGPUWrapper.h (+6 -20)

@@ -2,8 +2,6 @@
 #define RecoLocalTracker_SiPixelClusterizer_SiPixelFedCablingMapGPUWrapper_h

 #include "CUDACore/ESProduct.h"
-#include "CUDACore/CUDAHostAllocator.h"
-#include "CUDACore/device_unique_ptr.h"
 #include "CondFormats/SiPixelFedCablingMapGPU.h"

 #include <cuda_runtime.h>
@@ -12,35 +10,23 @@

 class SiPixelFedCablingMapGPUWrapper {
 public:
-  explicit SiPixelFedCablingMapGPUWrapper(SiPixelFedCablingMapGPU const &cablingMap,
-                                          std::vector<unsigned char> modToUnp);
+  explicit SiPixelFedCablingMapGPUWrapper(SiPixelFedCablingMapGPU const& cablingMap,
+                                          std::vector<unsigned char> const& modToUnp);
   ~SiPixelFedCablingMapGPUWrapper();

   bool hasQuality() const { return hasQuality_; }

   // returns pointer to GPU memory
-  const SiPixelFedCablingMapGPU *getGPUProductAsync(cudaStream_t cudaStream) const;
+  const SiPixelFedCablingMapGPU* cablingMap() const { return cablingMap_; }

   // returns pointer to GPU memory
-  const unsigned char *getModToUnpAllAsync(cudaStream_t cudaStream) const;
+  const unsigned char* modToUnpAll() const { return modToUnpDefault_; }

 private:
-  std::vector<unsigned char, CUDAHostAllocator<unsigned char>> modToUnpDefault;
   bool hasQuality_;

-  SiPixelFedCablingMapGPU *cablingMapHost = nullptr; // pointer to struct in CPU
-
-  struct GPUData {
-    ~GPUData();
-    SiPixelFedCablingMapGPU *cablingMapDevice = nullptr; // pointer to struct in GPU
-  };
-  cms::cuda::ESProduct<GPUData> gpuData_;
-
-  struct ModulesToUnpack {
-    ~ModulesToUnpack();
-    unsigned char *modToUnpDefault = nullptr; // pointer to GPU
-  };
-  cms::cuda::ESProduct<ModulesToUnpack> modToUnp_;
+  SiPixelFedCablingMapGPU* cablingMap_ = nullptr;
+  unsigned char* modToUnpDefault_ = nullptr;
 };

 #endif
src/cudauvm/CondFormats/SiPixelGainCalibrationForHLTGPU.cc

@@ -1,38 +1,37 @@
+#include <cstring>
+
 #include <cuda.h>

 #include "CondFormats/SiPixelGainCalibrationForHLTGPU.h"
 #include "CondFormats/SiPixelGainForHLTonGPU.h"
 #include "CUDACore/cudaCheck.h"
+#include "CUDACore/deviceCount.h"
+#include "CUDACore/ScopedSetDevice.h"
+#include "CUDACore/StreamCache.h"

 SiPixelGainCalibrationForHLTGPU::SiPixelGainCalibrationForHLTGPU(SiPixelGainForHLTonGPU const& gain,
-                                                                 std::vector<char> gainData)
-    : gainData_(std::move(gainData)) {
-  cudaCheck(cudaMallocHost(&gainForHLTonHost_, sizeof(SiPixelGainForHLTonGPU)));
-  *gainForHLTonHost_ = gain;
-}
+                                                                 std::vector<char> const& gainData) {
+  cudaCheck(cudaMallocManaged(&gainForHLT_, sizeof(SiPixelGainForHLTonGPU)));
+  *gainForHLT_ = gain;
+  cudaCheck(cudaMallocManaged(&gainData_, gainData.size()));
+  gainForHLT_->v_pedestals = gainData_;

-SiPixelGainCalibrationForHLTGPU::~SiPixelGainCalibrationForHLTGPU() { cudaCheck(cudaFreeHost(gainForHLTonHost_)); }
-
-SiPixelGainCalibrationForHLTGPU::GPUData::~GPUData() {
-  cudaCheck(cudaFree(gainForHLTonGPU));
-  cudaCheck(cudaFree(gainDataOnGPU));
+  std::memcpy(gainData_, gainData.data(), gainData.size());
+  for (int device = 0, ndev = cms::cuda::deviceCount(); device < ndev; ++device) {
+#ifndef CUDAUVM_DISABLE_ADVICE
+    cudaCheck(cudaMemAdvise(gainForHLT_, sizeof(SiPixelGainForHLTonGPU), cudaMemAdviseSetReadMostly, device));
+    cudaCheck(cudaMemAdvise(gainData_, gainData.size(), cudaMemAdviseSetReadMostly, device));
+#endif
+#ifndef CUDAUVM_DISABLE_PREFETCH
+    cms::cuda::ScopedSetDevice guard{device};
+    auto stream = cms::cuda::getStreamCache().get();
+    cudaCheck(cudaMemPrefetchAsync(gainForHLT_, sizeof(SiPixelGainForHLTonGPU), device, stream.get()));
+    cudaCheck(cudaMemPrefetchAsync(gainData_, gainData.size(), device, stream.get()));
+#endif
+  }
 }

-const SiPixelGainForHLTonGPU* SiPixelGainCalibrationForHLTGPU::getGPUProductAsync(cudaStream_t cudaStream) const {
-  const auto& data = gpuData_.dataForCurrentDeviceAsync(cudaStream, [this](GPUData& data, cudaStream_t stream) {
-    cudaCheck(cudaMalloc((void**)&data.gainForHLTonGPU, sizeof(SiPixelGainForHLTonGPU)));
-    cudaCheck(cudaMalloc((void**)&data.gainDataOnGPU, this->gainData_.size()));
-    // gains.data().data() is used also for non-GPU code, we cannot allocate it on aligned and write-combined memory
-    cudaCheck(
-        cudaMemcpyAsync(data.gainDataOnGPU, this->gainData_.data(), this->gainData_.size(), cudaMemcpyDefault, stream));
-
-    cudaCheck(cudaMemcpyAsync(
-        data.gainForHLTonGPU, this->gainForHLTonHost_, sizeof(SiPixelGainForHLTonGPU), cudaMemcpyDefault, stream));
-    cudaCheck(cudaMemcpyAsync(&(data.gainForHLTonGPU->v_pedestals),
-                              &(data.gainDataOnGPU),
-                              sizeof(SiPixelGainForHLTonGPU_DecodingStructure*),
-                              cudaMemcpyDefault,
-                              stream));
-  });
-  return data.gainForHLTonGPU;
+SiPixelGainCalibrationForHLTGPU::~SiPixelGainCalibrationForHLTGPU() {
+  cudaCheck(cudaFree(gainForHLT_));
+  cudaCheck(cudaFree(gainData_));
 }

src/cudauvm/CondFormats/SiPixelGainCalibrationForHLTGPU.h (+4 -11)

@@ -8,21 +8,14 @@ struct SiPixelGainForHLTonGPU_DecodingStructure;

 class SiPixelGainCalibrationForHLTGPU {
 public:
-  explicit SiPixelGainCalibrationForHLTGPU(SiPixelGainForHLTonGPU const &gain, std::vector<char> gainData);
+  explicit SiPixelGainCalibrationForHLTGPU(SiPixelGainForHLTonGPU const& gain, std::vector<char> const& gainData);
   ~SiPixelGainCalibrationForHLTGPU();

-  const SiPixelGainForHLTonGPU *getGPUProductAsync(cudaStream_t cudaStream) const;
-  const SiPixelGainForHLTonGPU *getCPUProduct() const { return gainForHLTonHost_; }
+  const SiPixelGainForHLTonGPU* get() const { return gainForHLT_; }

 private:
-  SiPixelGainForHLTonGPU *gainForHLTonHost_ = nullptr;
-  std::vector<char> gainData_;
-  struct GPUData {
-    ~GPUData();
-    SiPixelGainForHLTonGPU *gainForHLTonGPU = nullptr;
-    SiPixelGainForHLTonGPU_DecodingStructure *gainDataOnGPU = nullptr;
-  };
-  cms::cuda::ESProduct<GPUData> gpuData_;
+  SiPixelGainForHLTonGPU* gainForHLT_ = nullptr;
+  SiPixelGainForHLTonGPU_DecodingStructure* gainData_ = nullptr;
 };

 #endif // CalibTracker_SiPixelESProducers_interface_SiPixelGainCalibrationForHLTGPU_h
