Skip to content

Commit 7969cd8

Browse files
committed
Fix compiler warnings for NanoVDB
Signed-off-by: Matthew Cong <mcong@nvidia.com>
1 parent cb7040b commit 7969cd8

4 files changed

Lines changed: 24 additions & 24 deletions

File tree

nanovdb/nanovdb/GridHandle.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -490,7 +490,7 @@ splitGrids(const GridHandle<BufferT> &handle, const BufferT* other = nullptr)
490490
h = HandleT(std::move(buffer));
491491
ptr = util::PtrAdd(ptr, src->mGridSize);
492492
}
493-
return std::move(handles);
493+
return handles;
494494
}// splitGrids
495495

496496
/// @brief Combines (or merges) multiple GridHandles into a single GridHandle containing all grids

nanovdb/nanovdb/cuda/GridHandle.cuh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,7 @@ splitGridHandles(const GridHandle<BufferT> &handle, const BufferT* other = nullp
6969
ptr = util::PtrAdd(ptr, handle.gridSize(n));
7070
}
7171
cudaCheck(util::cuda::freeAsync(d_dirty, stream));
72-
return std::move(handles);
72+
return handles;
7373
}// cuda::splitGridHandles
7474

7575
template<typename BufferT, template <class, class...> class VectorT>

nanovdb/nanovdb/unittest/TestMultiGPU.cu

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -49,9 +49,9 @@ TEST(TestNanoVDBMultiGPU, ExclusiveSum)
4949
{
5050
// Set the input indices corresponding to the Fibonacci sequence to be 1, rest 0
5151
input[0] = 1;
52-
int i = 0;
53-
int j = 1;
54-
int k = i + j;
52+
size_t i = 0;
53+
size_t j = 1;
54+
size_t k = i + j;
5555
while(k < input.size()) {
5656
input[k] = 1;
5757
i = j;
@@ -68,7 +68,7 @@ TEST(TestNanoVDBMultiGPU, ExclusiveSum)
6868
}
6969

7070
int accumulator = 0;
71-
for (auto i = 0; i < output.size(); ++i) {
71+
for (size_t i = 0; i < output.size(); ++i) {
7272
EXPECT_EQ(output[i], accumulator);
7373
accumulator += input[i];
7474
}
@@ -114,9 +114,9 @@ TEST(TestNanoVDBMultiGPU, InclusiveSum)
114114
{
115115
// Set the input indices corresponding to the Fibonacci sequence to be 1, rest 0
116116
input[0] = 1;
117-
int i = 0;
118-
int j = 1;
119-
int k = i + j;
117+
size_t i = 0;
118+
size_t j = 1;
119+
size_t k = i + j;
120120
while(k < input.size()) {
121121
input[k] = 1;
122122
i = j;
@@ -133,7 +133,7 @@ TEST(TestNanoVDBMultiGPU, InclusiveSum)
133133
}
134134

135135
int accumulator = 0;
136-
for (auto i = 0; i < output.size(); ++i) {
136+
for (size_t i = 0; i < output.size(); ++i) {
137137
accumulator += input[i];
138138
EXPECT_EQ(output[i], accumulator);
139139
}

nanovdb/nanovdb/unittest/TestNanoVDB.cu

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ void device2host(size_t count)
4545
EXPECT_TRUE(buffer.deviceData());
4646
float *d_array = reinterpret_cast<float*>(buffer.deviceData());
4747
constexpr unsigned int num_threads = 256;
48-
unsigned int num_blocks = num_blocks = (static_cast<unsigned int>(count) + num_threads - 1) / num_threads;
48+
unsigned int num_blocks = (static_cast<unsigned int>(count) + num_threads - 1) / num_threads;
4949
nanovdb::util::cuda::lambdaKernel<<<num_blocks, num_threads>>>(count, [=] __device__ (size_t i) {d_array[i] = float(i);});
5050
buffer.deviceDownload();// copy device -> host
5151
EXPECT_EQ(size, buffer.size());
@@ -70,7 +70,7 @@ void host2device(size_t count)
7070
EXPECT_TRUE(devBuffer.deviceData());
7171
float *d_array = reinterpret_cast<float*>(devBuffer.deviceData());
7272
constexpr unsigned int num_threads = 256;
73-
unsigned int num_blocks = num_blocks = (static_cast<unsigned int>(count) + num_threads - 1) / num_threads;
73+
unsigned int num_blocks = (static_cast<unsigned int>(count) + num_threads - 1) / num_threads;
7474

7575
bool *test, *d_test;
7676
cudaCheck(cudaMallocHost((void**)&test, sizeof(bool)));
@@ -114,7 +114,7 @@ void host2device2host(size_t count)
114114
EXPECT_TRUE(buffer.deviceData());
115115
float *d_array = reinterpret_cast<float*>(buffer.deviceData());
116116
constexpr unsigned int num_threads = 256;
117-
unsigned int num_blocks = num_blocks = (static_cast<unsigned int>(count) + num_threads - 1) / num_threads;
117+
unsigned int num_blocks = (static_cast<unsigned int>(count) + num_threads - 1) / num_threads;
118118
nanovdb::util::cuda::lambdaKernel<<<num_blocks, num_threads>>>(count, [=] __device__ (size_t i) {
119119
if (d_array[i] != float(i)) *d_test = false;
120120
d_array[i] = float(i) + 1.0f;
@@ -295,7 +295,7 @@ TEST(TestNanoVDBCUDA, DeviceMesh)
295295
testKernel<<<1, 1, 0, stream>>>(device);
296296
}
297297
// Wait on each kernel to finish using indexing operation
298-
for (int i = 0; i < serialMesh.deviceCount(); ++i) {
298+
for (size_t i = 0; i < serialMesh.deviceCount(); ++i) {
299299
cudaSetDevice(serialMesh[i].id);
300300
cudaStreamSynchronize(serialMesh[i].stream);
301301
}
@@ -2747,7 +2747,7 @@ TEST(TestNanoVDBCUDA, compareNodeOrdering)
27472747
auto *upper1 = grid1->tree().getFirstUpper(), *upper2 = grid2->tree().getFirstUpper();
27482748
EXPECT_TRUE(upper1);
27492749
EXPECT_TRUE(upper2);
2750-
for (int i=0; i<grid1->tree().nodeCount(2); ++i) {
2750+
for (uint32_t i=0; i<grid1->tree().nodeCount(2); ++i) {
27512751
//std::cerr << "#" << i << " origin(CPU)=" << upper1[i].origin() << " origin(GPU)=" << upper2[i].origin() << std::endl;
27522752
EXPECT_EQ(upper1[i].origin(), upper2[i].origin());
27532753
EXPECT_EQ(upper1[i].valueMask(), upper2[i].valueMask());
@@ -2759,7 +2759,7 @@ TEST(TestNanoVDBCUDA, compareNodeOrdering)
27592759
auto *lower1 = grid1->tree().getFirstLower(), *lower2 = grid2->tree().getFirstLower();
27602760
EXPECT_TRUE(lower1);
27612761
EXPECT_TRUE(lower2);
2762-
for (int i=0; i<grid1->tree().nodeCount(1); ++i) {
2762+
for (uint32_t i=0; i<grid1->tree().nodeCount(1); ++i) {
27632763
EXPECT_EQ(lower1[i].origin(), lower2[i].origin());
27642764
EXPECT_EQ(lower1[i].valueMask(), lower2[i].valueMask());
27652765
EXPECT_EQ(lower1[i].childMask(), lower2[i].childMask());
@@ -2770,7 +2770,7 @@ TEST(TestNanoVDBCUDA, compareNodeOrdering)
27702770
auto *leaf1 = grid1->tree().getFirstLeaf(), *leaf2 = grid2->tree().getFirstLeaf();
27712771
EXPECT_TRUE(leaf1);
27722772
EXPECT_TRUE(leaf2);
2773-
for (int i=0; i<grid1->tree().nodeCount(0); ++i) {
2773+
for (uint32_t i=0; i<grid1->tree().nodeCount(0); ++i) {
27742774
EXPECT_EQ(leaf1[i].origin(), leaf2[i].origin());
27752775
EXPECT_EQ(leaf1[i].valueMask(), leaf2[i].valueMask());
27762776
}
@@ -3362,7 +3362,8 @@ TEST(TestNanoVDBCUDA, UnifiedBuffer_basic)
33623362
//buffer.deviceDownload(stream);
33633363
buffer.prefetch(0, size, cudaCpuDeviceId, stream);
33643364
nanovdb::util::Timer timer("Setting values on CPU with unified memory");
3365-
for (int i = 0, *x = buffer.data<int>(); i < N; i++) *x++ = 1;
3365+
int *x = buffer.data<int>();
3366+
for (size_t i = 0; i < N; i++) *x++ = 1;
33663367
timer.stop();
33673368
}
33683369
{// resize unified buffers
@@ -3383,13 +3384,13 @@ TEST(TestNanoVDBCUDA, UnifiedBuffer_basic)
33833384

33843385
EXPECT_EQ(CUDA_SUCCESS, cudaStreamSynchronize(stream));
33853386
int *x = buffer.data<int>();
3386-
for (int i = 0; i < N; ++i) EXPECT_EQ(1, *x++);
3387-
for (int i = 0; i < N; ++i) EXPECT_EQ(2, *x++);
3387+
for (size_t i = 0; i < N; ++i) EXPECT_EQ(1, *x++);
3388+
for (size_t i = 0; i < N; ++i) EXPECT_EQ(2, *x++);
33883389

33893390
nanovdb::cuda::UnifiedBuffer otherBuffer(std::move(buffer));
33903391
int *y = otherBuffer.data<int>();
3391-
for (int i = 0; i < N; ++i) EXPECT_EQ(1, *y++);
3392-
for (int i = 0; i < N; ++i) EXPECT_EQ(2, *y++);
3392+
for (size_t i = 0; i < N; ++i) EXPECT_EQ(1, *y++);
3393+
for (size_t i = 0; i < N; ++i) EXPECT_EQ(2, *y++);
33933394
}// UnifiedBuffer_basic
33943395

33953396
TEST(TestNanoVDBCUDA, UnifiedBuffer_IO)
@@ -3554,8 +3555,7 @@ TEST(TestNanoVDBCUDA, VoxelBlockManager_ValueOnIndex)
35543555
using StencilNeighborsType = uint64_t (*)[27];
35553556
auto stencilNeighbors = reinterpret_cast<StencilNeighborsType>(neighborStencilBuffer.data());
35563557
auto acc = grid->getAccessor();
3557-
for (int i = 0; i < voxels.size(); ++i) {
3558-
const auto& coord = voxels[i];
3558+
for (const auto& coord : voxels) {
35593559
const auto index = acc.getValue(coord);
35603560
for (int di = -1; di <= 1; di++)
35613561
for (int dj = -1; dj <= 1; dj++)

0 commit comments

Comments
 (0)