@@ -45,7 +45,7 @@ void device2host(size_t count)
4545 EXPECT_TRUE (buffer.deviceData ());
4646 float *d_array = reinterpret_cast <float *>(buffer.deviceData ());
4747 constexpr unsigned int num_threads = 256 ;
48- unsigned int num_blocks = num_blocks = (static_cast <unsigned int >(count) + num_threads - 1 ) / num_threads;
48+ unsigned int num_blocks = (static_cast <unsigned int >(count) + num_threads - 1 ) / num_threads;
4949 nanovdb::util::cuda::lambdaKernel<<<num_blocks, num_threads>>> (count, [=] __device__ (size_t i) {d_array[i] = float (i);});
5050 buffer.deviceDownload ();// copy device -> host
5151 EXPECT_EQ (size, buffer.size ());
@@ -70,7 +70,7 @@ void host2device(size_t count)
7070 EXPECT_TRUE (devBuffer.deviceData ());
7171 float *d_array = reinterpret_cast <float *>(devBuffer.deviceData ());
7272 constexpr unsigned int num_threads = 256 ;
73- unsigned int num_blocks = num_blocks = (static_cast <unsigned int >(count) + num_threads - 1 ) / num_threads;
73+ unsigned int num_blocks = (static_cast <unsigned int >(count) + num_threads - 1 ) / num_threads;
7474
7575 bool *test, *d_test;
7676 cudaCheck (cudaMallocHost ((void **)&test, sizeof (bool )));
@@ -114,7 +114,7 @@ void host2device2host(size_t count)
114114 EXPECT_TRUE (buffer.deviceData ());
115115 float *d_array = reinterpret_cast <float *>(buffer.deviceData ());
116116 constexpr unsigned int num_threads = 256 ;
117- unsigned int num_blocks = num_blocks = (static_cast <unsigned int >(count) + num_threads - 1 ) / num_threads;
117+ unsigned int num_blocks = (static_cast <unsigned int >(count) + num_threads - 1 ) / num_threads;
118118 nanovdb::util::cuda::lambdaKernel<<<num_blocks, num_threads>>> (count, [=] __device__ (size_t i) {
119119 if (d_array[i] != float (i)) *d_test = false ;
120120 d_array[i] = float (i) + 1.0f ;
@@ -295,7 +295,7 @@ TEST(TestNanoVDBCUDA, DeviceMesh)
295295 testKernel<<<1 , 1 , 0 , stream>>> (device);
296296 }
297297 // Wait on each kernel to finish using indexing operation
298- for (int i = 0 ; i < serialMesh.deviceCount (); ++i) {
298+ for (size_t i = 0 ; i < serialMesh.deviceCount (); ++i) {
299299 cudaSetDevice (serialMesh[i].id );
300300 cudaStreamSynchronize (serialMesh[i].stream );
301301 }
@@ -2747,7 +2747,7 @@ TEST(TestNanoVDBCUDA, compareNodeOrdering)
27472747 auto *upper1 = grid1->tree ().getFirstUpper (), *upper2 = grid2->tree ().getFirstUpper ();
27482748 EXPECT_TRUE (upper1);
27492749 EXPECT_TRUE (upper2);
2750- for (int i=0 ; i<grid1->tree ().nodeCount (2 ); ++i) {
2750+ for (uint32_t i=0 ; i<grid1->tree ().nodeCount (2 ); ++i) {
27512751 // std::cerr << "#" << i << " origin(CPU)=" << upper1[i].origin() << " origin(GPU)=" << upper2[i].origin() << std::endl;
27522752 EXPECT_EQ (upper1[i].origin (), upper2[i].origin ());
27532753 EXPECT_EQ (upper1[i].valueMask (), upper2[i].valueMask ());
@@ -2759,7 +2759,7 @@ TEST(TestNanoVDBCUDA, compareNodeOrdering)
27592759 auto *lower1 = grid1->tree ().getFirstLower (), *lower2 = grid2->tree ().getFirstLower ();
27602760 EXPECT_TRUE (lower1);
27612761 EXPECT_TRUE (lower2);
2762- for (int i=0 ; i<grid1->tree ().nodeCount (1 ); ++i) {
2762+ for (uint32_t i=0 ; i<grid1->tree ().nodeCount (1 ); ++i) {
27632763 EXPECT_EQ (lower1[i].origin (), lower2[i].origin ());
27642764 EXPECT_EQ (lower1[i].valueMask (), lower2[i].valueMask ());
27652765 EXPECT_EQ (lower1[i].childMask (), lower2[i].childMask ());
@@ -2770,7 +2770,7 @@ TEST(TestNanoVDBCUDA, compareNodeOrdering)
27702770 auto *leaf1 = grid1->tree ().getFirstLeaf (), *leaf2 = grid2->tree ().getFirstLeaf ();
27712771 EXPECT_TRUE (leaf1);
27722772 EXPECT_TRUE (leaf2);
2773- for (int i=0 ; i<grid1->tree ().nodeCount (0 ); ++i) {
2773+ for (uint32_t i=0 ; i<grid1->tree ().nodeCount (0 ); ++i) {
27742774 EXPECT_EQ (leaf1[i].origin (), leaf2[i].origin ());
27752775 EXPECT_EQ (leaf1[i].valueMask (), leaf2[i].valueMask ());
27762776 }
@@ -3362,7 +3362,8 @@ TEST(TestNanoVDBCUDA, UnifiedBuffer_basic)
33623362 // buffer.deviceDownload(stream);
33633363 buffer.prefetch (0 , size, cudaCpuDeviceId, stream);
33643364 nanovdb::util::Timer timer (" Setting values on CPU with unified memory" );
3365- for (int i = 0 , *x = buffer.data <int >(); i < N; i++) *x++ = 1 ;
3365+ int *x = buffer.data <int >();
3366+ for (size_t i = 0 ; i < N; i++) *x++ = 1 ;
33663367 timer.stop ();
33673368 }
33683369 {// resize unified buffers
@@ -3383,13 +3384,13 @@ TEST(TestNanoVDBCUDA, UnifiedBuffer_basic)
33833384
33843385 EXPECT_EQ (CUDA_SUCCESS, cudaStreamSynchronize (stream));
33853386 int *x = buffer.data <int >();
3386- for (int i = 0 ; i < N; ++i) EXPECT_EQ (1 , *x++);
3387- for (int i = 0 ; i < N; ++i) EXPECT_EQ (2 , *x++);
3387+ for (size_t i = 0 ; i < N; ++i) EXPECT_EQ (1 , *x++);
3388+ for (size_t i = 0 ; i < N; ++i) EXPECT_EQ (2 , *x++);
33883389
33893390 nanovdb::cuda::UnifiedBuffer otherBuffer (std::move (buffer));
33903391 int *y = otherBuffer.data <int >();
3391- for (int i = 0 ; i < N; ++i) EXPECT_EQ (1 , *y++);
3392- for (int i = 0 ; i < N; ++i) EXPECT_EQ (2 , *y++);
3392+ for (size_t i = 0 ; i < N; ++i) EXPECT_EQ (1 , *y++);
3393+ for (size_t i = 0 ; i < N; ++i) EXPECT_EQ (2 , *y++);
33933394}// UnifiedBuffer_basic
33943395
33953396TEST (TestNanoVDBCUDA, UnifiedBuffer_IO)
@@ -3554,8 +3555,7 @@ TEST(TestNanoVDBCUDA, VoxelBlockManager_ValueOnIndex)
35543555 using StencilNeighborsType = uint64_t (*)[27 ];
35553556 auto stencilNeighbors = reinterpret_cast <StencilNeighborsType>(neighborStencilBuffer.data ());
35563557 auto acc = grid->getAccessor ();
3557- for (int i = 0 ; i < voxels.size (); ++i) {
3558- const auto & coord = voxels[i];
3558+ for (const auto & coord : voxels) {
35593559 const auto index = acc.getValue (coord);
35603560 for (int di = -1 ; di <= 1 ; di++)
35613561 for (int dj = -1 ; dj <= 1 ; dj++)
0 commit comments