@@ -76,12 +76,12 @@ inline uint64_t rotl64(uint64_t x, int8_t r)
 // Block read - if your platform needs to do endian-swapping or can only
 // handle aligned reads, do the conversion here
 
-FORCE_INLINE uint32_t getblock32(const uint32_t* p, int i)
+FORCE_INLINE uint32_t getblock32(const uint32_t* p, uint32_t i)
 {
     return p[i];
 }
 
-FORCE_INLINE uint64_t getblock64(const uint64_t* p, int i)
+FORCE_INLINE uint64_t getblock64(const uint64_t* p, std::size_t i)
 {
     return p[i];
 }
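
Note on the block-read hook: the direct p[i] read assumes the platform handles
unaligned, native-endian loads, as the comment above says. Where that does not
hold, a memcpy-based read is one portable option. A minimal sketch, not part of
this change (getblock32_portable is a hypothetical name):

    #include <cstring> // std::memcpy

    FORCE_INLINE uint32_t getblock32_portable(const uint8_t* p, uint32_t i)
    {
        uint32_t block;
        // memcpy is alignment-safe; compilers lower it to a plain load
        std::memcpy(&block, p + i * sizeof(block), sizeof(block));
        return block; // insert a byte-swap here on big-endian targets
    }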
@@ -115,10 +115,10 @@ FORCE_INLINE uint64_t fmix64(uint64_t k)
 
 // -----------------------------------------------------------------------------
 
-inline void MurmurHash3_x86_32(const void* key, int len, uint32_t seed, void* out)
+inline void MurmurHash3_x86_32(const void* key, const uint32_t len, uint32_t seed, void* out)
 {
-    const uint8_t* data = (const uint8_t*)key;
-    const int nblocks = len / 4;
+    const uint8_t* data = reinterpret_cast<const uint8_t*>(key);
+    const uint32_t nblocks = len / 4;
 
     uint32_t h1 = seed;
 
@@ -128,9 +128,9 @@ inline void MurmurHash3_x86_32(const void* key, int len, uint32_t seed, void* ou
     // ----------
     // body
 
-    const uint32_t* blocks = (const uint32_t*)(data + nblocks * 4);
+    const uint32_t* blocks = reinterpret_cast<const uint32_t*>(data);
 
-    for (int i = -nblocks; i; i++)
+    for (uint32_t i = 0; i < nblocks; ++i)
     {
         uint32_t k1 = getblock32(blocks, i);
 
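
Note on the loop rewrite: the two forms are behavior-preserving. The removed code
pointed blocks at the end of the block region and walked it with negative indices;
the replacement points at the start and counts up, touching the same words:

    // old: blocks = (const uint32_t*)(data + nblocks * 4);  reads blocks[i], i in [-nblocks, 0)
    // new: blocks = (const uint32_t*)(data);                reads blocks[j], j in [0, nblocks)
    // both dereference data + 4 * (nblocks + i) == data + 4 * j, with j = nblocks + i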
@@ -146,17 +146,17 @@ inline void MurmurHash3_x86_32(const void* key, int len, uint32_t seed, void* ou
     // ----------
     // tail
 
-    const uint8_t* tail = (const uint8_t*)(data + nblocks * 4);
+    const uint8_t* tail = (data + nblocks * 4);
 
     uint32_t k1 = 0;
 
     switch (len & 3)
     {
     case 3:
-        k1 ^= tail[2] << 16;
+        k1 ^= static_cast<uint32_t>(tail[2] << 16);
         /* no break */
     case 2:
-        k1 ^= tail[1] << 8;
+        k1 ^= static_cast<uint32_t>(tail[1] << 8);
         /* no break */
     case 1:
         k1 ^= tail[0];
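
Note on the switch: the /* no break */ comments mark the fall-through as
intentional. If the project can assume C++17, the standard attribute states the
same intent and satisfies -Wimplicit-fallthrough. A sketch, not part of this
change:

    case 3: k1 ^= static_cast<uint32_t>(tail[2] << 16); [[fallthrough]];
    case 2: k1 ^= static_cast<uint32_t>(tail[1] << 8);  [[fallthrough]];
    case 1: k1 ^= tail[0];
            // ... mixing as above ...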
@@ -173,15 +173,16 @@ inline void MurmurHash3_x86_32(const void* key, int len, uint32_t seed, void* ou
 
     h1 = fmix32(h1);
 
-    *(uint32_t*)out = h1;
+    *static_cast<uint32_t*>(out) = h1;
 }
 
 // -----------------------------------------------------------------------------
 
-inline void MurmurHash3_x64_128(const void* key, const int len, const uint32_t seed, void* out)
+inline void MurmurHash3_x64_128(const void* key, const std::size_t len, const uint32_t seed,
+                                void* out)
 {
-    const uint8_t* data = (const uint8_t*)key;
-    const int nblocks = len / 16;
+    const uint8_t* data = reinterpret_cast<const uint8_t*>(key);
+    const std::size_t nblocks = len / 16;
 
     uint64_t h1 = seed;
     uint64_t h2 = seed;
@@ -192,9 +193,9 @@ inline void MurmurHash3_x64_128(const void* key, const int len, const uint32_t s
     // ----------
     // body
 
-    const uint64_t* blocks = (const uint64_t*)(data);
+    const uint64_t* blocks = reinterpret_cast<const uint64_t*>(data);
 
-    for (int i = 0; i < nblocks; i++)
+    for (std::size_t i = 0; i < nblocks; i++)
     {
         uint64_t k1 = getblock64(blocks, i * 2 + 0);
         uint64_t k2 = getblock64(blocks, i * 2 + 1);
@@ -221,62 +222,62 @@ inline void MurmurHash3_x64_128(const void* key, const int len, const uint32_t s
     // ----------
     // tail
 
-    const uint8_t* tail = (const uint8_t*)(data + nblocks * 16);
+    const uint8_t* tail = (data + nblocks * 16);
 
     uint64_t k1 = 0;
     uint64_t k2 = 0;
 
     switch (len & 15)
     {
     case 15:
-        k2 ^= ((uint64_t)tail[14]) << 48;
+        k2 ^= (static_cast<uint64_t>(tail[14])) << 48;
         /* no break */
     case 14:
-        k2 ^= ((uint64_t)tail[13]) << 40;
+        k2 ^= (static_cast<uint64_t>(tail[13])) << 40;
         /* no break */
     case 13:
-        k2 ^= ((uint64_t)tail[12]) << 32;
+        k2 ^= (static_cast<uint64_t>(tail[12])) << 32;
         /* no break */
     case 12:
-        k2 ^= ((uint64_t)tail[11]) << 24;
+        k2 ^= (static_cast<uint64_t>(tail[11])) << 24;
         /* no break */
     case 11:
-        k2 ^= ((uint64_t)tail[10]) << 16;
+        k2 ^= (static_cast<uint64_t>(tail[10])) << 16;
         /* no break */
     case 10:
-        k2 ^= ((uint64_t)tail[9]) << 8;
+        k2 ^= (static_cast<uint64_t>(tail[9])) << 8;
         /* no break */
     case 9:
-        k2 ^= ((uint64_t)tail[8]) << 0;
+        k2 ^= (static_cast<uint64_t>(tail[8])) << 0;
         k2 *= c2;
         k2 = ROTL64(k2, 33);
         k2 *= c1;
         h2 ^= k2;
         /* no break */
 
     case 8:
-        k1 ^= ((uint64_t)tail[7]) << 56;
+        k1 ^= (static_cast<uint64_t>(tail[7])) << 56;
         /* no break */
     case 7:
-        k1 ^= ((uint64_t)tail[6]) << 48;
+        k1 ^= (static_cast<uint64_t>(tail[6])) << 48;
         /* no break */
     case 6:
-        k1 ^= ((uint64_t)tail[5]) << 40;
+        k1 ^= (static_cast<uint64_t>(tail[5])) << 40;
         /* no break */
     case 5:
-        k1 ^= ((uint64_t)tail[4]) << 32;
+        k1 ^= (static_cast<uint64_t>(tail[4])) << 32;
         /* no break */
     case 4:
-        k1 ^= ((uint64_t)tail[3]) << 24;
+        k1 ^= (static_cast<uint64_t>(tail[3])) << 24;
         /* no break */
     case 3:
-        k1 ^= ((uint64_t)tail[2]) << 16;
+        k1 ^= (static_cast<uint64_t>(tail[2])) << 16;
         /* no break */
     case 2:
-        k1 ^= ((uint64_t)tail[1]) << 8;
+        k1 ^= (static_cast<uint64_t>(tail[1])) << 8;
         /* no break */
     case 1:
-        k1 ^= ((uint64_t)tail[0]) << 0;
+        k1 ^= (static_cast<uint64_t>(tail[0])) << 0;
         k1 *= c1;
         k1 = ROTL64(k1, 31);
         k1 *= c2;
@@ -298,8 +299,8 @@ inline void MurmurHash3_x64_128(const void* key, const int len, const uint32_t s
     h1 += h2;
     h2 += h1;
 
-    ((uint64_t*)out)[0] = h1;
-    ((uint64_t*)out)[1] = h2;
+    static_cast<uint64_t*>(out)[0] = h1;
+    static_cast<uint64_t*>(out)[1] = h2;
 }
 
 // -----------------------------------------------------------------------------
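
Note on the output writes: static_cast<uint64_t*>(out) is cleaner than the C cast
but still assumes the caller's out buffer is suitably aligned for uint64_t. If that
cannot be guaranteed, a memcpy-based store is the portable alternative. A sketch,
not part of this change:

    std::memcpy(static_cast<uint8_t*>(out), &h1, sizeof(h1));
    std::memcpy(static_cast<uint8_t*>(out) + sizeof(h1), &h2, sizeof(h2));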
@@ -320,7 +321,7 @@ template <>
 inline size_t hash_impl<32>(const void* buffer, size_t len)
 {
     uint32_t result = 0;
-    murmurhash3::MurmurHash3_x86_32(buffer, len, 0 /* seed */, &result);
+    murmurhash3::MurmurHash3_x86_32(buffer, static_cast<uint32_t>(len), 0 /* seed */, &result);
     return result;
 }
 
@@ -337,10 +338,9 @@ inline size_t hash_impl<64>(const void* buffer, size_t len)
     murmurhash3::MurmurHash3_x64_128(buffer, len, 0 /* seed */, result128);
 
     // this is the only "defined" way to convert properly...
-    // TODO: Why does MSVC complain about size_t != uint64_t?!
     size_t result = 0;
     memcpy(&result, result128, sizeof(result));
-    return result;
+    return result;
 }
 
 /**
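
Note on hash_impl<64>: the memcpy copies only the first sizeof(size_t) bytes of the
128-bit result, which is the defined way to narrow it on both 32- and 64-bit
targets. A presumed call site follows; the dispatching code is outside these hunks,
so the selector expression is an assumption:

    // pick the specialization matching the platform's size_t width
    size_t h = hash_impl<sizeof(size_t) * 8>(buffer, len);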