/**
 * Written in the D programming language.
 * This module provides functions for uniformly computing hash values across different types.
 *
 * Copyright: Copyright Igor Stepanov 2013-2013.
 * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
 * Authors: Igor Stepanov
 * Source: $(DRUNTIMESRC core/internal/_hash.d)
 */
module core.internal.hash;

import core.internal.traits : Unconst;

// If true, ensure that positive zero and negative zero have the same hash.
// Historically typeid(float).getHash did this but hashOf(float) did not.
private enum floatCoalesceZeroes = true;
// If true, ensure that all NaNs of the same floating point type have the same hash.
// Historically typeid(float).getHash didn't do this but hashOf(float) did.
private enum floatCoalesceNaNs = true;

// If either of the above is true then no struct or array that contains the
// representation of a floating point number may be hashed with `bytesHash`.

@nogc nothrow pure @safe unittest
{
    static if (floatCoalesceZeroes)
        assert(hashOf(+0.0) == hashOf(-0.0)); // Same hash for +0.0 and -0.0.
    static if (floatCoalesceNaNs)
        assert(hashOf(double.nan) == hashOf(-double.nan)); // Same hash for different NaNs.
}

private enum hasCallableToHash(T) = __traits(compiles,
    {
        size_t hash = ((T* x) => (*x).toHash())(null);
    });

@nogc nothrow pure @safe unittest
{
    static struct S { size_t toHash() { return 4; } }
    assert(hasCallableToHash!S);
    assert(!hasCallableToHash!(shared const S));
}

private enum isFinalClassWithAddressBasedHash(T) = __traits(isFinalClass, T)
    // Use __traits(compiles, ...) in case there are multiple overloads of `toHash`.
    && __traits(compiles, {static assert(&Object.toHash is &T.toHash);});

@nogc nothrow pure @safe unittest
{
    static class C1 {}
    final static class C2 : C1 {}
    final static class C3 : C1 { override size_t toHash() const nothrow { return 1; }}
    static assert(!isFinalClassWithAddressBasedHash!Object);
    static assert(!isFinalClassWithAddressBasedHash!C1);
    static assert(isFinalClassWithAddressBasedHash!C2);
    static assert(!isFinalClassWithAddressBasedHash!C3);
}

private template isCppClassWithoutHash(T)
{
    static if (!is(T == class) && !is(T == interface))
        enum isCppClassWithoutHash = false;
    else
        enum bool isCppClassWithoutHash = __traits(getLinkage, T) == "C++"
            && !is(immutable T* : immutable Object*) && !hasCallableToHash!T;
}
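
// A hedged usage sketch (not from the original source): `isCppClassWithoutHash`
// accepts an extern(C++) class that declares no callable `toHash` and rejects
// ordinary D classes. The class names below are illustrative only.
@nogc nothrow pure @safe unittest
{
    extern(C++) static class SomeCppClass {}
    static class SomeDClass {}
    static assert( isCppClassWithoutHash!SomeCppClass);
    static assert(!isCppClassWithoutHash!SomeDClass);
    static assert(!isCppClassWithoutHash!int);
}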

/+
Is it valid to calculate a hash code for T based on the bits of its
representation? Always false for interfaces, dynamic arrays, and
associative arrays. False for all classes except final classes that do
not override `toHash`.

Note: according to the spec as of
https://github.com/dlang/dlang.org/commit/d66eff16491b0664c0fc00ba80a7aa291703f1f2
the contents of unnamed paddings between fields are undefined. Currently
this hashing implementation assumes that the padding contents (if any)
for all instances of `T` are the same. The correctness of this
assumption is yet to be verified.
+/
private template canBitwiseHash(T)
{
    static if (is(T EType == enum))
        enum canBitwiseHash = .canBitwiseHash!EType;
    else static if (__traits(isFloating, T))
        enum canBitwiseHash = !(floatCoalesceZeroes || floatCoalesceNaNs);
    else static if (__traits(isScalar, T))
        enum canBitwiseHash = true;
    else static if (is(T == class))
    {
        enum canBitwiseHash = isFinalClassWithAddressBasedHash!T || isCppClassWithoutHash!T;
    }
    else static if (is(T == interface))
    {
        enum canBitwiseHash = isCppClassWithoutHash!T;
    }
    else static if (is(T == struct))
    {
        static if (hasCallableToHash!T || __traits(isNested, T))
            enum canBitwiseHash = false;
        else
        {
            import core.internal.traits : allSatisfy;
            enum canBitwiseHash = allSatisfy!(.canBitwiseHash, typeof(T.tupleof));
        }
    }
    else static if (is(T == union))
    {
        // Right now we always bytewise hash unions that lack a callable `toHash`.
        enum canBitwiseHash = !hasCallableToHash!T;
    }
    else static if (is(T E : E[]))
    {
        static if (__traits(isStaticArray, T))
            enum canBitwiseHash = (T.length == 0) || .canBitwiseHash!E;
        else
            enum canBitwiseHash = false;
    }
    else static if (__traits(isAssociativeArray, T))
    {
        enum canBitwiseHash = false;
    }
    else
    {
        static assert(is(T == delegate) || is(T : void) || is(T : typeof(null)),
            "Internal error: unanticipated type "~T.stringof);
        enum canBitwiseHash = true;
    }
}
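
// A hedged sketch (not from the original source) of what `canBitwiseHash`
// accepts with the current configuration. The struct names are illustrative only.
@nogc nothrow pure @safe unittest
{
    static struct PlainPod { int a; long b; }
    static struct WithHash { size_t toHash() const nothrow @nogc pure @safe { return 0; } }

    static assert( canBitwiseHash!int);
    static assert( canBitwiseHash!PlainPod);   // all fields are bitwise-hashable
    static assert(!canBitwiseHash!WithHash);   // user-defined toHash must run
    static assert(!canBitwiseHash!(int[]));    // dynamic arrays never qualify
    static if (floatCoalesceZeroes || floatCoalesceNaNs)
        static assert(!canBitwiseHash!double); // floats need zero/NaN coalescing
}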

//enum hash. CTFE depends on base type
size_t hashOf(T)(auto ref T val, size_t seed = 0)
if (is(T == enum) && !__traits(isScalar, T))
{
    static if (is(T EType == enum)) {} //for EType
    return hashOf(cast(EType) val, seed);
}
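
// A hedged sketch (not from the original source): an enum whose base type is
// not a scalar (here `string`) hashes exactly like a value of its base type.
// The enum name is illustrative only.
@nogc nothrow pure @safe unittest
{
    enum StringEnum : string { a = "foo" }
    assert(hashOf(StringEnum.a) == hashOf("foo"));
    assert(hashOf(StringEnum.a, 42) == hashOf("foo", 42));
}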

//CTFE ready (depends on base type).
size_t hashOf(T)(scope const auto ref T val, size_t seed = 0)
if (!is(T == enum) && __traits(isStaticArray, T) && canBitwiseHash!T)
{
    import core.internal.convert : toUbyte;
    // FIXME:
    // We would like to do this:
    //
    //static if (T.length == 0)
    //    return seed;
    //else static if (T.length == 1)
    //    return hashOf(val[0], seed);
    //else
    //    return bytesHashWithExactSizeAndAlignment!T(toUbyte(val), seed);
    //
    // ... but that's inefficient when using a runtime TypeInfo (introduces a branch)
    // and PR #2243 wants typeid(T).getHash(&val) to produce the same result as
    // hashOf(val).
    static if (T.length == 0)
    {
        return bytesHashAlignedBy!size_t((ubyte[]).init, seed);
    }
    else static if (is(typeof(toUbyte(val)) == const(ubyte)[]))
    {
        return bytesHashAlignedBy!T(toUbyte(val), seed);
    }
    else //Other types. CTFE unsupported
    {
        assert(!__ctfe, "unable to compute hash of "~T.stringof~" at compile time");
        return bytesHashAlignedBy!T((cast(const(ubyte)*) &val)[0 .. T.sizeof], seed);
    }
}

//CTFE ready (depends on base type).
size_t hashOf(T)(auto ref T val, size_t seed = 0)
if (!is(T == enum) && __traits(isStaticArray, T) && !canBitwiseHash!T)
{
    // FIXME:
    // We would like to do this:
    //
    //static if (T.length == 0)
    //    return seed;
    //else static if (T.length == 1)
    //    return hashOf(val[0], seed);
    //else
    //    /+ hash like a dynamic array +/
    //
    // ... but that's inefficient when using a runtime TypeInfo (introduces a branch)
    // and PR #2243 wants typeid(T).getHash(&val) to produce the same result as
    // hashOf(val).
    return hashOf(val[], seed);
}
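
// A hedged sketch (not from the original source): equal static arrays hash
// equally, and with the current implementation a static array hashes the same
// as a slice over its elements (see the FIXME notes above).
@nogc nothrow pure @safe unittest
{
    int[4] a = [1, 2, 3, 4];
    int[4] b = [1, 2, 3, 4];
    assert(hashOf(a) == hashOf(b));
    assert(hashOf(a, 99) == hashOf(b, 99));
    assert(hashOf(a) == hashOf(a[]));
}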

//dynamic array hash
size_t hashOf(T)(scope const T val, size_t seed = 0)
if (is(T == S[], S) && (__traits(isScalar, S) || canBitwiseHash!S)) // excludes enum types
{
    import core.internal.convert : toUbyte;
    alias ElementType = typeof(val[0]);
    static if (!canBitwiseHash!ElementType)
    {
        size_t hash = seed;
        foreach (ref o; val)
        {
            hash = hashOf(hashOf(o), hash); // double hashing to match TypeInfo.getHash
        }
        return hash;
    }
    else static if (is(typeof(toUbyte(val)) == const(ubyte)[]))
    //ubyteable array (arithmetic types and structs without toHash). CTFE ready for arithmetic types and structs without reference fields.
    {
        return bytesHashAlignedBy!ElementType(toUbyte(val), seed);
    }
    else //Other types. CTFE unsupported
    {
        assert(!__ctfe, "unable to compute hash of "~T.stringof~" at compile time");
        return bytesHashAlignedBy!ElementType((cast(const(ubyte)*) val.ptr)[0 .. ElementType.sizeof*val.length], seed);
    }
}

//dynamic array hash
size_t hashOf(T)(T val, size_t seed = 0)
if (is(T == S[], S) && !(__traits(isScalar, S) || canBitwiseHash!S)) // excludes enum types
{
    size_t hash = seed;
    foreach (ref o; val)
    {
        hash = hashOf(hashOf(o), hash); // double hashing because TypeInfo.getHash doesn't allow passing a seed value
    }
    return hash;
}
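
// A hedged sketch (not from the original source): slices are hashed by their
// contents, so equal contents produce equal hashes regardless of qualifiers.
@nogc nothrow pure @safe unittest
{
    string s = "hash me";
    const(char)[] c = s;
    assert(hashOf(s) == hashOf(c));
    assert(hashOf(s, 7) == hashOf(c, 7));
}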

// Coalesces a floating point value for hashing: maps +0.0 and -0.0 to the same
// value and all NaNs to a single NaN, as configured by the enums above.
private F coalesceFloat(F)(const F val)
if (__traits(isFloating, val) && !is(F == __vector) && !is(F : creal))
{
    static if (floatCoalesceZeroes)
        if (val == cast(F) 0)
            return cast(F) 0;
    static if (floatCoalesceNaNs)
        if (val != val)
            return F.nan;
    return val;
}
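
// A hedged sketch (not from the original source): the coalescing above is what
// makes seeded float hashes agree for +0.0f/-0.0f and for different NaN payloads.
@nogc nothrow pure @safe unittest
{
    static if (floatCoalesceZeroes)
        assert(hashOf(-0.0f, 123) == hashOf(0.0f, 123));
    static if (floatCoalesceNaNs)
        assert(hashOf(float.nan, 123) == hashOf(-float.nan, 123));
}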

//scalar type hash
@trusted @nogc nothrow pure
size_t hashOf(T)(scope const T val) if (__traits(isScalar, T) && !is(T == __vector))
{
    static if (is(T V : V*))
    {
        if (__ctfe)
        {
            if (val is null) return 0;
            assert(0, "Unable to calculate hash of non-null pointer at compile time");
        }
        size_t result = cast(size_t) val;
        return result ^ (result >> 4);
    }
    else static if (is(T EType == enum) && is(typeof(val[0])))
    {
        // Enum type whose base type is vector.
        return hashOf(cast(EType) val);
    }
    else static if (__traits(isIntegral, T))
    {
        static if (T.sizeof <= size_t.sizeof)
            return val;
        else
            return cast(size_t) (val ^ (val >>> (size_t.sizeof * 8)));
    }
    else static if (is(T : creal))
    {
        return hashOf(coalesceFloat(val.re), hashOf(coalesceFloat(val.im)));
    }
    else
    {
        static assert(__traits(isFloating, T));
        auto data = coalesceFloat(val);
        static if (T.sizeof == float.sizeof && T.mant_dig == float.mant_dig)
            return *cast(const uint*) &data;
        else static if (T.sizeof == double.sizeof && T.mant_dig == double.mant_dig)
            return hashOf(*cast(const ulong*) &data);
        else
        {
            import core.internal.convert : floatSize, toUbyte;
            return bytesHashWithExactSizeAndAlignment!T(toUbyte(data)[0 .. floatSize!T], 0);
        }
    }
}
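
// A hedged sketch (not from the original source): with the current
// implementation, small integers without a seed hash to themselves, and a null
// pointer hashes to 0 (this also works in CTFE).
@nogc nothrow pure @safe unittest
{
    assert(hashOf(42) == 42);
    int* p = null;
    assert(hashOf(p) == 0);
    static assert(hashOf((int*).init) == 0);
}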

//scalar type hash
@trusted @nogc nothrow pure
size_t hashOf(T)(scope const T val, size_t seed) if (__traits(isScalar, T) && !is(T == __vector))
{
    static if (is(T V : V*))
    {
        if (__ctfe)
        {
            if (val is null) return hashOf(size_t(0), seed);
            assert(0, "Unable to calculate hash of non-null pointer at compile time");
        }
        return hashOf(cast(size_t) val, seed);
    }
    else static if (is(T EType == enum) && is(typeof(val[0])))
    {
        // Enum type whose base type is vector.
        return hashOf(cast(EType) val, seed);
    }
    else static if (__traits(isIntegral, val) && T.sizeof <= size_t.sizeof)
    {
        static if (size_t.sizeof < ulong.sizeof)
        {
            //MurmurHash3 32-bit single round
            enum uint c1 = 0xcc9e2d51;
            enum uint c2 = 0x1b873593;
            enum uint c3 = 0xe6546b64;
            enum uint r1 = 15;
            enum uint r2 = 13;
        }
        else
        {
            //Half of MurmurHash3 64-bit single round
            //(omits second interleaved update)
            enum ulong c1 = 0x87c37b91114253d5;
            enum ulong c2 = 0x4cf5ad432745937f;
            enum ulong c3 = 0x52dce729;
            enum uint r1 = 31;
            enum uint r2 = 27;
        }
        size_t h = c1 * val;
        h = (h << r1) | (h >>> (size_t.sizeof * 8 - r1));
        h = (h * c2) ^ seed;
        h = (h << r2) | (h >>> (size_t.sizeof * 8 - r2));
        return h * 5 + c3;
    }
    else static if (__traits(isIntegral, val) && T.sizeof > size_t.sizeof)
    {
        static foreach (i; 0 .. T.sizeof / size_t.sizeof)
            seed = hashOf(cast(size_t) (val >>> (size_t.sizeof * 8 * i)), seed);
        return seed;
    }
    else static if (is(T : creal))
    {
        return hashOf(val.re, hashOf(val.im, seed));
    }
    else static if (__traits(isFloating, T))
    {
        auto data = coalesceFloat(val);
        static if (T.sizeof == float.sizeof && T.mant_dig == float.mant_dig)
            return hashOf(*cast(const uint*) &data, seed);
        else static if (T.sizeof == double.sizeof && T.mant_dig == double.mant_dig)
            return hashOf(*cast(const ulong*) &data, seed);
        else
        {
            import core.internal.convert : floatSize, toUbyte;
            return bytesHashWithExactSizeAndAlignment!T(toUbyte(data)[0 .. floatSize!T], seed);
        }
    }
    else
    {
        static assert(0);
    }
}
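
// A hedged sketch (not from the original source): the MurmurHash-style round
// above mixes the seed through bijective steps (rotation, odd multiply, add),
// so for a fixed small integer input distinct seeds cannot collide.
@nogc nothrow pure @safe unittest
{
    assert(hashOf(0, 1) != hashOf(0, 2));
    assert(hashOf(12345, 0) == hashOf(12345, 0));
}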

size_t hashOf(T)(scope const T val, size_t seed = 0) @safe @nogc nothrow pure
if (is(T == __vector)) // excludes enum types
{
    static if (__traits(isFloating, T) && (floatCoalesceZeroes || floatCoalesceNaNs))
    {
        if (__ctfe)
        {
            // Workaround for CTFE bug.
            static if (is(immutable typeof(val[0]) == immutable E, E)) {} // Get E.
            E[T.sizeof / E.sizeof] array;
            foreach (i; 0 .. T.sizeof / E.sizeof)
                array[i] = val[i];
            return hashOf(array, seed);
        }
        return hashOf(val.array, seed);
    }
    else
    {
        import core.internal.convert : toUbyte;
        return bytesHashAlignedBy!T(toUbyte(val), seed);
    }
}

//typeof(null) hash. CTFE supported
@trusted @nogc nothrow pure
size_t hashOf(T)(scope const T val) if (!is(T == enum) && is(T : typeof(null)))
{
    return 0;
}

//typeof(null) hash. CTFE supported
@trusted @nogc nothrow pure
size_t hashOf(T)(scope const T val, size_t seed) if (!is(T == enum) && is(T : typeof(null)))
{
    return hashOf(size_t(0), seed);
}
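
// A hedged sketch (not from the original source): `null` literals hash to 0
// without a seed and fold the seed in like `size_t(0)`; both work in CTFE.
@nogc nothrow pure @safe unittest
{
    static assert(hashOf(null) == 0);
    assert(hashOf(null, 7) == hashOf(size_t(0), 7));
}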

private enum _hashOfStruct =
q{
    enum bool isChained = is(typeof(seed) : size_t);
    static if (!isChained) enum size_t seed = 0;
    static if (hasCallableToHash!(typeof(val))) //CTFE depends on toHash()
    {
        static if (!__traits(isSame, typeof(val), __traits(parent, val.toHash))
            && is(typeof(val is null)))
        {
            static if (isChained)
                return hashOf(__traits(getMember, val, __traits(getAliasThis, typeof(val))), seed);
            else
                return hashOf(__traits(getMember, val, __traits(getAliasThis, typeof(val))));
        }
        else
        {
            static if (isChained)
                return hashOf(cast(size_t) val.toHash(), seed);
            else
                return val.toHash();
        }
    }
    else
    {
        import core.internal.convert : toUbyte;
        static if (__traits(hasMember, T, "toHash") && is(typeof(T.toHash) == function))
        {
            // TODO: in the future maybe this should be changed to a static
            // assert(0), because if there's a `toHash` the programmer probably
            // expected it to be called and a compilation failure here will
            // expose a bug in their code.
            // In the future we also might want to disallow non-const toHash
            // altogether.
            pragma(msg, "Warning: struct "~__traits(identifier, T)
                ~" has method toHash, however it cannot be called with "
                ~typeof(val).stringof~" this.");
            static if (__traits(compiles, __traits(getLocation, T.toHash)))
            {
                enum file = __traits(getLocation, T.toHash)[0];
                enum line = __traits(getLocation, T.toHash)[1].stringof;
                pragma(msg, " ", __traits(identifier, T), ".toHash defined here: ", file, "(", line, ")");
            }
        }

        static if (T.tupleof.length == 0)
        {
            return seed;
        }
        else static if ((is(T == struct) && !canBitwiseHash!T) || T.tupleof.length == 1)
        {
            static if (isChained) size_t h = seed;
            static foreach (i, F; typeof(val.tupleof))
            {
                static if (__traits(isStaticArray, F))
                {
                    static if (i == 0 && !isChained) size_t h = 0;
                    static if (F.sizeof > 0 && canBitwiseHash!F)
                        // May use smallBytesHash instead of bytesHash.
                        h = bytesHashWithExactSizeAndAlignment!F(toUbyte(val.tupleof[i]), h);
                    else
                        // We can avoid the "double hashing" the top-level version uses
                        // for consistency with TypeInfo.getHash.
                        foreach (ref e; val.tupleof[i])
                            h = hashOf(e, h);
                }
                else static if (is(F == struct) || is(F == union))
                {
                    static if (hasCallableToHash!F)
                    {
                        static if (!__traits(isSame, F, __traits(parent, val.tupleof[i].toHash))
                            && is(typeof(val.tupleof[i] is null)))
                        {
                            static if (i == 0 && !isChained)
                                size_t h = hashOf(__traits(getMember, val.tupleof[i], __traits(getAliasThis, F)));
                            else
                                h = hashOf(__traits(getMember, val.tupleof[i], __traits(getAliasThis, F)), h);
                        }
                        else
                        {
                            static if (i == 0 && !isChained)
                                size_t h = val.tupleof[i].toHash();
                            else
                                h = hashOf(cast(size_t) val.tupleof[i].toHash(), h);
                        }
                    }
                    else static if (F.tupleof.length == 1)
                    {
                        // Handle the single member case separately to avoid unnecessarily using bytesHash.
                        static if (i == 0 && !isChained)
                            size_t h = hashOf(val.tupleof[i].tupleof[0]);
                        else
                            h = hashOf(val.tupleof[i].tupleof[0], h);
                    }
                    else static if (canBitwiseHash!F)
                    {
                        // May use smallBytesHash instead of bytesHash.
                        static if (i == 0 && !isChained) size_t h = 0;
                        h = bytesHashWithExactSizeAndAlignment!F(toUbyte(val.tupleof[i]), h);
                    }
                    else
                    {
                        // Nothing special happening.
                        static if (i == 0 && !isChained)
                            size_t h = hashOf(val.tupleof[i]);
                        else
                            h = hashOf(val.tupleof[i], h);
                    }
                }
                else
                {
                    // Nothing special happening.
                    static if (i == 0 && !isChained)
                        size_t h = hashOf(val.tupleof[i]);
                    else
                        h = hashOf(val.tupleof[i], h);
                }
            }
            return h;
        }
        else static if (is(typeof(toUbyte(val)) == const(ubyte)[])) //CTFE ready for structs without reference fields
        {
            // Not using bytesHashWithExactSizeAndAlignment here because
            // the result may differ from typeid(T).getHash(&val).
            return bytesHashAlignedBy!T(toUbyte(val), seed);
        }
        else // CTFE unsupported
        {
            assert(!__ctfe, "unable to compute hash of "~T.stringof~" at compile time");
            const(ubyte)[] bytes = (() @trusted => (cast(const(ubyte)*)&val)[0 .. T.sizeof])();
            // Not using bytesHashWithExactSizeAndAlignment here because
            // the result may differ from typeid(T).getHash(&val).
            return bytesHashAlignedBy!T(bytes, seed);
        }
    }
};

//struct or union hash
size_t hashOf(T)(scope const auto ref T val, size_t seed = 0)
if (!is(T == enum) && (is(T == struct) || is(T == union))
    && !is(T == const) && !is(T == immutable)
    && canBitwiseHash!T)
{
    mixin(_hashOfStruct);
}

//struct or union hash
size_t hashOf(T)(auto ref T val)
if (!is(T == enum) && (is(T == struct) || is(T == union))
    && !canBitwiseHash!T)
{
    mixin(_hashOfStruct);
}

//struct or union hash
size_t hashOf(T)(auto ref T val, size_t seed)
if (!is(T == enum) && (is(T == struct) || is(T == union))
    && !canBitwiseHash!T)
{
    mixin(_hashOfStruct);
}

//struct or union hash - https://issues.dlang.org/show_bug.cgi?id=19332 (support might be removed in future)
size_t hashOf(T)(scope auto ref T val, size_t seed = 0)
if (!is(T == enum) && (is(T == struct) || is(T == union))
    && (is(T == const) || is(T == immutable))
    && canBitwiseHash!T && !canBitwiseHash!(Unconst!T))
{
    mixin(_hashOfStruct);
}
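
// A hedged sketch (not from the original source): a struct with a callable
// `toHash` is hashed through that method, while a plain POD struct is hashed
// from its field bytes. Struct names are illustrative only.
@nogc nothrow pure @safe unittest
{
    static struct Custom
    {
        size_t toHash() const nothrow @nogc pure @safe { return 123; }
    }
    Custom c;
    assert(hashOf(c) == 123);
    assert(hashOf(c, 7) == hashOf(size_t(123), 7));

    static struct Pod { int x; int y; }
    assert(hashOf(Pod(1, 2)) == hashOf(Pod(1, 2)));
}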

//delegate hash. CTFE only if null.
@trusted @nogc nothrow pure
size_t hashOf(T)(scope const T val, size_t seed = 0) if (!is(T == enum) && is(T == delegate))
{
    if (__ctfe)
    {
        if (val is null) return hashOf(size_t(0), hashOf(size_t(0), seed));
        assert(0, "unable to compute hash of "~T.stringof~" at compile time");
    }
    return hashOf(val.ptr, hashOf(cast(void*) val.funcptr, seed));
}
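
// A hedged sketch (not from the original source): a delegate hashes by its
// context and function pointers; a null delegate is the one case that also
// works in CTFE, and with the current implementation both paths agree.
@nogc nothrow pure @safe unittest
{
    alias DG = void delegate() @safe nothrow @nogc pure;
    enum size_t ctfeHashOfNull = hashOf(DG.init);
    assert(ctfeHashOfNull == hashOf(DG.init));
}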

//address-based class hash. CTFE only if null.
@nogc nothrow pure @trusted
size_t hashOf(T)(scope const T val)
if (!is(T == enum) && (is(T == interface) || is(T == class))
    && canBitwiseHash!T)
{
    if (__ctfe) if (val is null) return 0;
    return hashOf(cast(const void*) val);
}

//address-based class hash. CTFE only if null.
@nogc nothrow pure @trusted
size_t hashOf(T)(scope const T val, size_t seed)
if (!is(T == enum) && (is(T == interface) || is(T == class))
    && canBitwiseHash!T)
{
    if (__ctfe) if (val is null) return hashOf(size_t(0), seed);
    return hashOf(cast(const void*) val, seed);
}

//class or interface hash. CTFE depends on toHash
size_t hashOf(T)(T val)
if (!is(T == enum) && (is(T == interface) || is(T == class))
    && !canBitwiseHash!T)
{
    static if (__traits(compiles, {size_t h = val.toHash();}))
    {
        static if (is(__traits(parent, val.toHash) P) && !is(immutable T* : immutable P*)
            && is(typeof((ref P p) => p is null)))
            return val ? hashOf(__traits(getMember, val, __traits(getAliasThis, T))) : 0;
        else
            return val ? val.toHash() : 0;
    }
    else
        return val ? (cast(Object)val).toHash() : 0;
}

//class or interface hash. CTFE depends on toHash
size_t hashOf(T)(T val, size_t seed)
if (!is(T == enum) && (is(T == interface) || is(T == class))
    && !canBitwiseHash!T)
{
    static if (__traits(compiles, {size_t h = val.toHash();}))
    {
        static if (is(__traits(parent, val.toHash) P) && !is(immutable T* : immutable P*)
            && is(typeof((ref P p) => p is null)))
            return hashOf(val ? hashOf(__traits(getMember, val, __traits(getAliasThis, T)))
                : size_t(0), seed);
        else
            return hashOf(val ? cast(size_t) val.toHash() : size_t(0), seed);
    }
    else
        return hashOf(val ? (cast(Object)val).toHash() : 0, seed);
}
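
// A hedged sketch (not from the original source): a class that overrides
// `toHash` is hashed through it, and a null reference hashes like 0. The class
// name is illustrative only.
@safe unittest
{
    static class Named
    {
        override size_t toHash() @safe @nogc nothrow pure { return 7; }
    }
    auto n = new Named;
    assert(hashOf(n) == 7);
    assert(hashOf(n, 3) == hashOf(size_t(7), 3));

    Named nul = null;
    assert(hashOf(nul) == 0);
}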

//associative array hash. CTFE depends on base types
size_t hashOf(T)(T aa) if (!is(T == enum) && __traits(isAssociativeArray, T))
{
    static if (is(typeof(aa) : V[K], K, V)) {} // Put K & V in scope.
    static if (__traits(compiles, (ref K k, ref V v) nothrow => .hashOf(k) + .hashOf(v)))
        scope (failure) assert(0); // Allow compiler to infer nothrow.

    if (!aa.length) return 0;
    size_t h = 0;

    // The computed hash is independent of the foreach traversal order.
    foreach (ref key, ref val; aa)
        h += hashOf(hashOf(val), hashOf(key));
    return h;
}

//associative array hash. CTFE depends on base types
size_t hashOf(T)(T aa, size_t seed) if (!is(T == enum) && __traits(isAssociativeArray, T))
{
    return hashOf(hashOf(aa), seed);
}
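
// A hedged sketch (not from the original source): because the per-entry hashes
// are summed, associative arrays with the same contents hash equally no matter
// what order the entries were inserted in, and an empty AA hashes to 0.
unittest
{
    auto x = [1: "one", 2: "two", 3: "three"];
    auto y = [3: "three", 2: "two", 1: "one"];
    assert(hashOf(x) == hashOf(y));
    assert(hashOf(x, 5) == hashOf(y, 5));

    int[string] empty;
    assert(hashOf(empty) == 0);
}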

// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

// This overload is for backwards compatibility.
@system pure nothrow @nogc
size_t bytesHash()(scope const(void)* buf, size_t len, size_t seed)
{
    return bytesHashAlignedBy!ubyte((cast(const(ubyte)*) buf)[0 .. len], seed);
}

private template bytesHashAlignedBy(AlignType)
{
    alias bytesHashAlignedBy = bytesHash!(AlignType.alignof >= uint.alignof);
}

private template bytesHashWithExactSizeAndAlignment(SizeAndAlignType)
{
    static if (SizeAndAlignType.alignof < uint.alignof
        ? SizeAndAlignType.sizeof <= 12
        : SizeAndAlignType.sizeof <= 10)
        alias bytesHashWithExactSizeAndAlignment = smallBytesHash;
    else
        alias bytesHashWithExactSizeAndAlignment = bytesHashAlignedBy!SizeAndAlignType;
}

// Fowler/Noll/Vo hash. http://www.isthe.com/chongo/tech/comp/fnv/
private size_t fnv()(scope const(ubyte)[] bytes, size_t seed) @nogc nothrow pure @safe
{
    static if (size_t.max <= uint.max)
        enum prime = (1U << 24) + (1U << 8) + 0x93U;
    else static if (size_t.max <= ulong.max)
        enum prime = (1UL << 40) + (1UL << 8) + 0xb3UL;
    else
        enum prime = (size_t(1) << 88) + (size_t(1) << 8) + size_t(0x3b);
    foreach (b; bytes)
        seed = (seed ^ b) * prime;
    return seed;
}
private alias smallBytesHash = fnv;
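
// A hedged sketch (not from the original source): FNV folds one byte per step
// into the running value, so hashing an empty slice returns the seed unchanged.
@nogc nothrow pure @safe unittest
{
    const(ubyte)[] none;
    assert(smallBytesHash(none, 1234) == 1234);

    const(ubyte)[2] two = [1, 2];
    assert(smallBytesHash(two[], 0) == smallBytesHash(two[], 0));
}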

//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here
private uint get32bits()(scope const(ubyte)* x) @nogc nothrow pure @system
{
    version (BigEndian)
    {
        return ((cast(uint) x[0]) << 24) | ((cast(uint) x[1]) << 16) | ((cast(uint) x[2]) << 8) | (cast(uint) x[3]);
    }
    else
    {
        return ((cast(uint) x[3]) << 24) | ((cast(uint) x[2]) << 16) | ((cast(uint) x[1]) << 8) | (cast(uint) x[0]);
    }
}

/+
Params:
    dataKnownToBeAligned = whether the data is known at compile time to be uint-aligned.
+/
@nogc nothrow pure @trusted
private size_t bytesHash(bool dataKnownToBeAligned)(scope const(ubyte)[] bytes, size_t seed)
{
    auto len = bytes.length;
    auto data = bytes.ptr;
    auto nblocks = len / 4;

    uint h1 = cast(uint)seed;

    enum uint c1 = 0xcc9e2d51;
    enum uint c2 = 0x1b873593;
    enum uint c3 = 0xe6546b64;

    //----------
    // body
    auto end_data = data+nblocks*uint.sizeof;
    for (; data!=end_data; data += uint.sizeof)
    {
        static if (dataKnownToBeAligned)
            uint k1 = __ctfe ? get32bits(data) : *(cast(const uint*) data);
        else
            uint k1 = get32bits(data);
        k1 *= c1;
        k1 = (k1 << 15) | (k1 >> (32 - 15));
        k1 *= c2;

        h1 ^= k1;
        h1 = (h1 << 13) | (h1 >> (32 - 13));
        h1 = h1*5+c3;
    }

    //----------
    // tail
    uint k1 = 0;

    switch (len & 3)
    {
        case 3: k1 ^= data[2] << 16; goto case;
        case 2: k1 ^= data[1] << 8;  goto case;
        case 1: k1 ^= data[0];
                k1 *= c1; k1 = (k1 << 15) | (k1 >> (32 - 15)); k1 *= c2; h1 ^= k1;
                goto default;
        default:
    }

    //----------
    // finalization
    h1 ^= len;
    // Force all bits of the hash block to avalanche.
    h1 = (h1 ^ (h1 >> 16)) * 0x85ebca6b;
    h1 = (h1 ^ (h1 >> 13)) * 0xc2b2ae35;
    h1 ^= h1 >> 16;
    return h1;
}

// Check that bytesHash works with CTFE
pure nothrow @system @nogc unittest
{
    size_t ctfeHash(string x)
    {
        return bytesHash(x.ptr, x.length, 0);
    }

    enum test_str = "Sample string";
    enum size_t hashVal = ctfeHash(test_str);
    assert(hashVal == bytesHash(&test_str[0], test_str.length, 0));

    // Detect unintended changes to bytesHash on unaligned and aligned inputs.
    version (BigEndian)
    {
        const ubyte[7] a = [99, 4, 3, 2, 1, 5, 88];
        const uint[2] b = [0x04_03_02_01, 0x05_ff_ff_ff];
    }
    else
    {
        const ubyte[7] a = [99, 1, 2, 3, 4, 5, 88];
        const uint[2] b = [0x04_03_02_01, 0xff_ff_ff_05];
    }
    // It is okay to change the below values if you make a change
    // that you expect to change the result of bytesHash.
    assert(bytesHash(&a[1], a.length - 2, 0) == 2727459272);
    assert(bytesHash(&b, 5, 0) == 2727459272);
    assert(bytesHashAlignedBy!uint((cast(const ubyte*) &b)[0 .. 5], 0) == 2727459272);
}