From b168b0758ac2a800c16c01ea8c209ce4b459cdcf Mon Sep 17 00:00:00 2001
From: Calvin Rose
Date: Thu, 4 Feb 2021 19:37:11 -0600
Subject: [PATCH] Fix #625 - no fancy mixing in number hashing

Just hash the upper 32 bits with the lower 32 bits. Trying to get too
fancy was causing slowdowns in very trivial cases. Assuming that all
combinations of 64 bits in a double are equally likely (suspect, but
probably not that incorrect), the obvious method of xoring the top 32
bits with the lower 32 bits gives a uniform distribution.
---
 src/core/value.c | 16 ++++------------
 1 file changed, 4 insertions(+), 12 deletions(-)

diff --git a/src/core/value.c b/src/core/value.c
index 4e001cbf..5da86bda 100644
--- a/src/core/value.c
+++ b/src/core/value.c
@@ -307,18 +307,10 @@ int32_t janet_hash(Janet x) {
             hash = janet_struct_hash(janet_unwrap_struct(x));
             break;
         case JANET_NUMBER: {
-            double num = janet_unwrap_number(x);
-            if (isnan(num) || isinf(num) || num == 0) {
-                hash = 0;
-            } else {
-                hash = (int32_t)num;
-                hash = ((hash >> 16) ^ hash) * 0x45d9f3b;
-                hash = ((hash >> 16) ^ hash) * 0x45d9f3b;
-                hash = (hash >> 16) ^ hash;
-
-                uint32_t lo = (uint32_t)(janet_u64(x) & 0xFFFFFFFF);
-                hash ^= lo + 0x9e3779b9 + (hash << 6) + (hash >> 2);
-            }
+            uint64_t i = janet_u64(x);
+            uint32_t lo = (uint32_t)(i & 0xFFFFFFFF);
+            uint32_t hi = (uint32_t)(i >> 32);
+            hash = (int32_t)(hi ^ (lo >> 3));
             break;
         }
         case JANET_ABSTRACT: {
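
For context, the new scheme can be exercised in isolation. The sketch
below is illustrative only: the function name hash_double and the
memcpy-based bit read are assumptions made for a self-contained
example, whereas the patch itself obtains the bits through janet_u64()
on the Janet value representation.

    #include <stdint.h>
    #include <string.h>

    /* Minimal sketch of the patched hashing scheme. hash_double is a
     * hypothetical name; the real code lives in janet_hash(). */
    static int32_t hash_double(double num) {
        uint64_t bits;
        memcpy(&bits, &num, sizeof(bits)); /* reinterpret the double's 64 bits */
        uint32_t lo = (uint32_t)(bits & 0xFFFFFFFF);
        uint32_t hi = (uint32_t)(bits >> 32);
        /* Fold the two halves together, as in the patch above. */
        return (int32_t)(hi ^ (lo >> 3));
    }

One visible consequence of the diff: the old special case that hashed
NaN, infinities, and zero to 0 is gone, so every bit pattern is now
folded the same way.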