Jason A. Donenfeld | 2c956a6 | 2017-01-08 13:54:00 +0100 | [diff] [blame] | 1 | /* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. |
| 2 | * |
| 3 | * This file is provided under a dual BSD/GPLv2 license. |
| 4 | * |
| 5 | * SipHash: a fast short-input PRF |
| 6 | * https://131002.net/siphash/ |
| 7 | * |
Jason A. Donenfeld | 1ae2324 | 2017-01-08 13:54:01 +0100 | [diff] [blame] | 8 | * This implementation is specifically for SipHash2-4 for a secure PRF |
| 9 | * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for |
| 10 | * hashtables. |
Jason A. Donenfeld | 2c956a6 | 2017-01-08 13:54:00 +0100 | [diff] [blame] | 11 | */ |
| 12 | |
| 13 | #ifndef _LINUX_SIPHASH_H |
| 14 | #define _LINUX_SIPHASH_H |
| 15 | |
| 16 | #include <linux/types.h> |
| 17 | #include <linux/kernel.h> |
| 18 | |
/* SipHash consumes the input as u64 words, so align buffers to u64. */
#define SIPHASH_ALIGNMENT __alignof__(u64)
/* 128-bit secret key, stored as two u64 words. */
typedef struct {
	u64 key[2];
} siphash_key_t;
| 23 | |
/* Out-of-line generic implementation; callers normally use siphash() below. */
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/* Variant for buffers that are not SIPHASH_ALIGNMENT-aligned. */
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
#endif

/*
 * Fixed-width helpers for hashing 1-4 words in one call (implemented
 * out of line); used by ___siphash_aligned() to constant-fold small,
 * compile-time-constant input sizes.
 */
u64 siphash_1u64(const u64 a, const siphash_key_t *key);
u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
u64 siphash_3u64(const u64 a, const u64 b, const u64 c,
		 const siphash_key_t *key);
u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d,
		 const siphash_key_t *key);
u64 siphash_1u32(const u32 a, const siphash_key_t *key);
u64 siphash_3u32(const u32 a, const u32 b, const u32 c,
		 const siphash_key_t *key);
| 38 | |
| 39 | static inline u64 siphash_2u32(const u32 a, const u32 b, |
| 40 | const siphash_key_t *key) |
| 41 | { |
| 42 | return siphash_1u64((u64)b << 32 | a, key); |
| 43 | } |
| 44 | static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c, |
| 45 | const u32 d, const siphash_key_t *key) |
| 46 | { |
| 47 | return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key); |
| 48 | } |
| 49 | |
| 50 | |
| 51 | static inline u64 ___siphash_aligned(const __le64 *data, size_t len, |
| 52 | const siphash_key_t *key) |
| 53 | { |
| 54 | if (__builtin_constant_p(len) && len == 4) |
| 55 | return siphash_1u32(le32_to_cpup((const __le32 *)data), key); |
| 56 | if (__builtin_constant_p(len) && len == 8) |
| 57 | return siphash_1u64(le64_to_cpu(data[0]), key); |
| 58 | if (__builtin_constant_p(len) && len == 16) |
| 59 | return siphash_2u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), |
| 60 | key); |
| 61 | if (__builtin_constant_p(len) && len == 24) |
| 62 | return siphash_3u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), |
| 63 | le64_to_cpu(data[2]), key); |
| 64 | if (__builtin_constant_p(len) && len == 32) |
| 65 | return siphash_4u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), |
| 66 | le64_to_cpu(data[2]), le64_to_cpu(data[3]), |
| 67 | key); |
| 68 | return __siphash_aligned(data, len, key); |
| 69 | } |
| 70 | |
/**
 * siphash - compute 64-bit siphash PRF value
 * @data: buffer to hash
 * @len: length of @data in bytes
 * @key: the siphash key
 *
 * On architectures without efficient unaligned access, buffers that are
 * not SIPHASH_ALIGNMENT-aligned take the unaligned slow path; everything
 * else goes through ___siphash_aligned(), which can constant-fold small
 * compile-time-constant lengths.
 */
static inline u64 siphash(const void *data, size_t len,
			  const siphash_key_t *key)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
		return __siphash_unaligned(data, len, key);
#endif
	return ___siphash_aligned(data, len, key);
}
| 86 | |
/* HalfSipHash operates on machine words, so align to unsigned long. */
#define HSIPHASH_ALIGNMENT __alignof__(unsigned long)
/* hsiphash key: two machine words (word size varies by architecture). */
typedef struct {
	unsigned long key[2];
} hsiphash_key_t;
| 91 | |
/* Out-of-line generic implementation; callers normally use hsiphash() below. */
u32 __hsiphash_aligned(const void *data, size_t len,
		       const hsiphash_key_t *key);
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/* Variant for buffers that are not HSIPHASH_ALIGNMENT-aligned. */
u32 __hsiphash_unaligned(const void *data, size_t len,
			 const hsiphash_key_t *key);
#endif

/*
 * Fixed-width helpers for hashing 1-4 u32 words in one call (implemented
 * out of line); used by ___hsiphash_aligned() to constant-fold small,
 * compile-time-constant input sizes.
 */
u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c,
		  const hsiphash_key_t *key);
u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d,
		  const hsiphash_key_t *key);
| 105 | |
| 106 | static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len, |
| 107 | const hsiphash_key_t *key) |
| 108 | { |
| 109 | if (__builtin_constant_p(len) && len == 4) |
| 110 | return hsiphash_1u32(le32_to_cpu(data[0]), key); |
| 111 | if (__builtin_constant_p(len) && len == 8) |
| 112 | return hsiphash_2u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]), |
| 113 | key); |
| 114 | if (__builtin_constant_p(len) && len == 12) |
| 115 | return hsiphash_3u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]), |
| 116 | le32_to_cpu(data[2]), key); |
| 117 | if (__builtin_constant_p(len) && len == 16) |
| 118 | return hsiphash_4u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]), |
| 119 | le32_to_cpu(data[2]), le32_to_cpu(data[3]), |
| 120 | key); |
| 121 | return __hsiphash_aligned(data, len, key); |
| 122 | } |
| 123 | |
/**
 * hsiphash - compute 32-bit hsiphash PRF value
 * @data: buffer to hash
 * @len: length of @data in bytes
 * @key: the hsiphash key
 *
 * On architectures without efficient unaligned access, buffers that are
 * not HSIPHASH_ALIGNMENT-aligned take the unaligned slow path; everything
 * else goes through ___hsiphash_aligned(), which can constant-fold small
 * compile-time-constant lengths.
 */
static inline u32 hsiphash(const void *data, size_t len,
			   const hsiphash_key_t *key)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
		return __hsiphash_unaligned(data, len, key);
#endif
	return ___hsiphash_aligned(data, len, key);
}
| 139 | |
Jason A. Donenfeld | 2c956a6 | 2017-01-08 13:54:00 +0100 | [diff] [blame] | 140 | #endif /* _LINUX_SIPHASH_H */ |