blob: 4526416fc1535433939304a3c952475e74526b3e [file] [log] [blame]
junov@chromium.orgef760602012-06-27 20:03:16 +00001/*
2 * Copyright 2012 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#ifndef SkChecksum_DEFINED
9#define SkChecksum_DEFINED
10
#include "SkString.h"
#include "SkTLogic.h"
#include "SkTypes.h"

#include <string.h>
14
/**
 *  Computes a 32bit checksum from a blob of 32bit aligned data. This is meant
 *  to be very very fast, as it is used internally by the font cache, in
 *  conjunction with the entire raw key. This algorithm does not generate
 *  unique values as well as others (e.g. MD5) but it performs much faster.
 *  Skia's use cases can survive non-unique values (since the entire key is
 *  always available). This class should only be used in circumstances where
 *  speed over uniqueness is at a premium.
 */
reed@google.com88db9ef2012-07-03 19:44:20 +000024class SkChecksum : SkNoncopyable {
25private:
26 /*
27 * Our Rotate and Mash helpers are meant to automatically do the right
28 * thing depending if sizeof(uintptr_t) is 4 or 8.
29 */
30 enum {
31 ROTR = 17,
32 ROTL = sizeof(uintptr_t) * 8 - ROTR,
33 HALFBITS = sizeof(uintptr_t) * 4
34 };
rmistry@google.comfbfcd562012-08-23 18:09:54 +000035
reed@google.com88db9ef2012-07-03 19:44:20 +000036 static inline uintptr_t Mash(uintptr_t total, uintptr_t value) {
37 return ((total >> ROTR) | (total << ROTL)) ^ value;
38 }
rmistry@google.comfbfcd562012-08-23 18:09:54 +000039
reed@google.com88db9ef2012-07-03 19:44:20 +000040public:
mtklein67a32712014-07-10 06:03:46 -070041 /**
42 * uint32_t -> uint32_t hash, useful for when you're about to trucate this hash but you
43 * suspect its low bits aren't well mixed.
44 *
45 * This is the Murmur3 finalizer.
46 */
47 static uint32_t Mix(uint32_t hash) {
48 hash ^= hash >> 16;
49 hash *= 0x85ebca6b;
50 hash ^= hash >> 13;
51 hash *= 0xc2b2ae35;
52 hash ^= hash >> 16;
53 return hash;
54 }
commit-bot@chromium.org70d75ca2013-07-23 20:25:34 +000055
56 /**
reed40dab982015-01-28 13:28:53 -080057 * uint32_t -> uint32_t hash, useful for when you're about to trucate this hash but you
58 * suspect its low bits aren't well mixed.
59 *
60 * This version is 2-lines cheaper than Mix, but seems to be sufficient for the font cache.
61 */
62 static uint32_t CheapMix(uint32_t hash) {
63 hash ^= hash >> 16;
64 hash *= 0x85ebca6b;
65 hash ^= hash >> 16;
66 return hash;
67 }
68
69 /**
commit-bot@chromium.org70d75ca2013-07-23 20:25:34 +000070 * Calculate 32-bit Murmur hash (murmur3).
71 * This should take 2-3x longer than SkChecksum::Compute, but is a considerably better hash.
72 * See en.wikipedia.org/wiki/MurmurHash.
73 *
mtklein02f46cf2015-03-20 13:48:42 -070074 * @param data Memory address of the data block to be processed.
75 * @param size Size of the data block in bytes.
commit-bot@chromium.org70d75ca2013-07-23 20:25:34 +000076 * @param seed Initial hash seed. (optional)
77 * @return hash result
78 */
mtkleine71000f2015-10-21 11:53:27 -070079 static uint32_t Murmur3(const void* data, size_t bytes, uint32_t seed=0);
commit-bot@chromium.org70d75ca2013-07-23 20:25:34 +000080
reed@google.com88db9ef2012-07-03 19:44:20 +000081 /**
82 * Compute a 32-bit checksum for a given data block
83 *
epoger@google.com41d0d2f2012-10-30 17:33:09 +000084 * WARNING: this algorithm is tuned for efficiency, not backward/forward
85 * compatibility. It may change at any time, so a checksum generated with
86 * one version of the Skia code may not match a checksum generated with
87 * a different version of the Skia code.
88 *
reed@google.com88db9ef2012-07-03 19:44:20 +000089 * @param data Memory address of the data block to be processed. Must be
90 * 32-bit aligned.
91 * @param size Size of the data block in bytes. Must be a multiple of 4.
92 * @return checksum result
93 */
reedbbf9f6d2014-07-10 14:29:43 -070094 static uint32_t Compute(const uint32_t* data, size_t size) {
mtklein19fcc742014-07-11 08:42:17 -070095 // Use may_alias to remind the compiler we're intentionally violating strict aliasing,
96 // and so not to apply strict-aliasing-based optimizations.
97 typedef uint32_t SK_ATTRIBUTE(may_alias) aliased_uint32_t;
98 const aliased_uint32_t* safe_data = (const aliased_uint32_t*)data;
99
reed@google.com88db9ef2012-07-03 19:44:20 +0000100 SkASSERT(SkIsAlign4(size));
rmistry@google.comfbfcd562012-08-23 18:09:54 +0000101
reed@google.com88db9ef2012-07-03 19:44:20 +0000102 /*
103 * We want to let the compiler use 32bit or 64bit addressing and math
104 * so we use uintptr_t as our magic type. This makes the code a little
105 * more obscure (we can't hard-code 32 or 64 anywhere, but have to use
106 * sizeof()).
107 */
108 uintptr_t result = 0;
mtklein19fcc742014-07-11 08:42:17 -0700109 const uintptr_t* ptr = reinterpret_cast<const uintptr_t*>(safe_data);
rmistry@google.comfbfcd562012-08-23 18:09:54 +0000110
reed@google.com88db9ef2012-07-03 19:44:20 +0000111 /*
112 * count the number of quad element chunks. This takes into account
113 * if we're on a 32bit or 64bit arch, since we use sizeof(uintptr_t)
114 * to compute how much to shift-down the size.
115 */
reed@google.comb158a822012-07-09 13:13:23 +0000116 size_t n4 = size / (sizeof(uintptr_t) << 2);
117 for (size_t i = 0; i < n4; ++i) {
reed@google.com88db9ef2012-07-03 19:44:20 +0000118 result = Mash(result, *ptr++);
119 result = Mash(result, *ptr++);
120 result = Mash(result, *ptr++);
121 result = Mash(result, *ptr++);
122 }
123 size &= ((sizeof(uintptr_t) << 2) - 1);
rmistry@google.comfbfcd562012-08-23 18:09:54 +0000124
mtklein19fcc742014-07-11 08:42:17 -0700125 safe_data = reinterpret_cast<const aliased_uint32_t*>(ptr);
126 const aliased_uint32_t* stop = safe_data + (size >> 2);
127 while (safe_data < stop) {
128 result = Mash(result, *safe_data++);
reed@google.com88db9ef2012-07-03 19:44:20 +0000129 }
rmistry@google.comfbfcd562012-08-23 18:09:54 +0000130
reed@google.com88db9ef2012-07-03 19:44:20 +0000131 /*
132 * smash us down to 32bits if we were 64. Note that when uintptr_t is
133 * 32bits, this code-path should go away, but I still got a warning
134 * when I wrote
135 * result ^= result >> 32;
136 * since >>32 is undefined for 32bit ints, hence the wacky HALFBITS
137 * define.
138 */
139 if (8 == sizeof(result)) {
140 result ^= result >> HALFBITS;
141 }
142 return static_cast<uint32_t>(result);
143 }
144};
145
mtklein02f46cf2015-03-20 13:48:42 -0700146// SkGoodHash should usually be your first choice in hashing data.
147// It should be both reasonably fast and high quality.
mtkleinc8d1dd42015-10-15 12:23:01 -0700148struct SkGoodHash {
149 template <typename K>
150 SK_WHEN(sizeof(K) == 4, uint32_t) operator()(const K& k) const {
mtklein02f46cf2015-03-20 13:48:42 -0700151 return SkChecksum::Mix(*(const uint32_t*)&k);
152 }
mtklein02f46cf2015-03-20 13:48:42 -0700153
mtkleinc8d1dd42015-10-15 12:23:01 -0700154 template <typename K>
155 SK_WHEN(sizeof(K) != 4, uint32_t) operator()(const K& k) const {
156 return SkChecksum::Murmur3(&k, sizeof(K));
157 }
158
159 uint32_t operator()(const SkString& k) const {
160 return SkChecksum::Murmur3(k.c_str(), k.size());
161 }
162};
mtklein02f46cf2015-03-20 13:48:42 -0700163
robertphillips@google.comfffc8d02012-06-28 00:29:23 +0000164#endif