/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkConsistentChecksum_DEFINED
#define SkConsistentChecksum_DEFINED

#include "SkTypes.h"

class SkConsistentChecksum : SkNoncopyable {
private:
    /*
     *  Our rotation constants and Mash helper are meant to automatically do
     *  the right thing depending on whether sizeof(uintptr_t) is 4 or 8.
     */
    enum {
        ROTR = 17,
        ROTL = sizeof(uintptr_t) * 8 - ROTR,
        HALFBITS = sizeof(uintptr_t) * 4
    };

    static inline uintptr_t Mash(uintptr_t total, uintptr_t value) {
        return ((total >> ROTR) | (total << ROTL)) ^ value;
    }
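    // Note: ROTR + ROTL always equals the bit width of uintptr_t, so Mash()
    // performs a full-width rotate-right of the running total by 17 bits and
    // XORs in the next word.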

public:
    /**
     *  Compute a 32-bit checksum for a given data block.
     *
     *  WARNING: As of 1 Nov 2012, this algorithm is still in flux... but
     *  once we get it doing what we want, it will be:
     *  1.  consistent across revisions of the library (for a given set of
     *      bytes, the checksum generated at one revision of the Skia library
     *      will match the one generated on any other revision of the Skia
     *      library)
     *  2.  consistent across platforms (for a given set of bytes, the
     *      checksum generated on one platform will match the one generated
     *      on any other platform)
     *
     *  @param data Memory address of the data block to be processed. Must be
     *              32-bit aligned.
     *  @param size Size of the data block in bytes. Must be a multiple of 4.
     *  @return checksum result
     */
    static uint32_t Compute(const uint32_t* data, size_t size) {
        SkASSERT(SkIsAlign4(size));

        /*
         *  We want to let the compiler use 32-bit or 64-bit addressing and
         *  math, so we use uintptr_t as our magic type. This makes the code
         *  a little more obscure (we can't hard-code 32 or 64 anywhere, but
         *  have to use sizeof()).
         */
        uintptr_t result = 0;
        const uintptr_t* ptr = reinterpret_cast<const uintptr_t*>(data);

        /*
         *  Count the number of quad-element chunks. This takes into account
         *  whether we're on a 32-bit or 64-bit arch, since we use
         *  sizeof(uintptr_t) to compute the size of each chunk.
         */
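        // E.g., on a 64-bit build each chunk covers 4 * 8 = 32 bytes, so a
        // 96-byte block gives n4 == 3; on a 32-bit build each chunk covers
        // 16 bytes, so the same block gives n4 == 6.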
        size_t n4 = size / (sizeof(uintptr_t) << 2);
        for (size_t i = 0; i < n4; ++i) {
            result = Mash(result, *ptr++);
            result = Mash(result, *ptr++);
            result = Mash(result, *ptr++);
            result = Mash(result, *ptr++);
        }
        size &= ((sizeof(uintptr_t) << 2) - 1);

        data = reinterpret_cast<const uint32_t*>(ptr);
        const uint32_t* stop = data + (size >> 2);
        while (data < stop) {
            result = Mash(result, *data++);
        }

        /*
         *  Smash us down to 32 bits if we were 64. Note that when uintptr_t
         *  is 32 bits, this code path should be compiled away, but I still
         *  got a warning when I wrote
         *      result ^= result >> 32;
         *  since >> 32 is undefined for 32-bit ints, hence the wacky
         *  HALFBITS define.
         */
        if (8 == sizeof(result)) {
            result ^= result >> HALFBITS;
        }
        return static_cast<uint32_t>(result);
    }
};
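
/*
 *  Example usage (a minimal sketch; the buffer contents and variable names
 *  below are arbitrary). Compute() takes a 32-bit aligned buffer and its
 *  size in bytes, which must be a multiple of 4:
 *
 *      uint32_t buffer[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
 *      uint32_t hash = SkConsistentChecksum::Compute(buffer, sizeof(buffer));
 */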

#endif