/*
 * Copyright 2013 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
| 7 | |
| 8 | #include "SkScaledImageCache.h" |
| 9 | #include "SkPixelRef.h" |
| 10 | #include "SkRect.h" |
| 11 | |
| 12 | #ifndef SK_DEFAULT_IMAGE_CACHE_LIMIT |
| 13 | #define SK_DEFAULT_IMAGE_CACHE_LIMIT (2 * 1024 * 1024) |
| 14 | #endif |
| 15 | |
// Implemented from en.wikipedia.org/wiki/MurmurHash (MurmurHash3 32-bit
// mixing; the length is deliberately not folded into the finalizer — see
// the commit history for the original `hash ^= size` experiment).
//
// Hashes `count` 32-bit words from data[]. Values are deterministic within
// a process; they are only used for in-memory key comparison, so they need
// not be stable across builds or platforms.
static uint32_t compute_hash(const uint32_t data[], int count) {
    uint32_t hash = 0;

    for (int i = 0; i < count; ++i) {
        // MurmurHash3 body: scramble each input word, then mix it in.
        uint32_t k = data[i];
        k *= 0xcc9e2d51;
        k = (k << 15) | (k >> 17);  // rotl 15
        k *= 0x1b873593;

        hash ^= k;
        hash = (hash << 13) | (hash >> 19);  // rotl 13
        hash = hash * 5 + 0xe6546b64;
    }

    // fmix: final avalanche so small input differences spread through all
    // output bits.
    hash ^= hash >> 16;
    hash *= 0x85ebca6b;
    hash ^= hash >> 13;
    hash *= 0xc2b2ae35;
    hash ^= hash >> 16;

    return hash;
}
| 56 | |
| 57 | struct Key { |
| 58 | bool init(const SkBitmap& bm, SkScalar scaleX, SkScalar scaleY) { |
| 59 | SkPixelRef* pr = bm.pixelRef(); |
| 60 | if (!pr) { |
| 61 | return false; |
| 62 | } |
| 63 | |
| 64 | size_t offset = bm.pixelRefOffset(); |
| 65 | size_t rowBytes = bm.rowBytes(); |
| 66 | int x = (offset % rowBytes) >> 2; |
| 67 | int y = offset / rowBytes; |
| 68 | |
| 69 | fGenID = pr->getGenerationID(); |
| 70 | fBounds.set(x, y, x + bm.width(), y + bm.height()); |
| 71 | fScaleX = scaleX; |
| 72 | fScaleY = scaleY; |
| 73 | |
| 74 | fHash = compute_hash(&fGenID, 7); |
| 75 | return true; |
| 76 | } |
| 77 | |
| 78 | bool operator<(const Key& other) { |
| 79 | const uint32_t* a = &fGenID; |
| 80 | const uint32_t* b = &other.fGenID; |
| 81 | for (int i = 0; i < 7; ++i) { |
| 82 | if (a[i] < b[i]) { |
| 83 | return true; |
| 84 | } |
| 85 | if (a[i] > b[i]) { |
| 86 | return false; |
| 87 | } |
| 88 | } |
| 89 | return false; |
| 90 | } |
| 91 | |
| 92 | bool operator==(const Key& other) { |
| 93 | const uint32_t* a = &fHash; |
| 94 | const uint32_t* b = &other.fHash; |
| 95 | for (int i = 0; i < 8; ++i) { |
| 96 | if (a[i] != b[i]) { |
| 97 | return false; |
| 98 | } |
| 99 | } |
| 100 | return true; |
| 101 | } |
| 102 | |
| 103 | uint32_t fHash; |
| 104 | uint32_t fGenID; |
| 105 | float fScaleX; |
| 106 | float fScaleY; |
| 107 | SkIRect fBounds; |
| 108 | }; |
| 109 | |
| 110 | struct SkScaledImageCache::Rec { |
| 111 | Rec(const Key& key, const SkBitmap& bm) : fKey(key), fBitmap(bm) { |
| 112 | fLockCount = 1; |
| 113 | } |
| 114 | |
| 115 | size_t bytesUsed() const { |
| 116 | return fBitmap.getSize(); |
| 117 | } |
| 118 | |
| 119 | Rec* fNext; |
| 120 | Rec* fPrev; |
| 121 | |
| 122 | // this guy wants to be 64bit aligned |
| 123 | Key fKey; |
| 124 | |
| 125 | int32_t fLockCount; |
| 126 | SkBitmap fBitmap; |
| 127 | }; |
| 128 | |
| 129 | SkScaledImageCache::SkScaledImageCache(size_t byteLimit) { |
| 130 | fHead = NULL; |
| 131 | fTail = NULL; |
| 132 | fBytesUsed = 0; |
| 133 | fByteLimit = byteLimit; |
| 134 | fCount = 0; |
| 135 | } |
| 136 | |
| 137 | SkScaledImageCache::~SkScaledImageCache() { |
| 138 | Rec* rec = fHead; |
| 139 | while (rec) { |
| 140 | Rec* next = rec->fNext; |
| 141 | SkDELETE(rec); |
| 142 | rec = next; |
| 143 | } |
| 144 | } |
| 145 | |
| 146 | SkScaledImageCache::ID* SkScaledImageCache::findAndLock(const SkBitmap& orig, |
| 147 | SkScalar scaleX, |
| 148 | SkScalar scaleY, |
| 149 | SkBitmap* scaled) { |
| 150 | Key key; |
| 151 | if (!key.init(orig, scaleX, scaleY)) { |
| 152 | return NULL; |
| 153 | } |
| 154 | |
| 155 | Rec* rec = fHead; |
| 156 | while (rec != NULL) { |
| 157 | if (rec->fKey == key) { |
| 158 | this->moveToHead(rec); // for our LRU |
| 159 | rec->fLockCount += 1; |
| 160 | *scaled = rec->fBitmap; |
| 161 | // SkDebugf("Found: [%d %d] %d\n", rec->fBitmap.width(), rec->fBitmap.height(), rec->fLockCount); |
| 162 | return (ID*)rec; |
| 163 | } |
| 164 | rec = rec->fNext; |
| 165 | } |
| 166 | return NULL; |
| 167 | } |
| 168 | |
| 169 | SkScaledImageCache::ID* SkScaledImageCache::addAndLock(const SkBitmap& orig, |
| 170 | SkScalar scaleX, |
| 171 | SkScalar scaleY, |
| 172 | const SkBitmap& scaled) { |
| 173 | Key key; |
| 174 | if (!key.init(orig, scaleX, scaleY)) { |
| 175 | return NULL; |
| 176 | } |
| 177 | |
| 178 | Rec* rec = SkNEW_ARGS(Rec, (key, scaled)); |
| 179 | this->addToHead(rec); |
| 180 | SkASSERT(1 == rec->fLockCount); |
| 181 | |
| 182 | // SkDebugf("Added: [%d %d]\n", rec->fBitmap.width(), rec->fBitmap.height()); |
| 183 | |
| 184 | // We may (now) be overbudget, so see if we need to purge something. |
| 185 | this->purgeAsNeeded(); |
| 186 | return (ID*)rec; |
| 187 | } |
| 188 | |
| 189 | void SkScaledImageCache::unlock(SkScaledImageCache::ID* id) { |
| 190 | SkASSERT(id); |
| 191 | |
| 192 | #ifdef SK_DEBUG |
| 193 | { |
| 194 | bool found = false; |
| 195 | Rec* rec = fHead; |
| 196 | while (rec != NULL) { |
| 197 | if ((ID*)rec == id) { |
| 198 | found = true; |
| 199 | break; |
| 200 | } |
| 201 | rec = rec->fNext; |
| 202 | } |
| 203 | SkASSERT(found); |
| 204 | } |
| 205 | #endif |
| 206 | Rec* rec = (Rec*)id; |
| 207 | SkASSERT(rec->fLockCount > 0); |
| 208 | rec->fLockCount -= 1; |
| 209 | |
| 210 | // SkDebugf("Unlock: [%d %d] %d\n", rec->fBitmap.width(), rec->fBitmap.height(), rec->fLockCount); |
| 211 | |
| 212 | // we may have been over-budget, but now have released something, so check |
| 213 | // if we should purge. |
| 214 | if (0 == rec->fLockCount) { |
| 215 | this->purgeAsNeeded(); |
| 216 | } |
| 217 | } |
| 218 | |
| 219 | void SkScaledImageCache::purgeAsNeeded() { |
| 220 | size_t byteLimit = fByteLimit; |
| 221 | size_t bytesUsed = fBytesUsed; |
| 222 | |
| 223 | Rec* rec = fTail; |
| 224 | while (rec) { |
| 225 | if (bytesUsed < byteLimit) { |
| 226 | break; |
| 227 | } |
| 228 | Rec* prev = rec->fPrev; |
| 229 | if (0 == rec->fLockCount) { |
| 230 | // SkDebugf("Purge: [%d %d] %d\n", rec->fBitmap.width(), rec->fBitmap.height(), fCount); |
| 231 | size_t used = rec->bytesUsed(); |
| 232 | SkASSERT(used <= bytesUsed); |
| 233 | bytesUsed -= used; |
| 234 | this->detach(rec); |
| 235 | SkDELETE(rec); |
| 236 | fCount -= 1; |
| 237 | } |
| 238 | rec = prev; |
| 239 | } |
| 240 | fBytesUsed = bytesUsed; |
| 241 | } |
| 242 | |
| 243 | size_t SkScaledImageCache::setByteLimit(size_t newLimit) { |
| 244 | size_t prevLimit = fByteLimit; |
| 245 | fByteLimit = newLimit; |
| 246 | if (newLimit < prevLimit) { |
| 247 | this->purgeAsNeeded(); |
| 248 | } |
| 249 | return prevLimit; |
| 250 | } |
| 251 | |
| 252 | /////////////////////////////////////////////////////////////////////////////// |
| 253 | |
| 254 | void SkScaledImageCache::detach(Rec* rec) { |
| 255 | Rec* prev = rec->fPrev; |
| 256 | Rec* next = rec->fNext; |
| 257 | |
| 258 | if (!prev) { |
| 259 | SkASSERT(fHead == rec); |
| 260 | fHead = next; |
| 261 | } else { |
| 262 | prev->fNext = next; |
| 263 | } |
| 264 | |
| 265 | if (!next) { |
| 266 | fTail = prev; |
| 267 | } else { |
| 268 | next->fPrev = prev; |
| 269 | } |
| 270 | |
| 271 | rec->fNext = rec->fPrev = NULL; |
| 272 | } |
| 273 | |
| 274 | void SkScaledImageCache::moveToHead(Rec* rec) { |
| 275 | if (fHead == rec) { |
| 276 | return; |
| 277 | } |
| 278 | |
| 279 | SkASSERT(fHead); |
| 280 | SkASSERT(fTail); |
| 281 | |
| 282 | this->validate(); |
| 283 | |
| 284 | this->detach(rec); |
| 285 | |
| 286 | fHead->fPrev = rec; |
| 287 | rec->fNext = fHead; |
| 288 | fHead = rec; |
| 289 | |
| 290 | this->validate(); |
| 291 | } |
| 292 | |
// Links a freshly-created rec in at the head (most-recently-used end) of
// the list and charges its size against the cache totals.
void SkScaledImageCache::addToHead(Rec* rec) {
    this->validate();

    rec->fPrev = NULL;
    rec->fNext = fHead;
    if (fHead) {
        fHead->fPrev = rec;
    }
    fHead = rec;
    if (!fTail) {
        // list was empty: rec is now both head and tail
        fTail = rec;
    }
    // Account for the new entry.
    fBytesUsed += rec->bytesUsed();
    fCount += 1;

    this->validate();
}
| 310 | |
#ifdef SK_DEBUG
// Debug-only consistency check of the LRU list: head/tail pointers, prev/
// next linkage, and that fBytesUsed/fCount match the sum over the list.
void SkScaledImageCache::validate() const {
    // Empty list: tail must also be NULL and nothing accounted for.
    if (NULL == fHead) {
        SkASSERT(NULL == fTail);
        SkASSERT(0 == fBytesUsed);
        return;
    }

    // Single-element list: no neighbors, totals equal that one entry.
    if (fHead == fTail) {
        SkASSERT(NULL == fHead->fPrev);
        SkASSERT(NULL == fHead->fNext);
        SkASSERT(fHead->bytesUsed() == fBytesUsed);
        return;
    }

    // Two or more elements: endpoints have exactly one neighbor each.
    SkASSERT(NULL == fHead->fPrev);
    SkASSERT(NULL != fHead->fNext);
    SkASSERT(NULL == fTail->fNext);
    SkASSERT(NULL != fTail->fPrev);

    // Forward walk: accumulate size/count and check they never exceed the
    // recorded totals.
    size_t used = 0;
    int count = 0;
    const Rec* rec = fHead;
    while (rec) {
        count += 1;
        used += rec->bytesUsed();
        SkASSERT(used <= fBytesUsed);
        rec = rec->fNext;
    }
    SkASSERT(fCount == count);

    // Backward walk: subtracting everything back out must land exactly on
    // zero, proving forward and backward traversals see the same nodes.
    rec = fTail;
    while (rec) {
        SkASSERT(count > 0);
        count -= 1;
        SkASSERT(used >= rec->bytesUsed());
        used -= rec->bytesUsed();
        rec = rec->fPrev;
    }

    SkASSERT(0 == count);
    SkASSERT(0 == used);
}
#endif
| 355 | |
| 356 | /////////////////////////////////////////////////////////////////////////////// |
| 357 | |
| 358 | #include "SkThread.h" |
| 359 | |
// Guards all access to the process-global cache below.
static SkMutex gMutex;

// Lazily creates the process-global cache. Not thread-safe by itself:
// every caller in this file acquires gMutex first. The cache is
// intentionally never freed.
static SkScaledImageCache* get_cache() {
    static SkScaledImageCache* gCache;
    if (!gCache) {
        gCache = SkNEW_ARGS(SkScaledImageCache, (SK_DEFAULT_IMAGE_CACHE_LIMIT));
    }
    return gCache;
}
| 369 | |
| 370 | SkScaledImageCache::ID* SkScaledImageCache::FindAndLock(const SkBitmap& orig, |
| 371 | SkScalar scaleX, |
| 372 | SkScalar scaleY, |
| 373 | SkBitmap* scaled) { |
| 374 | SkAutoMutexAcquire am(gMutex); |
| 375 | return get_cache()->findAndLock(orig, scaleX, scaleY, scaled); |
| 376 | } |
| 377 | |
| 378 | SkScaledImageCache::ID* SkScaledImageCache::AddAndLock(const SkBitmap& orig, |
| 379 | SkScalar scaleX, |
| 380 | SkScalar scaleY, |
| 381 | const SkBitmap& scaled) { |
| 382 | SkAutoMutexAcquire am(gMutex); |
| 383 | return get_cache()->addAndLock(orig, scaleX, scaleY, scaled); |
| 384 | } |
| 385 | |
| 386 | void SkScaledImageCache::Unlock(SkScaledImageCache::ID* id) { |
| 387 | SkAutoMutexAcquire am(gMutex); |
| 388 | return get_cache()->unlock(id); |
| 389 | } |
| 390 | |
| 391 | size_t SkScaledImageCache::GetBytesUsed() { |
| 392 | SkAutoMutexAcquire am(gMutex); |
| 393 | return get_cache()->getBytesUsed(); |
| 394 | } |
| 395 | |
| 396 | size_t SkScaledImageCache::GetByteLimit() { |
| 397 | SkAutoMutexAcquire am(gMutex); |
| 398 | return get_cache()->getByteLimit(); |
| 399 | } |
| 400 | |
| 401 | size_t SkScaledImageCache::SetByteLimit(size_t newLimit) { |
| 402 | SkAutoMutexAcquire am(gMutex); |
| 403 | return get_cache()->setByteLimit(newLimit); |
| 404 | } |
| 405 | |
| 406 | /////////////////////////////////////////////////////////////////////////////// |
| 407 | |
| 408 | #include "SkGraphics.h" |
| 409 | |
// Public SkGraphics facade: forwards to the global scaled-image cache.
size_t SkGraphics::GetImageCacheBytesUsed() {
    return SkScaledImageCache::GetBytesUsed();
}
| 413 | |
// Public SkGraphics facade: forwards to the global scaled-image cache.
size_t SkGraphics::GetImageCacheByteLimit() {
    return SkScaledImageCache::GetByteLimit();
}
| 417 | |
// Public SkGraphics facade: sets the global cache's byte budget and
// returns the previous limit.
size_t SkGraphics::SetImageCacheByteLimit(size_t newLimit) {
    return SkScaledImageCache::SetByteLimit(newLimit);
}
| 421 | |