/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/core/SkCanvas.h"
#include "include/core/SkGraphics.h"
#include "include/core/SkPicture.h"
#include "include/core/SkPictureRecorder.h"
#include "include/core/SkSurface.h"
#include "src/core/SkBitmapCache.h"
#include "src/core/SkMakeUnique.h"
#include "src/core/SkMipMap.h"
#include "src/core/SkResourceCache.h"
#include "src/image/SkImage_Base.h"
#include "src/lazy/SkDiscardableMemoryPool.h"
#include "tests/Test.h"

////////////////////////////////////////////////////////////////////////////////////////

enum LockedState {
    kNotLocked,
    kLocked,
};

enum CachedState {
    kNotInCache,
    kInCache,
};

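// Asserts that an SkCachedData entry has the expected ref count, cache membership, and
// locked state (here, "locked" means its data() pointer is non-null).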
static void check_data(skiatest::Reporter* reporter, const SkCachedData* data,
                       int refcnt, CachedState cacheState, LockedState lockedState) {
    REPORTER_ASSERT(reporter, data->testing_only_getRefCnt() == refcnt);
    REPORTER_ASSERT(reporter, data->testing_only_isInCache() == (kInCache == cacheState));
    bool isLocked = (data->data() != nullptr);
    REPORTER_ASSERT(reporter, isLocked == (lockedState == kLocked));
}

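// Exercises the add/find/ref lifecycle of a mipmap in a test-local SkResourceCache: a miss
// before insertion, a hit afterwards, ref counts and locked state as owners come and go,
// and the still-refed entry being dropped from the cache by purgeAll().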
static void test_mipmapcache(skiatest::Reporter* reporter, SkResourceCache* cache) {
    cache->purgeAll();

    SkBitmap src;
    src.allocN32Pixels(5, 5);
    src.setImmutable();
    sk_sp<SkImage> img = SkImage::MakeFromBitmap(src);
    const auto desc = SkBitmapCacheDesc::Make(img.get());

    const SkMipMap* mipmap = SkMipMapCache::FindAndRef(desc, cache);
    REPORTER_ASSERT(reporter, nullptr == mipmap);

    mipmap = SkMipMapCache::AddAndRef(as_IB(img.get()), cache);
    REPORTER_ASSERT(reporter, mipmap);

    {
        const SkMipMap* mm = SkMipMapCache::FindAndRef(desc, cache);
        REPORTER_ASSERT(reporter, mm);
        REPORTER_ASSERT(reporter, mm == mipmap);
        mm->unref();
    }

    check_data(reporter, mipmap, 2, kInCache, kLocked);

    mipmap->unref();
    // tricky, since technically after this I'm no longer an owner, but since the cache is
    // local, I know it won't get purged behind my back
    check_data(reporter, mipmap, 1, kInCache, kNotLocked);

    // find us again
    mipmap = SkMipMapCache::FindAndRef(desc, cache);
    check_data(reporter, mipmap, 2, kInCache, kLocked);

    cache->purgeAll();
    check_data(reporter, mipmap, 1, kNotInCache, kLocked);

    mipmap->unref();
}

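// Verifies the cache's eviction notifications: destroying an SkImage does not evict its
// cached mipmap (the entry is keyed off the underlying pixels), but releasing the source
// bitmap's pixelref does.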
static void test_mipmap_notify(skiatest::Reporter* reporter, SkResourceCache* cache) {
    const int N = 3;

    SkBitmap src[N];
    sk_sp<SkImage> img[N];
    SkBitmapCacheDesc desc[N];
    for (int i = 0; i < N; ++i) {
        src[i].allocN32Pixels(5, 5);
        src[i].setImmutable();
        img[i] = SkImage::MakeFromBitmap(src[i]);
        SkMipMapCache::AddAndRef(as_IB(img[i].get()), cache)->unref();
        desc[i] = SkBitmapCacheDesc::Make(img[i].get());
    }

    for (int i = 0; i < N; ++i) {
        const SkMipMap* mipmap = SkMipMapCache::FindAndRef(desc[i], cache);
        // We're always using a local cache, so we know we won't be purged by other threads
        REPORTER_ASSERT(reporter, mipmap);
        SkSafeUnref(mipmap);

        img[i].reset(); // delete the image, which *should not* remove us from the cache
        mipmap = SkMipMapCache::FindAndRef(desc[i], cache);
        REPORTER_ASSERT(reporter, mipmap);
        SkSafeUnref(mipmap);

        src[i].reset(); // delete the underlying pixelref, which *should* remove us from the cache
        mipmap = SkMipMapCache::FindAndRef(desc[i], cache);
        REPORTER_ASSERT(reporter, !mipmap);
    }
}
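// A DiscardableFactory implemented on top of a test-local SkDiscardableMemoryPool, so the
// discardable-memory code path can be exercised against a pool the test controls.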
static SkDiscardableMemoryPool* gPool = nullptr;
static SkDiscardableMemory* pool_factory(size_t bytes) {
    SkASSERT(gPool);
    return gPool->create(bytes);
}

static void testBitmapCache_discarded_bitmap(skiatest::Reporter* reporter, SkResourceCache* cache,
                                             SkResourceCache::DiscardableFactory factory) {
    test_mipmapcache(reporter, cache);
    test_mipmap_notify(reporter, cache);
}

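// Runs the mipmap-cache tests twice: once against a byte-budgeted cache and once against a
// cache backed by a discardable-memory factory. (The helper above does not currently use its
// factory parameter.)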
DEF_TEST(BitmapCache_discarded_bitmap, reporter) {
    const size_t byteLimit = 100 * 1024;
    {
        SkResourceCache cache(byteLimit);
        testBitmapCache_discarded_bitmap(reporter, &cache, nullptr);
    }
    {
        sk_sp<SkDiscardableMemoryPool> pool(SkDiscardableMemoryPool::Make(byteLimit));
        gPool = pool.get();
        SkResourceCache::DiscardableFactory factory = pool_factory;
        SkResourceCache cache(factory);
        testBitmapCache_discarded_bitmap(reporter, &cache, factory);
    }
}

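// Draws an image (built by the supplied factory) through the given transform with high
// filter quality so that scaled/generated bitmaps get cached, then destroys the image and
// asserts that its SkBitmapCache entry has been purged.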
static void test_discarded_image(skiatest::Reporter* reporter, const SkMatrix& transform,
                                 sk_sp<SkImage> (*buildImage)()) {
    auto surface(SkSurface::MakeRasterN32Premul(10, 10));
    SkCanvas* canvas = surface->getCanvas();

    // SkBitmapCache is global, so other threads could be evicting our bitmaps. Loop a few times
    // to mitigate this risk.
    const unsigned kRepeatCount = 42;
    for (unsigned i = 0; i < kRepeatCount; ++i) {
        SkAutoCanvasRestore acr(canvas, true);

        sk_sp<SkImage> image(buildImage());

        // always use high quality to ensure caching when scaled
        SkPaint paint;
        paint.setFilterQuality(kHigh_SkFilterQuality);

        // draw the image (with a transform, to tickle different code paths) to ensure
        // any associated resources get cached
        canvas->concat(transform);
        canvas->drawImage(image, 0, 0, &paint);

        const auto desc = SkBitmapCacheDesc::Make(image.get());

        // delete the image
        image.reset(nullptr);

        // all resources should have been purged
        SkBitmap result;
        REPORTER_ASSERT(reporter, !SkBitmapCache::Find(desc, &result));
    }
}

// Verify that associated bitmap cache entries are purged on SkImage destruction.
DEF_TEST(BitmapCache_discarded_image, reporter) {
    // Cache entries associated with SkImages fall into two categories:
    //
    // 1) generated image bitmaps (managed by the image cacherator)
    // 2) scaled/resampled bitmaps (cached when HQ filters are used)
    //
    // To exercise the first cache type, we use generated/picture-backed SkImages.
    // To exercise the latter, we draw scaled bitmap images using HQ filters.

    const SkMatrix xforms[] = {
        SkMatrix::MakeScale(1, 1),
        SkMatrix::MakeScale(1.7f, 0.5f),
    };

    for (size_t i = 0; i < SK_ARRAY_COUNT(xforms); ++i) {
        test_discarded_image(reporter, xforms[i], []() {
            auto surface(SkSurface::MakeRasterN32Premul(10, 10));
            surface->getCanvas()->clear(SK_ColorCYAN);
            return surface->makeImageSnapshot();
        });

        test_discarded_image(reporter, xforms[i], []() {
            SkPictureRecorder recorder;
            SkCanvas* canvas = recorder.beginRecording(10, 10);
            canvas->clear(SK_ColorCYAN);
            return SkImage::MakeFromPicture(recorder.finishRecordingAsPicture(),
                                            SkISize::Make(10, 10), nullptr, nullptr,
                                            SkImage::BitDepth::kU8,
                                            SkColorSpace::MakeSRGB());
        });
    }
}

///////////////////////////////////////////////////////////////////////////////////////////////////

static void* gTestNamespace;

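// A minimal SkResourceCache::Key: the namespace pointer and sharedID are passed to init(),
// and fData is the only payload, so two keys built from the same sharedID and data compare
// equal (see the SkASSERT in test_duplicate_add below).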
struct TestKey : SkResourceCache::Key {
    int32_t fData;

    TestKey(int sharedID, int32_t data) : fData(data) {
        this->init(&gTestNamespace, sharedID, sizeof(fData));
    }
};

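// A test record that reports a fixed size, sets a caller-provided flag whenever
// postAddInstall() runs, and whose purgeability can be toggled to drive the duplicate-add
// scenarios below.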
struct TestRec : SkResourceCache::Rec {
    enum {
        kDidInstall = 1 << 0,
    };

    TestKey fKey;
    int* fFlags;
    bool fCanBePurged;

    TestRec(int sharedID, int32_t data, int* flagPtr) : fKey(sharedID, data), fFlags(flagPtr) {
        fCanBePurged = false;
    }

    const Key& getKey() const override { return fKey; }
    size_t bytesUsed() const override { return 1024; /* just need a value */ }
    bool canBePurged() override { return fCanBePurged; }
    void postAddInstall(void*) override {
        *fFlags |= kDidInstall;
    }
    const char* getCategory() const override { return "test-category"; }
};

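// Adds two records with identical keys. Expected behavior (asserted below): if the existing
// record is purgeable, the cache evicts it and installs the newcomer; if not, the cache keeps
// the existing record, runs its postAddInstall() again, and never installs the newcomer.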
static void test_duplicate_add(SkResourceCache* cache, skiatest::Reporter* reporter,
                               bool purgable) {
    int sharedID = 1;
    int data = 0;

    int flags0 = 0, flags1 = 0;

    auto rec0 = skstd::make_unique<TestRec>(sharedID, data, &flags0);
    auto rec1 = skstd::make_unique<TestRec>(sharedID, data, &flags1);
    SkASSERT(rec0->getKey() == rec1->getKey());

    TestRec* r0 = rec0.get();   // save the bare pointer, since we will release rec0
    r0->fCanBePurged = purgable;

    REPORTER_ASSERT(reporter, !(flags0 & TestRec::kDidInstall));
    REPORTER_ASSERT(reporter, !(flags1 & TestRec::kDidInstall));

    cache->add(rec0.release(), nullptr);
    REPORTER_ASSERT(reporter, flags0 & TestRec::kDidInstall);
    REPORTER_ASSERT(reporter, !(flags1 & TestRec::kDidInstall));
    flags0 = 0; // reset the flag

    cache->add(rec1.release(), nullptr);
    if (purgable) {
        // we purged rec0, and did install rec1
        REPORTER_ASSERT(reporter, !(flags0 & TestRec::kDidInstall));
        REPORTER_ASSERT(reporter, flags1 & TestRec::kDidInstall);
    } else {
        // we re-used rec0 and did not install rec1
        REPORTER_ASSERT(reporter, flags0 & TestRec::kDidInstall);
        REPORTER_ASSERT(reporter, !(flags1 & TestRec::kDidInstall));
        r0->fCanBePurged = true;    // so we can clean up the cache
    }
}

/*
 *  Test behavior when the same key is added more than once.
 */
DEF_TEST(ResourceCache_purge, reporter) {
    for (bool purgable : { false, true }) {
        {
            SkResourceCache cache(1024 * 1024);
            test_duplicate_add(&cache, reporter, purgable);
        }
        {
            SkResourceCache cache(SkDiscardableMemory::Create);
            test_duplicate_add(&cache, reporter, purgable);
        }
    }
}