/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/core/SkCanvas.h"
#include "include/core/SkGraphics.h"
#include "include/core/SkPicture.h"
#include "include/core/SkPictureRecorder.h"
#include "include/core/SkSurface.h"
#include "src/core/SkBitmapCache.h"
#include "src/core/SkMipmap.h"
#include "src/core/SkResourceCache.h"
#include "src/image/SkImage_Base.h"
#include "src/lazy/SkDiscardableMemoryPool.h"
#include "tests/Test.h"

////////////////////////////////////////////////////////////////////////////////////////

enum LockedState {
    kNotLocked,
    kLocked,
};

enum CachedState {
    kNotInCache,
    kInCache,
};

static void check_data(skiatest::Reporter* reporter, const SkCachedData* data,
                       int refcnt, CachedState cacheState, LockedState lockedState) {
    REPORTER_ASSERT(reporter, data->testing_only_getRefCnt() == refcnt);
    REPORTER_ASSERT(reporter, data->testing_only_isInCache() == (kInCache == cacheState));
    bool isLocked = (data->data() != nullptr);
    REPORTER_ASSERT(reporter, isLocked == (lockedState == kLocked));
}

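// Exercises the basic SkMipmapCache lifecycle against a private SkResourceCache: a miss before
// the mipmap is added, a hit (returning the same object) afterwards, ref-count/lock transitions
// as references are dropped and re-acquired, and eviction via purgeAll().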
static void test_mipmapcache(skiatest::Reporter* reporter, SkResourceCache* cache) {
    cache->purgeAll();

    SkBitmap src;
    src.allocN32Pixels(5, 5);
    src.setImmutable();
    sk_sp<SkImage> img = SkImage::MakeFromBitmap(src);
    const auto desc = SkBitmapCacheDesc::Make(img.get());

    const SkMipmap* mipmap = SkMipmapCache::FindAndRef(desc, cache);
    REPORTER_ASSERT(reporter, nullptr == mipmap);

    mipmap = SkMipmapCache::AddAndRef(as_IB(img.get()), cache);
    REPORTER_ASSERT(reporter, mipmap);

    {
        const SkMipmap* mm = SkMipmapCache::FindAndRef(desc, cache);
        REPORTER_ASSERT(reporter, mm);
        REPORTER_ASSERT(reporter, mm == mipmap);
        mm->unref();
    }

    check_data(reporter, mipmap, 2, kInCache, kLocked);

    mipmap->unref();
    // Tricky: technically we are no longer an owner after this unref, but since the cache is
    // local to this test, we know it won't get purged behind our back.
    check_data(reporter, mipmap, 1, kInCache, kNotLocked);

    // Find it again, which re-refs and re-locks it.
    mipmap = SkMipmapCache::FindAndRef(desc, cache);
    check_data(reporter, mipmap, 2, kInCache, kLocked);

    cache->purgeAll();
    check_data(reporter, mipmap, 1, kNotInCache, kLocked);

    mipmap->unref();
}

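// Verifies the cache's invalidation behavior: deleting the SkImage wrapper leaves its cached
// mipmap in place, but deleting the underlying bitmap (pixel ref) purges the entry.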
static void test_mipmap_notify(skiatest::Reporter* reporter, SkResourceCache* cache) {
    const int N = 3;

    SkBitmap src[N];
    sk_sp<SkImage> img[N];
    SkBitmapCacheDesc desc[N];
    for (int i = 0; i < N; ++i) {
        src[i].allocN32Pixels(5, 5);
        src[i].setImmutable();
        img[i] = SkImage::MakeFromBitmap(src[i]);
        SkMipmapCache::AddAndRef(as_IB(img[i].get()), cache)->unref();
        desc[i] = SkBitmapCacheDesc::Make(img[i].get());
    }

    for (int i = 0; i < N; ++i) {
        const SkMipmap* mipmap = SkMipmapCache::FindAndRef(desc[i], cache);
        // We're always using a local cache, so we know we won't be purged by other threads.
        REPORTER_ASSERT(reporter, mipmap);
        SkSafeUnref(mipmap);

        img[i].reset(); // delete the image, which *should not* remove us from the cache
        mipmap = SkMipmapCache::FindAndRef(desc[i], cache);
        REPORTER_ASSERT(reporter, mipmap);
        SkSafeUnref(mipmap);

        src[i].reset(); // delete the underlying pixelref, which *should* remove us from the cache
        mipmap = SkMipmapCache::FindAndRef(desc[i], cache);
        REPORTER_ASSERT(reporter, !mipmap);
    }
}

static SkDiscardableMemoryPool* gPool = nullptr;
static int gFactoryCalls = 0;

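// A DiscardableFactory that allocates from the shared pool and counts how many times it is
// invoked, so the test can confirm the factory-backed cache actually used it.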
static SkDiscardableMemory* pool_factory(size_t bytes) {
    SkASSERT(gPool);
    gFactoryCalls++;
    return gPool->create(bytes);
}

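// Runs the mipmap-cache tests against the supplied cache. (The factory parameter mirrors how the
// cache was constructed but is not used directly here.)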
static void testBitmapCache_discarded_bitmap(skiatest::Reporter* reporter, SkResourceCache* cache,
                                             SkResourceCache::DiscardableFactory factory) {
    test_mipmapcache(reporter, cache);
    test_mipmap_notify(reporter, cache);
}

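// Runs the cache tests against both a byte-limited cache and a discardable-memory-backed cache,
// then checks that the discardable factory was actually called.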
DEF_TEST(BitmapCache_discarded_bitmap, reporter) {
    const size_t byteLimit = 100 * 1024;
    {
        SkResourceCache cache(byteLimit);
        testBitmapCache_discarded_bitmap(reporter, &cache, nullptr);
    }
    {
        sk_sp<SkDiscardableMemoryPool> pool(SkDiscardableMemoryPool::Make(byteLimit));
        gPool = pool.get();
        SkResourceCache::DiscardableFactory factory = pool_factory;
        SkResourceCache cache(factory);
        testBitmapCache_discarded_bitmap(reporter, &cache, factory);
    }
    REPORTER_ASSERT(reporter, gFactoryCalls > 0);
}

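// Draws an image built by the supplied callback (scaled, with high filter quality, so resampled
// bitmaps get cached), then destroys the image and asserts that no SkBitmapCache entry for it
// remains.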
static void test_discarded_image(skiatest::Reporter* reporter, const SkMatrix& transform,
                                 sk_sp<SkImage> (*buildImage)()) {
    auto surface(SkSurface::MakeRasterN32Premul(10, 10));
    SkCanvas* canvas = surface->getCanvas();

    // SkBitmapCache is global, so other threads could be evicting our bitmaps. Loop a few times
    // to mitigate this risk.
    const unsigned kRepeatCount = 42;
    for (unsigned i = 0; i < kRepeatCount; ++i) {
        SkAutoCanvasRestore acr(canvas, true);

        sk_sp<SkImage> image(buildImage());

        // Always use high quality to ensure caching when scaled.
        SkPaint paint;
        paint.setFilterQuality(kHigh_SkFilterQuality);

        // Draw the image (with a transform, to tickle different code paths) to ensure
        // any associated resources get cached.
        canvas->concat(transform);
        canvas->drawImage(image, 0, 0, &paint);

        const auto desc = SkBitmapCacheDesc::Make(image.get());

        // Delete the image.
        image.reset(nullptr);

        // All of its resources should have been purged.
        SkBitmap result;
        REPORTER_ASSERT(reporter, !SkBitmapCache::Find(desc, &result));
    }
}

// Verify that associated bitmap cache entries are purged on SkImage destruction.
DEF_TEST(BitmapCache_discarded_image, reporter) {
    // Cache entries associated with SkImages fall into two categories:
    //
    // 1) generated image bitmaps (managed by the image cacherator)
    // 2) scaled/resampled bitmaps (cached when HQ filters are used)
    //
    // To exercise the first cache type, we use generated/picture-backed SkImages.
    // To exercise the latter, we draw scaled bitmap images using HQ filters.

    const SkMatrix xforms[] = {
        SkMatrix::Scale(1, 1),
        SkMatrix::Scale(1.7f, 0.5f),
    };

    for (size_t i = 0; i < SK_ARRAY_COUNT(xforms); ++i) {
        test_discarded_image(reporter, xforms[i], []() {
            auto surface(SkSurface::MakeRasterN32Premul(10, 10));
            surface->getCanvas()->clear(SK_ColorCYAN);
            return surface->makeImageSnapshot();
        });

        test_discarded_image(reporter, xforms[i], []() {
            SkPictureRecorder recorder;
            SkCanvas* canvas = recorder.beginRecording(10, 10);
            canvas->clear(SK_ColorCYAN);
            return SkImage::MakeFromPicture(recorder.finishRecordingAsPicture(),
                                            SkISize::Make(10, 10), nullptr, nullptr,
                                            SkImage::BitDepth::kU8,
                                            SkColorSpace::MakeSRGB());
        });
    }
}

///////////////////////////////////////////////////////////////////////////////////////////////////

static void* gTestNamespace;

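// A minimal SkResourceCache::Key carrying a single int32 payload, scoped to this test's namespace.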
struct TestKey : SkResourceCache::Key {
    int32_t fData;

    TestKey(int sharedID, int32_t data) : fData(data) {
        this->init(&gTestNamespace, sharedID, sizeof(fData));
    }
};

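// A minimal SkResourceCache::Rec: it records (via *fFlags) whether postAddInstall() ran, and its
// purgeability can be toggled through fCanBePurged.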
struct TestRec : SkResourceCache::Rec {
    enum {
        kDidInstall = 1 << 0,
    };

    TestKey fKey;
    int*    fFlags;
    bool    fCanBePurged;

    TestRec(int sharedID, int32_t data, int* flagPtr) : fKey(sharedID, data), fFlags(flagPtr) {
        fCanBePurged = false;
    }

    const Key& getKey() const override { return fKey; }
    size_t bytesUsed() const override { return 1024; /* just need a value */ }
    bool canBePurged() override { return fCanBePurged; }
    void postAddInstall(void*) override {
        *fFlags |= kDidInstall;
    }
    const char* getCategory() const override { return "test-category"; }
};

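// Adds two recs that share the same key. If the existing rec is purgeable, the cache should
// replace it and install the new rec; if not, the cache keeps (and re-installs) the existing rec,
// and the new rec is never installed.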
static void test_duplicate_add(SkResourceCache* cache, skiatest::Reporter* reporter,
                               bool purgable) {
    int sharedID = 1;
    int data = 0;

    int flags0 = 0, flags1 = 0;

    auto rec0 = std::make_unique<TestRec>(sharedID, data, &flags0);
    auto rec1 = std::make_unique<TestRec>(sharedID, data, &flags1);
    SkASSERT(rec0->getKey() == rec1->getKey());

    TestRec* r0 = rec0.get();   // save the bare pointer, since we will release rec0
    r0->fCanBePurged = purgable;

    REPORTER_ASSERT(reporter, !(flags0 & TestRec::kDidInstall));
    REPORTER_ASSERT(reporter, !(flags1 & TestRec::kDidInstall));

    cache->add(rec0.release(), nullptr);
    REPORTER_ASSERT(reporter, flags0 & TestRec::kDidInstall);
    REPORTER_ASSERT(reporter, !(flags1 & TestRec::kDidInstall));
    flags0 = 0; // reset the flag

    cache->add(rec1.release(), nullptr);
    if (purgable) {
        // we purged rec0, and did install rec1
        REPORTER_ASSERT(reporter, !(flags0 & TestRec::kDidInstall));
        REPORTER_ASSERT(reporter, flags1 & TestRec::kDidInstall);
    } else {
        // we re-used rec0 and did not install rec1
        REPORTER_ASSERT(reporter, flags0 & TestRec::kDidInstall);
        REPORTER_ASSERT(reporter, !(flags1 & TestRec::kDidInstall));
        r0->fCanBePurged = true; // so we can clean up the cache
    }
}

/*
 *  Test behavior when the same key is added more than once.
 */
DEF_TEST(ResourceCache_purge, reporter) {
    for (bool purgable : { false, true }) {
        {
            SkResourceCache cache(1024 * 1024);
            test_duplicate_add(&cache, reporter, purgable);
        }
        {
            SkResourceCache cache(SkDiscardableMemory::Create);
            test_duplicate_add(&cache, reporter, purgable);
        }
    }
}