/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkWeakRefCnt_DEFINED
#define SkWeakRefCnt_DEFINED

#include "SkRefCnt.h"
#include <atomic>

/** \class SkWeakRefCnt

    SkWeakRefCnt is the base class for objects that may be shared by multiple
    objects. When an existing strong owner wants to share a reference, it calls
    ref(). When a strong owner wants to release its reference, it calls
    unref(). When the shared object's strong reference count goes to zero as
    the result of an unref() call, its (virtual) weak_dispose method is called.
    It is an error for the destructor to be called explicitly (or via the
    object going out of scope on the stack or calling delete) if
    getRefCnt() > 1.

    In addition to strong ownership, an owner may instead obtain a weak
    reference by calling weak_ref(). A call to weak_ref() must be balanced by a
    call to weak_unref(). To obtain a strong reference from a weak reference,
    call try_ref(). If try_ref() returns true, the owner's pointer is now also
    a strong reference on which unref() must be called. Note that this does not
    affect the original weak reference; weak_unref() must still be called. When
    the weak reference count goes to zero, the object is deleted. While the
    weak reference count is positive and the strong reference count is zero,
    the object still exists, but is in the disposed state. It is up to the
    object to define what this means.

    Note that a strong reference implicitly implies a weak reference. As a
    result, it is allowable for the owner of a strong ref to call try_ref().
    This will have the same effect as calling ref(), but may be more expensive.

    Example:

        SkWeakRefCnt* myRef = strongRef;
        myRef->weak_ref();
        ... // strongRef->unref() may or may not be called
        if (myRef->try_ref()) {
            ... // use myRef as a strong reference
            myRef->unref();
        } else {
            // myRef is in the disposed state
        }
        myRef->weak_unref();
*/
class SK_API SkWeakRefCnt : public SkRefCnt {
public:
    /** Default construct, initializing the reference counts to 1.
        The strong references collectively hold one weak reference. When the
        strong reference count goes to zero, the collectively held weak
        reference is released.
    */
    SkWeakRefCnt() : SkRefCnt(), fWeakCnt(1) {}

    /** Destruct, asserting that the weak reference count is 1.
    */
    ~SkWeakRefCnt() override {
#ifdef SK_DEBUG
        SkASSERT(getWeakCnt() == 1);
        fWeakCnt.store(0, std::memory_order_relaxed);
#endif
    }

#ifdef SK_DEBUG
    /** Return the weak reference count. */
    int32_t getWeakCnt() const {
        return fWeakCnt.load(std::memory_order_relaxed);
    }

    void validate() const {
        this->INHERITED::validate();
        SkASSERT(getWeakCnt() > 0);
    }
#endif

private:
    /** If fRefCnt is 0, returns 0.
     *  Otherwise increments fRefCnt, acquires, and returns the old value.
     */
    int32_t atomic_conditional_acquire_strong_ref() const {
        int32_t prev = fRefCnt.load(std::memory_order_relaxed);
        do {
            if (0 == prev) {
                break;
            }
        } while(!fRefCnt.compare_exchange_weak(prev, prev+1, std::memory_order_acquire,
                                               std::memory_order_relaxed));
        return prev;
    }
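
    /* The loop above is the standard "increment the count only if it is still
       non-zero" compare-and-swap pattern. A minimal standalone sketch of the
       same idea (illustrative only, not part of Skia's API), which can be
       exercised outside this class:

           #include <atomic>
           #include <cstdint>

           // Returns the previous value; increments only when it was non-zero.
           int32_t conditional_increment(std::atomic<int32_t>& cnt) {
               int32_t prev = cnt.load(std::memory_order_relaxed);
               do {
                   if (prev == 0) {
                       break;  // already expired; never resurrect a dead count
                   }
                   // acquire on success keeps later reads/writes from being
                   // reordered before the increment; relaxed on failure because
                   // the loop simply retries with the freshly loaded value.
               } while (!cnt.compare_exchange_weak(prev, prev + 1,
                                                   std::memory_order_acquire,
                                                   std::memory_order_relaxed));
               return prev;
           }
    */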

public:
    /** Creates a strong reference from a weak reference, if possible. The
        caller must already be an owner. If try_ref() returns true, the owner
        is in possession of an additional strong reference. Both the original
        reference and new reference must be properly unreferenced. If try_ref()
        returns false, no strong reference could be created and the owner's
        reference is in the same state as before the call.
    */
    bool SK_WARN_UNUSED_RESULT try_ref() const {
        if (atomic_conditional_acquire_strong_ref() != 0) {
            // Acquire barrier (L/SL), if not provided above.
            // Prevents subsequent code from happening before the increment.
            return true;
        }
        return false;
    }

    /** Increment the weak reference count. Must be balanced by a call to
        weak_unref().
    */
    void weak_ref() const {
        SkASSERT(getRefCnt() > 0);
        SkASSERT(getWeakCnt() > 0);
        // No barrier required.
        (void)fWeakCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    /** Decrement the weak reference count. If the weak reference count is 1
        before the decrement, then call delete on the object. Note that if this
        is the case, then the object needs to have been allocated via new, and
        not on the stack.
    */
    void weak_unref() const {
        SkASSERT(getWeakCnt() > 0);
        // A release here acts in place of all releases we "should" have been doing in ref().
        if (1 == fWeakCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            // Like try_ref(), the acquire is only needed on success, to make sure
            // code in internal_dispose() doesn't happen before the decrement.
#ifdef SK_DEBUG
            // so our destructor won't complain
            fWeakCnt.store(1, std::memory_order_relaxed);
#endif
            this->INHERITED::internal_dispose();
        }
    }
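
    /* A worked sequence for one strong owner plus one independent weak owner,
       showing (fRefCnt / fWeakCnt) after each call. The strong references
       collectively hold the one weak reference the object starts with:

           new SkWeakRefCnt      -> 1 / 1
           weak_ref()            -> 1 / 2   independent weak owner added
           unref()               -> 0 / 1   internal_dispose() runs weak_dispose(),
                                            then weak_unref() drops the implicit ref
           try_ref()             -> fails   (fRefCnt is 0; object is disposed)
           weak_unref()          -> 0 / 0   SkRefCnt::internal_dispose() deletes it
    */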

    /** Returns true if there are no strong references to the object. When this
        is the case all future calls to try_ref() will return false.
    */
    bool weak_expired() const {
        return fRefCnt.load(std::memory_order_relaxed) == 0;
    }

protected:
    /** Called when the strong reference count goes to zero. This allows the
        object to free any resources it may be holding. Weak references may
        still exist and their level of allowed access to the object is defined
        by the object's class.
    */
    virtual void weak_dispose() const {
    }

private:
    /** Called when the strong reference count goes to zero. Calls weak_dispose
        on the object and releases the implicit weak reference held
        collectively by the strong references.
    */
    void internal_dispose() const override {
        weak_dispose();
        weak_unref();
    }

    /* Invariant: fWeakCnt = #weak + (fRefCnt > 0 ? 1 : 0) */
    mutable std::atomic<int32_t> fWeakCnt;

    typedef SkRefCnt INHERITED;
};
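
/* Usage sketch: a hypothetical cache that holds only a weak reference and
   promotes it to a strong reference on lookup. MyResource and MyCache are
   illustrative names, not part of this header.

       class MyResource : public SkWeakRefCnt {
       protected:
           void weak_dispose() const override {
               // Free what only strong owners may touch; the object's memory
               // stays alive until the last weak_unref().
           }
       };

       class MyCache {
       public:
           void set(MyResource* r) {
               r->weak_ref();              // the cache holds a weak reference
               fEntry = r;
           }
           MyResource* get() const {
               if (fEntry && fEntry->try_ref()) {
                   return fEntry;          // caller now owns a strong ref; must unref()
               }
               return nullptr;             // empty, or already disposed
           }
           ~MyCache() {
               if (fEntry) { fEntry->weak_unref(); }
           }
       private:
           MyResource* fEntry = nullptr;
       };
*/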

#endif