bungeman@google.com | a02bc15 | 2012-05-16 18:21:56 +0000 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2012 Google Inc. |
| 3 | * |
| 4 | * Use of this source code is governed by a BSD-style license that can be |
| 5 | * found in the LICENSE file. |
| 6 | */ |
| 7 | |
| 8 | #ifndef SkWeakRefCnt_DEFINED |
| 9 | #define SkWeakRefCnt_DEFINED |
| 10 | |
| 11 | #include "SkRefCnt.h" |
bungeman | 2c4bd07 | 2016-04-08 06:58:51 -0700 | [diff] [blame] | 12 | #include <atomic> |
bungeman@google.com | a02bc15 | 2012-05-16 18:21:56 +0000 | [diff] [blame] | 13 | |
| 14 | /** \class SkWeakRefCnt |
| 15 | |
| 16 | SkWeakRefCnt is the base class for objects that may be shared by multiple |
| 17 | objects. When an existing strong owner wants to share a reference, it calls |
| 18 | ref(). When a strong owner wants to release its reference, it calls |
| 19 | unref(). When the shared object's strong reference count goes to zero as |
| 20 | the result of an unref() call, its (virtual) weak_dispose method is called. |
| 21 | It is an error for the destructor to be called explicitly (or via the |
| 22 | object going out of scope on the stack or calling delete) if |
| 23 | getRefCnt() > 1. |
| 24 | |
| 25 | In addition to strong ownership, an owner may instead obtain a weak |
bungeman@google.com | 1ad75a1 | 2013-12-02 19:12:04 +0000 | [diff] [blame] | 26 | reference by calling weak_ref(). A call to weak_ref() must be balanced by a |
bungeman@google.com | a02bc15 | 2012-05-16 18:21:56 +0000 | [diff] [blame] | 27 | call to weak_unref(). To obtain a strong reference from a weak reference, |
| 28 | call try_ref(). If try_ref() returns true, the owner's pointer is now also |
| 29 | a strong reference on which unref() must be called. Note that this does not |
| 30 | affect the original weak reference, weak_unref() must still be called. When |
| 31 | the weak reference count goes to zero, the object is deleted. While the |
| 32 | weak reference count is positive and the strong reference count is zero the |
| 33 | object still exists, but will be in the disposed state. It is up to the |
| 34 | object to define what this means. |
| 35 | |
| 36 | Note that a strong reference implicitly implies a weak reference. As a |
| 37 | result, it is allowable for the owner of a strong ref to call try_ref(). |
| 38 | This will have the same effect as calling ref(), but may be more expensive. |
| 39 | |
| 40 | Example: |
| 41 | |
        SkWeakRefCnt* myRef = &strongRef;
        myRef->weak_ref();
        ... // strongRef.unref() may or may not be called
        if (myRef->try_ref()) {
            ... // use myRef as a strong reference
            myRef->unref();
        } else {
            // myRef is in the disposed state
        }
        myRef->weak_unref();
| 51 | */ |
| 52 | class SK_API SkWeakRefCnt : public SkRefCnt { |
| 53 | public: |
| 54 | /** Default construct, initializing the reference counts to 1. |
| 55 | The strong references collectively hold one weak reference. When the |
| 56 | strong reference count goes to zero, the collectively held weak |
| 57 | reference is released. |
| 58 | */ |
| 59 | SkWeakRefCnt() : SkRefCnt(), fWeakCnt(1) {} |
| 60 | |
| 61 | /** Destruct, asserting that the weak reference count is 1. |
| 62 | */ |
Brian Salomon | d3b6597 | 2017-03-22 12:05:03 -0400 | [diff] [blame] | 63 | ~SkWeakRefCnt() override { |
Mike Klein | 874a62a | 2014-07-09 09:04:07 -0400 | [diff] [blame] | 64 | #ifdef SK_DEBUG |
bungeman | 2c4bd07 | 2016-04-08 06:58:51 -0700 | [diff] [blame] | 65 | SkASSERT(getWeakCnt() == 1); |
| 66 | fWeakCnt.store(0, std::memory_order_relaxed); |
Mike Klein | 874a62a | 2014-07-09 09:04:07 -0400 | [diff] [blame] | 67 | #endif |
bungeman@google.com | a02bc15 | 2012-05-16 18:21:56 +0000 | [diff] [blame] | 68 | } |
| 69 | |
Mike Klein | 874a62a | 2014-07-09 09:04:07 -0400 | [diff] [blame] | 70 | #ifdef SK_DEBUG |
bungeman | 2c4bd07 | 2016-04-08 06:58:51 -0700 | [diff] [blame] | 71 | /** Return the weak reference count. */ |
| 72 | int32_t getWeakCnt() const { |
| 73 | return fWeakCnt.load(std::memory_order_relaxed); |
| 74 | } |
| 75 | |
bungeman@google.com | a02bc15 | 2012-05-16 18:21:56 +0000 | [diff] [blame] | 76 | void validate() const { |
robertphillips@google.com | 0308707 | 2013-10-02 16:42:21 +0000 | [diff] [blame] | 77 | this->INHERITED::validate(); |
bungeman | 2c4bd07 | 2016-04-08 06:58:51 -0700 | [diff] [blame] | 78 | SkASSERT(getWeakCnt() > 0); |
bungeman@google.com | a02bc15 | 2012-05-16 18:21:56 +0000 | [diff] [blame] | 79 | } |
robertphillips@google.com | 0308707 | 2013-10-02 16:42:21 +0000 | [diff] [blame] | 80 | #endif |
bungeman@google.com | a02bc15 | 2012-05-16 18:21:56 +0000 | [diff] [blame] | 81 | |
bungeman | 2c4bd07 | 2016-04-08 06:58:51 -0700 | [diff] [blame] | 82 | private: |
| 83 | /** If fRefCnt is 0, returns 0. |
| 84 | * Otherwise increments fRefCnt, acquires, and returns the old value. |
| 85 | */ |
| 86 | int32_t atomic_conditional_acquire_strong_ref() const { |
| 87 | int32_t prev = fRefCnt.load(std::memory_order_relaxed); |
| 88 | do { |
| 89 | if (0 == prev) { |
| 90 | break; |
| 91 | } |
| 92 | } while(!fRefCnt.compare_exchange_weak(prev, prev+1, std::memory_order_acquire, |
| 93 | std::memory_order_relaxed)); |
| 94 | return prev; |
| 95 | } |
| 96 | |
| 97 | public: |
bungeman@google.com | a02bc15 | 2012-05-16 18:21:56 +0000 | [diff] [blame] | 98 | /** Creates a strong reference from a weak reference, if possible. The |
| 99 | caller must already be an owner. If try_ref() returns true the owner |
| 100 | is in posession of an additional strong reference. Both the original |
| 101 | reference and new reference must be properly unreferenced. If try_ref() |
| 102 | returns false, no strong reference could be created and the owner's |
| 103 | reference is in the same state as before the call. |
| 104 | */ |
| 105 | bool SK_WARN_UNUSED_RESULT try_ref() const { |
bungeman | 2c4bd07 | 2016-04-08 06:58:51 -0700 | [diff] [blame] | 106 | if (atomic_conditional_acquire_strong_ref() != 0) { |
bungeman@google.com | d9947f6 | 2013-12-18 15:27:39 +0000 | [diff] [blame] | 107 | // Acquire barrier (L/SL), if not provided above. |
bungeman@google.com | a02bc15 | 2012-05-16 18:21:56 +0000 | [diff] [blame] | 108 | // Prevents subsequent code from happening before the increment. |
bungeman@google.com | a02bc15 | 2012-05-16 18:21:56 +0000 | [diff] [blame] | 109 | return true; |
| 110 | } |
| 111 | return false; |
| 112 | } |
| 113 | |
| 114 | /** Increment the weak reference count. Must be balanced by a call to |
| 115 | weak_unref(). |
| 116 | */ |
| 117 | void weak_ref() const { |
bungeman | 2c4bd07 | 2016-04-08 06:58:51 -0700 | [diff] [blame] | 118 | SkASSERT(getRefCnt() > 0); |
| 119 | SkASSERT(getWeakCnt() > 0); |
| 120 | // No barrier required. |
| 121 | (void)fWeakCnt.fetch_add(+1, std::memory_order_relaxed); |
bungeman@google.com | a02bc15 | 2012-05-16 18:21:56 +0000 | [diff] [blame] | 122 | } |
| 123 | |
| 124 | /** Decrement the weak reference count. If the weak reference count is 1 |
| 125 | before the decrement, then call delete on the object. Note that if this |
| 126 | is the case, then the object needs to have been allocated via new, and |
| 127 | not on the stack. |
| 128 | */ |
| 129 | void weak_unref() const { |
bungeman | 2c4bd07 | 2016-04-08 06:58:51 -0700 | [diff] [blame] | 130 | SkASSERT(getWeakCnt() > 0); |
| 131 | // A release here acts in place of all releases we "should" have been doing in ref(). |
| 132 | if (1 == fWeakCnt.fetch_add(-1, std::memory_order_acq_rel)) { |
| 133 | // Like try_ref(), the acquire is only needed on success, to make sure |
| 134 | // code in internal_dispose() doesn't happen before the decrement. |
bungeman@google.com | a02bc15 | 2012-05-16 18:21:56 +0000 | [diff] [blame] | 135 | #ifdef SK_DEBUG |
| 136 | // so our destructor won't complain |
bungeman | 2c4bd07 | 2016-04-08 06:58:51 -0700 | [diff] [blame] | 137 | fWeakCnt.store(1, std::memory_order_relaxed); |
bungeman@google.com | a02bc15 | 2012-05-16 18:21:56 +0000 | [diff] [blame] | 138 | #endif |
bungeman@google.com | 1ad75a1 | 2013-12-02 19:12:04 +0000 | [diff] [blame] | 139 | this->INHERITED::internal_dispose(); |
bungeman@google.com | a02bc15 | 2012-05-16 18:21:56 +0000 | [diff] [blame] | 140 | } |
| 141 | } |
| 142 | |
| 143 | /** Returns true if there are no strong references to the object. When this |
| 144 | is the case all future calls to try_ref() will return false. |
| 145 | */ |
| 146 | bool weak_expired() const { |
bungeman | 2c4bd07 | 2016-04-08 06:58:51 -0700 | [diff] [blame] | 147 | return fRefCnt.load(std::memory_order_relaxed) == 0; |
bungeman@google.com | a02bc15 | 2012-05-16 18:21:56 +0000 | [diff] [blame] | 148 | } |
| 149 | |
| 150 | protected: |
| 151 | /** Called when the strong reference count goes to zero. This allows the |
| 152 | object to free any resources it may be holding. Weak references may |
| 153 | still exist and their level of allowed access to the object is defined |
| 154 | by the object's class. |
| 155 | */ |
| 156 | virtual void weak_dispose() const { |
| 157 | } |
| 158 | |
| 159 | private: |
| 160 | /** Called when the strong reference count goes to zero. Calls weak_dispose |
| 161 | on the object and releases the implicit weak reference held |
| 162 | collectively by the strong references. |
| 163 | */ |
mtklein | 36352bf | 2015-03-25 18:17:31 -0700 | [diff] [blame] | 164 | void internal_dispose() const override { |
bungeman@google.com | a02bc15 | 2012-05-16 18:21:56 +0000 | [diff] [blame] | 165 | weak_dispose(); |
| 166 | weak_unref(); |
| 167 | } |
| 168 | |
| 169 | /* Invariant: fWeakCnt = #weak + (fRefCnt > 0 ? 1 : 0) */ |
bungeman | 2c4bd07 | 2016-04-08 06:58:51 -0700 | [diff] [blame] | 170 | mutable std::atomic<int32_t> fWeakCnt; |
robertphillips@google.com | 15e9d3e | 2012-06-21 20:25:03 +0000 | [diff] [blame] | 171 | |
| 172 | typedef SkRefCnt INHERITED; |
bungeman@google.com | a02bc15 | 2012-05-16 18:21:56 +0000 | [diff] [blame] | 173 | }; |
| 174 | |
| 175 | #endif |