/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <utils/StrongPointer.h>
#include <utils/RefBase.h>

#include <thread>
#include <atomic>
#include <sched.h>
#include <errno.h>

// Enhanced version of StrongPointer_test, but using RefBase underneath.

using namespace android;

static constexpr int NITERS = 1000000;

static constexpr int INITIAL_STRONG_VALUE = 1 << 28; // Mirroring RefBase definition.
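// At the time of writing, RefBase seeds the strong count with this sentinel
// and subtracts it on the first incStrong(), so an object that no sp<> has
// referenced yet reports INITIAL_STRONG_VALUE. This duplicates a private
// constant in RefBase.cpp and must be kept in sync with it by hand.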

class Foo : public RefBase {
public:
    Foo(bool* deleted_check) : mDeleted(deleted_check) {
        *mDeleted = false;
    }

    ~Foo() {
        *mDeleted = true;
    }
private:
    bool* mDeleted;
};

TEST(RefBase, StrongMoves) {
    bool isDeleted;
    Foo* foo = new Foo(&isDeleted);
    ASSERT_EQ(INITIAL_STRONG_VALUE, foo->getStrongCount());
    ASSERT_FALSE(isDeleted) << "Already deleted...?";
    sp<Foo> sp1(foo);
    wp<Foo> wp1(sp1);
    ASSERT_EQ(1, foo->getStrongCount());
    // Weak count includes both strong and weak references.
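    // Here that is one strong reference (sp1) plus one weak (wp1), hence 2.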
    ASSERT_EQ(2, foo->getWeakRefs()->getWeakCount());
    {
        sp<Foo> sp2 = std::move(sp1);
        ASSERT_EQ(1, foo->getStrongCount())
                << "std::move failed, incremented refcnt";
        ASSERT_EQ(nullptr, sp1.get()) << "std::move failed, sp1 is still valid";
        // The strong count didn't increase; also verify that the moved-from
        // sp1 was properly reset and that moving back doesn't delete early.
        sp1 = std::move(sp2);
    }
    ASSERT_FALSE(isDeleted) << "deleted too early! still has a reference!";
    {
        // Now verify that the object is deleted on time.
        sp<Foo> sp2 = std::move(sp1);
    }
    ASSERT_TRUE(isDeleted) << "foo was leaked!";
    ASSERT_EQ(nullptr, wp1.promote().get());
}

TEST(RefBase, WeakCopies) {
    bool isDeleted;
    Foo* foo = new Foo(&isDeleted);
    EXPECT_EQ(0, foo->getWeakRefs()->getWeakCount());
    ASSERT_FALSE(isDeleted) << "Foo (weak) already deleted...?";
    wp<Foo> wp1(foo);
    EXPECT_EQ(1, foo->getWeakRefs()->getWeakCount());
    {
        wp<Foo> wp2 = wp1;
        ASSERT_EQ(2, foo->getWeakRefs()->getWeakCount());
    }
    EXPECT_EQ(1, foo->getWeakRefs()->getWeakCount());
    ASSERT_FALSE(isDeleted) << "deleted too early! still has a reference!";
    wp1 = nullptr;
    ASSERT_FALSE(isDeleted) << "Deletion on wp destruction should no longer occur";
}


// Set up a situation in which we race with visit2AndRemove() to delete
// two strong references to the same object. The Bar destructor checks
// that there were no early deletions and that prior updates are visible
// to the destructor.
class Bar : public RefBase {
public:
    Bar(std::atomic<int>* delete_count) : mVisited1(false), mVisited2(false),
            mDeleteCount(delete_count) {
    }

    ~Bar() {
        EXPECT_TRUE(mVisited1);
        EXPECT_TRUE(mVisited2);
        (*mDeleteCount)++;
    }
    bool mVisited1;
    bool mVisited2;
private:
    std::atomic<int>* mDeleteCount;
};
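// Note that ~Bar may run on either thread: whichever sp<Bar> release drops
// the strong count to zero invokes the destructor, and that is exactly the
// race the tests below exercise.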

static sp<Bar> buffer;
static std::atomic<bool> bufferFull(false);

// Wait until bufferFull has value val.
static inline void waitFor(bool val) {
    while (bufferFull != val) {}
}
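// Note: this busy-wait is a deliberately simple handoff. std::atomic
// operations default to memory_order_seq_cst, so a write to buffer made
// before "bufferFull = true" happens-before any read of buffer performed
// after waitFor(true) returns on the other thread.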

cpu_set_t otherCpus;

// Divide the cpus we're allowed to run on into myCpus and otherCpus.
// Set origCpus to the processors we were originally allowed to run on.
// Return false if origCpus doesn't include at least processors 0 and 1.
static bool setExclusiveCpus(cpu_set_t* origCpus /* out */,
        cpu_set_t* myCpus /* out */, cpu_set_t* otherCpus) {
    if (sched_getaffinity(0, sizeof(cpu_set_t), origCpus) != 0) {
        return false;
    }
    if (!CPU_ISSET(0, origCpus) || !CPU_ISSET(1, origCpus)) {
        return false;
    }
    CPU_ZERO(myCpus);
    CPU_ZERO(otherCpus);
    CPU_OR(myCpus, myCpus, origCpus);
    CPU_OR(otherCpus, otherCpus, origCpus);
    for (unsigned i = 0; i < CPU_SETSIZE; ++i) {
        // I get the even cores, the other thread gets the odd ones.
        if (i & 1) {
            CPU_CLR(i, myCpus);
        } else {
            CPU_CLR(i, otherCpus);
        }
    }
    return true;
}

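// Consumer side of the buffer handoff: wait for the producer to publish a
// Bar, mark it visited, then drop what may be the last strong reference.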
static void visit2AndRemove() {
    if (sched_setaffinity(0, sizeof(cpu_set_t), &otherCpus) != 0) {
        FAIL() << "setaffinity failed, errno = " << errno;
    }
    for (int i = 0; i < NITERS; ++i) {
        waitFor(true);
        buffer->mVisited2 = true;
        buffer = nullptr;
        bufferFull = false;
    }
}

TEST(RefBase, RacingDestructors) {
    cpu_set_t origCpus;
    cpu_set_t myCpus;
    // Restrict us and the helper thread to disjoint cpu sets.
    // This prevents us from getting scheduled against each other,
    // which would be atrociously slow.
    if (setExclusiveCpus(&origCpus, &myCpus, &otherCpus)) {
        std::thread t(visit2AndRemove);
        std::atomic<int> deleteCount(0);
        if (sched_setaffinity(0, sizeof(cpu_set_t), &myCpus) != 0) {
            FAIL() << "setaffinity failed, errno = " << errno;
        }
        for (int i = 0; i < NITERS; ++i) {
            waitFor(false);
            Bar* bar = new Bar(&deleteCount);
            sp<Bar> sp3(bar);
            buffer = sp3;
            bufferFull = true;
            ASSERT_GE(bar->getStrongCount(), 1);
            // Weak count includes strong count.
            ASSERT_GE(bar->getWeakRefs()->getWeakCount(), 1);
            sp3->mVisited1 = true;
            sp3 = nullptr;
        }
        t.join();
        if (sched_setaffinity(0, sizeof(cpu_set_t), &origCpus) != 0) {
            FAIL();
        }
        ASSERT_EQ(NITERS, deleteCount) << "Deletions missed!";
    } // Otherwise this is slow and probably pointless on a uniprocessor.
}

static wp<Bar> wpBuffer;
static std::atomic<bool> wpBufferFull(false);

// Wait until wpBufferFull has value val.
static inline void wpWaitFor(bool val) {
    while (wpBufferFull != val) {}
}

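// Consumer side of the weak-pointer handoff: promote the published wp<Bar>
// to a strong reference, racing with the producer's own promote() call.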
static void visit3AndRemove() {
    if (sched_setaffinity(0, sizeof(cpu_set_t), &otherCpus) != 0) {
        FAIL() << "setaffinity failed, errno = " << errno;
    }
    for (int i = 0; i < NITERS; ++i) {
        wpWaitFor(true);
        {
            sp<Bar> sp1 = wpBuffer.promote();
            // Dereferencing sp1 implicitly checks that the promotion
            // succeeded, i.e. that sp1 != nullptr.
            sp1->mVisited2 = true;
        }
        wpBuffer = nullptr;
        wpBufferFull = false;
    }
}

TEST(RefBase, RacingPromotions) {
    cpu_set_t origCpus;
    cpu_set_t myCpus;
    // Restrict us and the helper thread to disjoint cpu sets.
    // This prevents us from getting scheduled against each other,
    // which would be atrociously slow.
    if (setExclusiveCpus(&origCpus, &myCpus, &otherCpus)) {
        std::thread t(visit3AndRemove);
        std::atomic<int> deleteCount(0);
        if (sched_setaffinity(0, sizeof(cpu_set_t), &myCpus) != 0) {
            FAIL() << "setaffinity failed, errno = " << errno;
        }
        for (int i = 0; i < NITERS; ++i) {
            Bar* bar = new Bar(&deleteCount);
            wp<Bar> wp1(bar);
            bar->mVisited1 = true;
            if (i % (NITERS / 10) == 0) {
                // Do this rarely, since it generates a log message.
                wp1 = nullptr; // No longer destroys the object.
                wp1 = bar;
            }
            wpBuffer = wp1;
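            // Exactly two weak references exist here: wp1 and wpBuffer.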
            ASSERT_EQ(2, bar->getWeakRefs()->getWeakCount());
            wpBufferFull = true;
            // This promotion races with the one in visit3AndRemove.
            // It may or may not succeed, but it shouldn't interfere with
            // the concurrent one.
            sp<Bar> sp1 = wp1.promote();
            wpWaitFor(false);  // Wait for the other thread to drop its strong pointer.
            sp1 = nullptr;
            // No strong pointers here.
            sp1 = wp1.promote();
            ASSERT_EQ(nullptr, sp1.get()) << "Dead wp promotion succeeded!";
        }
        t.join();
        if (sched_setaffinity(0, sizeof(cpu_set_t), &origCpus) != 0) {
            FAIL();
        }
        ASSERT_EQ(NITERS, deleteCount) << "Deletions missed!";
    } // Otherwise this is slow and probably pointless on a uniprocessor.
}
258}