/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/reservation.h>
#include <linux/export.h>

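/*
 * All reservation objects share one ww_mutex class, so several buffers can
 * be locked in an arbitrary order via ww_mutex_lock(&obj->lock, ctx) with
 * deadlock detection and backoff. The seqcount class and string below are
 * used when initializing each object's obj->seq, which lets the _rcu
 * helpers further down read the fence lists without taking the lock.
 */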
DEFINE_WW_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

struct lock_class_key reservation_seqcount_class;
EXPORT_SYMBOL(reservation_seqcount_class);

const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);

/*
 * Reserve space to add a shared fence to a reservation_object;
 * must be called with obj->lock held. Returns 0 on success or
 * -ENOMEM if the backing array could not be (re)allocated.
 */
int reservation_object_reserve_shared(struct reservation_object *obj)
{
	struct reservation_object_list *fobj, *old;
	u32 max;

	old = reservation_object_get_list(obj);

	if (old && old->shared_max) {
		if (old->shared_count < old->shared_max) {
			/* perform an in-place update */
			kfree(obj->staged);
			obj->staged = NULL;
			return 0;
		} else
			max = old->shared_max * 2;
	} else
		max = 4;

	/*
	 * resize obj->staged or allocate if it doesn't exist,
	 * noop if already correct size
	 */
	fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
			GFP_KERNEL);
	if (!fobj)
		return -ENOMEM;

	obj->staged = fobj;
	fobj->shared_max = max;
	return 0;
}
EXPORT_SYMBOL(reservation_object_reserve_shared);

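/*
 * Replace a fence from the same fence context in place, or fill the
 * pre-reserved slot at the end of the shared list. The update is published
 * under obj->seq so lockless readers either see a consistent list or
 * retry; obj->lock must be held by the caller.
 */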
static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	u32 i;

	fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);

	for (i = 0; i < fobj->shared_count; ++i) {
		struct fence *old_fence;

		old_fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(obj));

		if (old_fence->context == fence->context) {
			/* memory barrier is added by write_seqcount_begin */
			RCU_INIT_POINTER(fobj->shared[i], fence);
			write_seqcount_end(&obj->seq);
			preempt_enable();

			fence_put(old_fence);
			return;
		}
	}

	/*
	 * memory barrier is added by write_seqcount_begin,
	 * fobj->shared_count is protected by this lock too
	 */
	RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
	fobj->shared_count++;

	write_seqcount_end(&obj->seq);
	preempt_enable();
}

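/*
 * Publish a newly allocated (larger) shared list: carry every fence over
 * from the old list, replacing at most one fence from the same context as
 * the new fence, then swap the list pointer under obj->seq and free the
 * old list after a grace period. Fence references move over to the new
 * list, so only the added fence is re-referenced; obj->lock must be held
 * by the caller.
 */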
static void
reservation_object_add_shared_replace(struct reservation_object *obj,
				      struct reservation_object_list *old,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	unsigned i;
	struct fence *old_fence = NULL;

	fence_get(fence);

	if (!old) {
		RCU_INIT_POINTER(fobj->shared[0], fence);
		fobj->shared_count = 1;
		goto done;
	}

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	fobj->shared_count = old->shared_count;

	for (i = 0; i < old->shared_count; ++i) {
		struct fence *check;

		check = rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj));

		if (!old_fence && check->context == fence->context) {
			old_fence = check;
			RCU_INIT_POINTER(fobj->shared[i], fence);
		} else
			RCU_INIT_POINTER(fobj->shared[i], check);
	}
	if (!old_fence) {
		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
		fobj->shared_count++;
	}

done:
	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/*
	 * RCU_INIT_POINTER can be used here,
	 * seqcount provides the necessary barriers
	 */
	RCU_INIT_POINTER(obj->fence, fobj);
	write_seqcount_end(&obj->seq);
	preempt_enable();

	if (old)
		kfree_rcu(old, rcu);

	if (old_fence)
		fence_put(old_fence);
}

/*
 * Add a fence to a shared slot; obj->lock must be held, and
 * reservation_object_reserve_shared() must have been called.
 */
void reservation_object_add_shared_fence(struct reservation_object *obj,
					 struct fence *fence)
{
	struct reservation_object_list *old, *fobj = obj->staged;

	old = reservation_object_get_list(obj);
	obj->staged = NULL;

	if (!fobj) {
		BUG_ON(old->shared_count >= old->shared_max);
		reservation_object_add_shared_inplace(obj, old, fence);
	} else
		reservation_object_add_shared_replace(obj, old, fobj, fence);
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);

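/*
 * Typical usage of the two calls above (a minimal sketch; "obj", "fence"
 * and the ww_acquire_ctx "ctx" are caller-provided, and the -EDEADLK
 * backoff dance of ww_mutex_lock is omitted):
 *
 *	ww_mutex_lock(&obj->lock, ctx);
 *	ret = reservation_object_reserve_shared(obj);
 *	if (!ret)
 *		reservation_object_add_shared_fence(obj, fence);
 *	ww_mutex_unlock(&obj->lock);
 */

/*
 * Replace the exclusive fence and drop all shared fences; obj->lock must
 * be held. Passing a NULL fence just clears the exclusive slot.
 */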
void reservation_object_add_excl_fence(struct reservation_object *obj,
				       struct fence *fence)
{
	struct fence *old_fence = reservation_object_get_excl(obj);
	struct reservation_object_list *old;
	u32 i = 0;

	old = reservation_object_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);
	preempt_enable();

	/* inplace update, no shared fences */
	while (i--)
		fence_put(rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj)));

	if (old_fence)
		fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);

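/*
 * Take a snapshot of the exclusive fence and all shared fences without
 * holding obj->lock: the seqcount loop below rereads until a consistent
 * view was copied and a reference to every fence could be acquired.
 * Returns 0 on success or -ENOMEM; on success the caller must put the
 * returned fences and kfree(*pshared).
 */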
int reservation_object_get_fences_rcu(struct reservation_object *obj,
				      struct fence **pfence_excl,
				      unsigned *pshared_count,
				      struct fence ***pshared)
{
	unsigned shared_count = 0;
	unsigned retry = 1;
	struct fence **shared = NULL, *fence_excl = NULL;
	int ret = 0;

	while (retry) {
		struct reservation_object_list *fobj;
		unsigned seq;

		seq = read_seqcount_begin(&obj->seq);

		rcu_read_lock();

		fobj = rcu_dereference(obj->fence);
		if (fobj) {
			struct fence **nshared;
			size_t sz = sizeof(*shared) * fobj->shared_max;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();
				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				shared_count = 0;
				break;
			}
			shared = nshared;
			memcpy(shared, fobj->shared, sz);
			shared_count = fobj->shared_count;
		} else
			shared_count = 0;
		fence_excl = rcu_dereference(obj->fence_excl);

		retry = read_seqcount_retry(&obj->seq, seq);
		if (retry)
			goto unlock;

		if (!fence_excl || fence_get_rcu(fence_excl)) {
			unsigned i;

			for (i = 0; i < shared_count; ++i) {
				if (fence_get_rcu(shared[i]))
					continue;

				/* uh oh, refcount failed, abort and retry */
				while (i--)
					fence_put(shared[i]);

				if (fence_excl) {
					fence_put(fence_excl);
					fence_excl = NULL;
				}

				retry = 1;
				break;
			}
		} else
			retry = 1;

unlock:
		rcu_read_unlock();
	}
	*pshared_count = shared_count;
	if (shared_count)
		*pshared = shared;
	else {
		*pshared = NULL;
		kfree(shared);
	}
	*pfence_excl = fence_excl;

	return ret;
}
EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);

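/*
 * Wait for the fences on obj to signal without holding obj->lock: the
 * exclusive fence only, or all fences when wait_all is true. Returns a
 * negative error code if interrupted, 0 when the wait timed out, and the
 * remaining timeout otherwise; a timeout of 0 makes this a non-blocking
 * signaled test.
 */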
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
					 bool wait_all, bool intr,
					 unsigned long timeout)
{
	struct fence *fence;
	unsigned seq, shared_count, i = 0;
	long ret = timeout;

	if (!timeout)
		return reservation_object_test_signaled_rcu(obj, wait_all);

retry:
	fence = NULL;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();

	if (wait_all) {
		struct reservation_object_list *fobj =
						rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		for (i = 0; i < shared_count; ++i) {
			struct fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
				continue;

			if (!fence_get_rcu(lfence))
				goto unlock_retry;

			if (fence_is_signaled(lfence)) {
				fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	if (!shared_count) {
		struct fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		if (fence_excl &&
		    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
			if (!fence_get_rcu(fence_excl))
				goto unlock_retry;

			if (fence_is_signaled(fence_excl))
				fence_put(fence_excl);
			else
				fence = fence_excl;
		}
	}

	rcu_read_unlock();
	if (fence) {
		ret = fence_wait_timeout(fence, intr, ret);
		fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);

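/*
 * Test one fence for being signaled without holding a reference: returns
 * 1 if signaled, 0 if not, and -1 when the fence's refcount already
 * dropped to zero, in which case the caller rereads under the seqcount.
 */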
static inline int
reservation_object_test_signaled_single(struct fence *passed_fence)
{
	struct fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!fence_is_signaled(fence);
		fence_put(fence);
	}
	return ret;
}

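/*
 * Test whether obj's fences have signaled without taking obj->lock: with
 * test_all the shared fences are checked, otherwise (or when there are no
 * shared fences) the exclusive fence is checked, retrying whenever a
 * concurrent update of obj->seq is detected.
 */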
bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
					  bool test_all)
{
	unsigned seq, shared_count;
	int ret = true;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();

	if (test_all) {
		unsigned i;

		struct reservation_object_list *fobj =
						rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		for (i = 0; i < shared_count; ++i) {
			struct fence *fence = rcu_dereference(fobj->shared[i]);

			ret = reservation_object_test_signaled_single(fence);
			if (ret < 0)
				goto unlock_retry;
			else if (!ret)
				break;
		}

		/*
		 * There could be a read_seqcount_retry here, but nothing cares
		 * about whether it's the old or newer fence pointers that are
		 * signaled. That race could still have happened after checking
		 * read_seqcount_retry. If you care, use ww_mutex_lock.
		 */
	}

	if (!shared_count) {
		struct fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		if (fence_excl) {
			ret = reservation_object_test_signaled_single(
								fence_excl);
			if (ret < 0)
				goto unlock_retry;
		}
	}

	rcu_read_unlock();
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);