/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/reservation.h>
#include <linux/export.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer. A reservation object
 * can have one exclusive fence attached (normally associated with
 * write operations) or N shared fences (read operations). The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 */
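
/*
 * A minimal sketch of the write-side update path (illustrative only; the
 * example_* helpers in this file are hypothetical and not part of the
 * reservation API). All fence updates happen with obj->lock held, and a
 * shared slot must be reserved before a shared fence can be published.
 * A real caller would normally pass a ww_acquire_ctx instead of NULL
 * when locking several objects at once.
 */
static inline int example_attach_read_fence(struct reservation_object *obj,
					    struct fence *fence)
{
	int ret;

	ret = ww_mutex_lock(&obj->lock, NULL);
	if (ret)
		return ret;

	/* make room for one more shared fence before publishing it */
	ret = reservation_object_reserve_shared(obj);
	if (!ret)
		reservation_object_add_shared_fence(obj, fence);

	ww_mutex_unlock(&obj->lock);
	return ret;
}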

DEFINE_WW_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

struct lock_class_key reservation_seqcount_class;
EXPORT_SYMBOL(reservation_seqcount_class);

const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);

/**
 * reservation_object_reserve_shared - Reserve space to add a shared
 * fence to a reservation_object.
 * @obj: reservation object
 *
 * Should be called before reservation_object_add_shared_fence(). Must
 * be called with obj->lock held.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int reservation_object_reserve_shared(struct reservation_object *obj)
{
	struct reservation_object_list *fobj, *old;
	u32 max;

	old = reservation_object_get_list(obj);

	if (old && old->shared_max) {
		if (old->shared_count < old->shared_max) {
			/* perform an in-place update */
			kfree(obj->staged);
			obj->staged = NULL;
			return 0;
		} else
			max = old->shared_max * 2;
	} else
		max = 4;

	/*
	 * resize obj->staged or allocate if it doesn't exist,
	 * noop if already correct size
	 */
	fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
			GFP_KERNEL);
	if (!fobj)
		return -ENOMEM;

	obj->staged = fobj;
	fobj->shared_max = max;
	return 0;
}
EXPORT_SYMBOL(reservation_object_reserve_shared);
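
/*
 * Illustrative sketch (hypothetical helper): each call to
 * reservation_object_reserve_shared() guarantees room for exactly one
 * subsequent add, so attaching several shared fences repeats the
 * reserve/add pair. Assumes obj->lock is already held by the caller.
 */
static inline int example_add_shared_fences(struct reservation_object *obj,
					    struct fence **fences,
					    unsigned int count)
{
	unsigned int i;
	int ret;

	for (i = 0; i < count; ++i) {
		ret = reservation_object_reserve_shared(obj);
		if (ret)
			return ret;

		reservation_object_add_shared_fence(obj, fences[i]);
	}
	return 0;
}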

static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	u32 i;

	fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);

	for (i = 0; i < fobj->shared_count; ++i) {
		struct fence *old_fence;

		old_fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(obj));

		if (old_fence->context == fence->context) {
			/* memory barrier is added by write_seqcount_begin */
			RCU_INIT_POINTER(fobj->shared[i], fence);
			write_seqcount_end(&obj->seq);
			preempt_enable();

			fence_put(old_fence);
			return;
		}
	}

	/*
	 * memory barrier is added by write_seqcount_begin,
	 * fobj->shared_count is protected by this lock too
	 */
	RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
	fobj->shared_count++;

	write_seqcount_end(&obj->seq);
	preempt_enable();
}

static void
reservation_object_add_shared_replace(struct reservation_object *obj,
				      struct reservation_object_list *old,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	unsigned i;
	struct fence *old_fence = NULL;

	fence_get(fence);

	if (!old) {
		RCU_INIT_POINTER(fobj->shared[0], fence);
		fobj->shared_count = 1;
		goto done;
	}

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	fobj->shared_count = old->shared_count;

	for (i = 0; i < old->shared_count; ++i) {
		struct fence *check;

		check = rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj));

		if (!old_fence && check->context == fence->context) {
			old_fence = check;
			RCU_INIT_POINTER(fobj->shared[i], fence);
		} else
			RCU_INIT_POINTER(fobj->shared[i], check);
	}
	if (!old_fence) {
		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
		fobj->shared_count++;
	}

done:
	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/*
	 * RCU_INIT_POINTER can be used here,
	 * seqcount provides the necessary barriers
	 */
	RCU_INIT_POINTER(obj->fence, fobj);
	write_seqcount_end(&obj->seq);
	preempt_enable();

	if (old)
		kfree_rcu(old, rcu);

	if (old_fence)
		fence_put(old_fence);
}

/**
 * reservation_object_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot, obj->lock must be held, and
 * reservation_object_reserve_shared() has been called.
 */
void reservation_object_add_shared_fence(struct reservation_object *obj,
					 struct fence *fence)
{
	struct reservation_object_list *old, *fobj = obj->staged;

	old = reservation_object_get_list(obj);
	obj->staged = NULL;

	if (!fobj) {
		BUG_ON(old->shared_count >= old->shared_max);
		reservation_object_add_shared_inplace(obj, old, fence);
	} else
		reservation_object_add_shared_replace(obj, old, fobj, fence);
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);

/**
 * reservation_object_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot. The obj->lock must be held.
 */
void reservation_object_add_excl_fence(struct reservation_object *obj,
				       struct fence *fence)
{
	struct fence *old_fence = reservation_object_get_excl(obj);
	struct reservation_object_list *old;
	u32 i = 0;

	old = reservation_object_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);
	preempt_enable();

	/* inplace update, no shared fences */
	while (i--)
		fence_put(rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj)));

	if (old_fence)
		fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
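
/*
 * Illustrative sketch (hypothetical helper): publishing a write as the
 * exclusive fence. No slot reservation is needed for the exclusive
 * fence, but obj->lock must still be held; a NULL acquire context gives
 * plain mutex semantics.
 */
static inline void example_attach_write_fence(struct reservation_object *obj,
					      struct fence *fence)
{
	if (ww_mutex_lock(&obj->lock, NULL))
		return;

	/* also drops the references to any previously attached fences */
	reservation_object_add_excl_fence(obj, fence);
	ww_mutex_unlock(&obj->lock);
}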

/**
 * reservation_object_get_fences_rcu - Get an object's shared and exclusive
 * fences without the update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * RETURNS
 * Zero or -errno
 */
int reservation_object_get_fences_rcu(struct reservation_object *obj,
				      struct fence **pfence_excl,
				      unsigned *pshared_count,
				      struct fence ***pshared)
{
	unsigned shared_count = 0;
	unsigned retry = 1;
	struct fence **shared = NULL, *fence_excl = NULL;
	int ret = 0;

	while (retry) {
		struct reservation_object_list *fobj;
		unsigned seq;

		seq = read_seqcount_begin(&obj->seq);

		rcu_read_lock();

		fobj = rcu_dereference(obj->fence);
		if (fobj) {
			struct fence **nshared;
			size_t sz = sizeof(*shared) * fobj->shared_max;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();
				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				shared_count = 0;
				break;
			}
			shared = nshared;
			memcpy(shared, fobj->shared, sz);
			shared_count = fobj->shared_count;
		} else
			shared_count = 0;
		fence_excl = rcu_dereference(obj->fence_excl);

		retry = read_seqcount_retry(&obj->seq, seq);
		if (retry)
			goto unlock;

		if (!fence_excl || fence_get_rcu(fence_excl)) {
			unsigned i;

			for (i = 0; i < shared_count; ++i) {
				if (fence_get_rcu(shared[i]))
					continue;

				/* uh oh, refcount failed, abort and retry */
				while (i--)
					fence_put(shared[i]);

				if (fence_excl) {
					fence_put(fence_excl);
					fence_excl = NULL;
				}

				retry = 1;
				break;
			}
		} else
			retry = 1;

unlock:
		rcu_read_unlock();
	}
	*pshared_count = shared_count;
	if (shared_count)
		*pshared = shared;
	else {
		*pshared = NULL;
		kfree(shared);
	}
	*pfence_excl = fence_excl;

	return ret;
}
EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
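
/*
 * Illustrative sketch (hypothetical helper): taking a lockless snapshot
 * of all fences. The callee allocates the shared array and takes a
 * reference on every returned fence, so the caller must drop both.
 */
static inline void example_snapshot_fences(struct reservation_object *obj)
{
	struct fence *excl, **shared;
	unsigned count, i;

	if (reservation_object_get_fences_rcu(obj, &excl, &count, &shared))
		return;

	/* ... inspect the snapshot here ... */

	for (i = 0; i < count; ++i)
		fence_put(shared[i]);
	kfree(shared);

	if (excl)
		fence_put(excl);
}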

/**
 * reservation_object_wait_timeout_rcu - Wait on a reservation object's
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
					 bool wait_all, bool intr,
					 unsigned long timeout)
{
	struct fence *fence;
	unsigned seq, shared_count, i = 0;
	long ret = timeout;

	if (!timeout)
		return reservation_object_test_signaled_rcu(obj, wait_all);

retry:
	fence = NULL;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();

	if (wait_all) {
		struct reservation_object_list *fobj =
						rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		for (i = 0; i < shared_count; ++i) {
			struct fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
				continue;

			if (!fence_get_rcu(lfence))
				goto unlock_retry;

			if (fence_is_signaled(lfence)) {
				fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	if (!shared_count) {
		struct fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		if (fence_excl &&
		    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
			if (!fence_get_rcu(fence_excl))
				goto unlock_retry;

			if (fence_is_signaled(fence_excl))
				fence_put(fence_excl);
			else
				fence = fence_excl;
		}
	}

	rcu_read_unlock();
	if (fence) {
		ret = fence_wait_timeout(fence, intr, ret);
		fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
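
/*
 * Illustrative sketch (hypothetical helper): waiting interruptibly for
 * all readers and the writer with a 100ms timeout, mapping the tristate
 * return value onto a conventional errno-style result.
 */
static inline int example_wait_idle(struct reservation_object *obj)
{
	long ret;

	ret = reservation_object_wait_timeout_rcu(obj, true, true,
						  msecs_to_jiffies(100));
	if (ret < 0)
		return ret;	/* e.g. -ERESTARTSYS if interrupted */

	return ret ? 0 : -ETIMEDOUT;
}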


static inline int
reservation_object_test_signaled_single(struct fence *passed_fence)
{
	struct fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!fence_is_signaled(fence);
		fence_put(fence);
	}
	return ret;
}

/**
 * reservation_object_test_signaled_rcu - Test if a reservation object's
 * fences have been signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * RETURNS
 * true if all fences signaled, else false
 */
bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
					  bool test_all)
{
	unsigned seq, shared_count;
	int ret = true;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();

	if (test_all) {
		unsigned i;

		struct reservation_object_list *fobj =
						rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		for (i = 0; i < shared_count; ++i) {
			struct fence *fence = rcu_dereference(fobj->shared[i]);

			ret = reservation_object_test_signaled_single(fence);
			if (ret < 0)
				goto unlock_retry;
			else if (!ret)
				break;
		}

		/*
		 * There could be a read_seqcount_retry here, but nothing cares
		 * about whether it's the old or newer fence pointers that are
		 * signaled. That race could still have happened after checking
		 * read_seqcount_retry. If you care, use ww_mutex_lock.
		 */
	}

	if (!shared_count) {
		struct fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		if (fence_excl) {
			ret = reservation_object_test_signaled_single(
								fence_excl);
			if (ret < 0)
				goto unlock_retry;
		}
	}

	rcu_read_unlock();
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
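
/*
 * Illustrative sketch (hypothetical helper): a lockless poll of the
 * object's idle state. The answer may already be stale on return; take
 * obj->lock around the check if a stable answer is required.
 */
static inline bool example_is_idle(struct reservation_object *obj)
{
	return reservation_object_test_signaled_rcu(obj, true);
}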