/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/reservation.h>
#include <linux/export.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer. A reservation object
 * can have one exclusive fence attached (normally associated with
 * write operations) or N shared fences (read operations). The RCU
 * mechanism is used to protect read access to the fences from locked
 * write-side updates.
 */
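
/*
 * A minimal sketch (illustrative, not part of this file's API) of the
 * lockless read pattern the functions below rely on: snapshot a fence
 * pointer under the seqcount, take a reference with fence_get_rcu(),
 * and retry if a write-side update raced with the read. If the grab
 * fails, the fence was being freed, which implies a writer bumped the
 * seqcount, so the retry catches it.
 *
 *	struct fence *fence;
 *	unsigned seq;
 *
 *	rcu_read_lock();
 *	do {
 *		seq = read_seqcount_begin(&obj->seq);
 *		fence = rcu_dereference(obj->fence_excl);
 *		if (fence && !fence_get_rcu(fence))
 *			fence = NULL;
 *	} while (read_seqcount_retry(&obj->seq, seq));
 *	rcu_read_unlock();
 */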

DEFINE_WW_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

struct lock_class_key reservation_seqcount_class;
EXPORT_SYMBOL(reservation_seqcount_class);

const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);

/**
 * reservation_object_reserve_shared - Reserve space to add a shared
 * fence to a reservation_object.
 * @obj: reservation object
 *
 * Should be called before reservation_object_add_shared_fence(). Must
 * be called with obj->lock held.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int reservation_object_reserve_shared(struct reservation_object *obj)
{
	struct reservation_object_list *fobj, *old;
	u32 max;

	old = reservation_object_get_list(obj);

	if (old && old->shared_max) {
		if (old->shared_count < old->shared_max) {
			/* perform an in-place update */
			kfree(obj->staged);
			obj->staged = NULL;
			return 0;
		} else
			max = old->shared_max * 2;
	} else
		max = 4;

	/*
	 * resize obj->staged or allocate if it doesn't exist,
	 * noop if already correct size
	 */
	fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
			GFP_KERNEL);
	if (!fobj)
		return -ENOMEM;

	obj->staged = fobj;
	fobj->shared_max = max;
	return 0;
}
EXPORT_SYMBOL(reservation_object_reserve_shared);

static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	u32 i;

	fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);

	for (i = 0; i < fobj->shared_count; ++i) {
		struct fence *old_fence;

		old_fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(obj));

		if (old_fence->context == fence->context) {
			/* memory barrier is added by write_seqcount_begin */
			RCU_INIT_POINTER(fobj->shared[i], fence);
			write_seqcount_end(&obj->seq);
			preempt_enable();

			fence_put(old_fence);
			return;
		}
	}

	/*
	 * memory barrier is added by write_seqcount_begin,
	 * fobj->shared_count is protected by this lock too
	 */
	RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
	fobj->shared_count++;

	write_seqcount_end(&obj->seq);
	preempt_enable();
}

static void
reservation_object_add_shared_replace(struct reservation_object *obj,
				      struct reservation_object_list *old,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	unsigned i;
	struct fence *old_fence = NULL;

	fence_get(fence);

	if (!old) {
		RCU_INIT_POINTER(fobj->shared[0], fence);
		fobj->shared_count = 1;
		goto done;
	}

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	fobj->shared_count = old->shared_count;

	for (i = 0; i < old->shared_count; ++i) {
		struct fence *check;

		check = rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj));

		if (!old_fence && check->context == fence->context) {
			old_fence = check;
			RCU_INIT_POINTER(fobj->shared[i], fence);
		} else
			RCU_INIT_POINTER(fobj->shared[i], check);
	}
	if (!old_fence) {
		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
		fobj->shared_count++;
	}

done:
	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/*
	 * RCU_INIT_POINTER can be used here,
	 * seqcount provides the necessary barriers
	 */
	RCU_INIT_POINTER(obj->fence, fobj);
	write_seqcount_end(&obj->seq);
	preempt_enable();

	if (old)
		kfree_rcu(old, rcu);

	if (old_fence)
		fence_put(old_fence);
}

/**
 * reservation_object_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot. obj->lock must be held, and
 * reservation_object_reserve_shared() must have been called first.
 */
void reservation_object_add_shared_fence(struct reservation_object *obj,
					 struct fence *fence)
{
	struct reservation_object_list *old, *fobj = obj->staged;

	old = reservation_object_get_list(obj);
	obj->staged = NULL;

	if (!fobj) {
		BUG_ON(old->shared_count >= old->shared_max);
		reservation_object_add_shared_inplace(obj, old, fence);
	} else
		reservation_object_add_shared_replace(obj, old, fobj, fence);
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);
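
/*
 * A minimal usage sketch (not taken from a real driver; "ctx" and
 * "fence" are assumed to come from the caller, and error/deadlock
 * handling is elided): attaching a read fence on the write side.
 * reservation_object_reserve_shared() does the allocation and may
 * fail, so it runs first while the caller can still back out; the
 * add itself then cannot fail.
 *
 *	int ret;
 *
 *	ww_mutex_lock(&obj->lock, ctx);
 *	ret = reservation_object_reserve_shared(obj);
 *	if (!ret)
 *		reservation_object_add_shared_fence(obj, fence);
 *	ww_mutex_unlock(&obj->lock);
 */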

/**
 * reservation_object_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot. The obj->lock must be held.
 */
void reservation_object_add_excl_fence(struct reservation_object *obj,
				       struct fence *fence)
{
	struct fence *old_fence = reservation_object_get_excl(obj);
	struct reservation_object_list *old;
	u32 i = 0;

	old = reservation_object_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);
	preempt_enable();

	/* inplace update, no shared fences */
	while (i--)
		fence_put(rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj)));

	if (old_fence)
		fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
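
/*
 * A minimal usage sketch (caller-provided "ctx" and "fence", error
 * and deadlock handling elided): publishing a write. Attaching an
 * exclusive fence implicitly drops all shared fences, so a writer
 * needs only the ww_mutex and no prior reserve step.
 *
 *	ww_mutex_lock(&obj->lock, ctx);
 *	reservation_object_add_excl_fence(obj, fence);
 *	ww_mutex_unlock(&obj->lock);
 */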

/**
 * reservation_object_get_fences_rcu - Get an object's shared and exclusive
 * fences without the update-side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by the caller)
 *
 * RETURNS
 * Zero or -errno
 */
int reservation_object_get_fences_rcu(struct reservation_object *obj,
				      struct fence **pfence_excl,
				      unsigned *pshared_count,
				      struct fence ***pshared)
{
	unsigned shared_count = 0;
	unsigned retry = 1;
	struct fence **shared = NULL, *fence_excl = NULL;
	int ret = 0;

	while (retry) {
		struct reservation_object_list *fobj;
		unsigned seq;

		seq = read_seqcount_begin(&obj->seq);

		rcu_read_lock();

		fobj = rcu_dereference(obj->fence);
		if (fobj) {
			struct fence **nshared;
			size_t sz = sizeof(*shared) * fobj->shared_max;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();
				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				shared_count = 0;
				break;
			}
			shared = nshared;
			memcpy(shared, fobj->shared, sz);
			shared_count = fobj->shared_count;
		} else
			shared_count = 0;
		fence_excl = rcu_dereference(obj->fence_excl);

		retry = read_seqcount_retry(&obj->seq, seq);
		if (retry)
			goto unlock;

		if (!fence_excl || fence_get_rcu(fence_excl)) {
			unsigned i;

			for (i = 0; i < shared_count; ++i) {
				if (fence_get_rcu(shared[i]))
					continue;

				/* uh oh, refcount failed, abort and retry */
				while (i--)
					fence_put(shared[i]);

				if (fence_excl) {
					fence_put(fence_excl);
					fence_excl = NULL;
				}

				retry = 1;
				break;
			}
		} else
			retry = 1;

unlock:
		rcu_read_unlock();
	}
	*pshared_count = shared_count;
	if (shared_count)
		*pshared = shared;
	else {
		*pshared = NULL;
		kfree(shared);
	}
	*pfence_excl = fence_excl;

	return ret;
}
EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
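
/*
 * A minimal usage sketch: snapshotting all fences without taking
 * obj->lock. On success the caller owns one reference per returned
 * fence plus the shared array, and must drop them all when done.
 *
 *	struct fence *excl, **shared;
 *	unsigned count, i;
 *
 *	if (reservation_object_get_fences_rcu(obj, &excl, &count, &shared))
 *		return;
 *	for (i = 0; i < count; ++i)
 *		fence_put(shared[i]);
 *	kfree(shared);
 *	if (excl)
 *		fence_put(excl);
 */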

/**
 * reservation_object_wait_timeout_rcu - Wait on a reservation object's
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just the exclusive
 * fence
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
					 bool wait_all, bool intr,
					 unsigned long timeout)
{
	struct fence *fence;
	unsigned seq, shared_count, i = 0;
	long ret = timeout;

	if (!timeout)
		return reservation_object_test_signaled_rcu(obj, wait_all);

retry:
	fence = NULL;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();

	if (wait_all) {
		struct reservation_object_list *fobj =
						rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		for (i = 0; i < shared_count; ++i) {
			struct fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
				continue;

			if (!fence_get_rcu(lfence))
				goto unlock_retry;

			if (fence_is_signaled(lfence)) {
				fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	if (!shared_count) {
		struct fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		if (fence_excl &&
		    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
			if (!fence_get_rcu(fence_excl))
				goto unlock_retry;

			if (fence_is_signaled(fence_excl))
				fence_put(fence_excl);
			else
				fence = fence_excl;
		}
	}

	rcu_read_unlock();
	if (fence) {
		ret = fence_wait_timeout(fence, intr, ret);
		fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
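
/*
 * A minimal usage sketch: block (interruptibly) until the buffer is
 * idle. A negative return means the wait was interrupted; zero means
 * it timed out, which cannot happen with MAX_SCHEDULE_TIMEOUT. A
 * caller that only needs to order against writes would pass
 * wait_all = false.
 *
 *	long lret;
 *
 *	lret = reservation_object_wait_timeout_rcu(obj, true, true,
 *						   MAX_SCHEDULE_TIMEOUT);
 *	if (lret < 0)
 *		return lret;
 */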

static inline int
reservation_object_test_signaled_single(struct fence *passed_fence)
{
	struct fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!fence_is_signaled(fence);
		fence_put(fence);
	}
	return ret;
}

/**
 * reservation_object_test_signaled_rcu - Test if a reservation object's
 * fences have been signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * RETURNS
 * true if all fences signaled, else false
 */
bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
					  bool test_all)
{
	unsigned seq, shared_count;
	int ret = true;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();

	if (test_all) {
		unsigned i;

		struct reservation_object_list *fobj =
						rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		for (i = 0; i < shared_count; ++i) {
			struct fence *fence = rcu_dereference(fobj->shared[i]);

			ret = reservation_object_test_signaled_single(fence);
			if (ret < 0)
				goto unlock_retry;
			else if (!ret)
				break;
		}

		/*
		 * There could be a read_seqcount_retry here, but nothing cares
		 * about whether it's the old or newer fence pointers that are
		 * signaled. That race could still have happened after checking
		 * read_seqcount_retry. If you care, use ww_mutex_lock.
		 */
	}

	if (!shared_count) {
		struct fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		if (fence_excl) {
			ret = reservation_object_test_signaled_single(
								fence_excl);
			if (ret < 0)
				goto unlock_retry;
		}
	}

	rcu_read_unlock();
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
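
/*
 * A minimal usage sketch: a non-blocking busy check, the kind of thing
 * a driver's "busy" ioctl might do. With test_all = true the buffer
 * counts as busy while any shared or exclusive fence is unsignaled.
 *
 *	bool idle = reservation_object_test_signaled_rcu(obj, true);
 */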