/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/reservation.h>
#include <linux/export.h>

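/*
 * All reservation objects share this single ww_mutex class, so that
 * reservation locks of buffers from different drivers can be acquired
 * together under the same deadlock-avoidance protocol.
 */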
DEFINE_WW_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

struct lock_class_key reservation_seqcount_class;
EXPORT_SYMBOL(reservation_seqcount_class);

const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);

/*
 * Reserve space to add a shared fence to a reservation_object;
 * must be called with obj->lock held.
 */
int reservation_object_reserve_shared(struct reservation_object *obj)
{
	struct reservation_object_list *fobj, *old;
	u32 max;

	old = reservation_object_get_list(obj);

	if (old && old->shared_max) {
		if (old->shared_count < old->shared_max) {
			/* perform an in-place update */
			kfree(obj->staged);
			obj->staged = NULL;
			return 0;
		} else
			max = old->shared_max * 2;
	} else
		max = 4;

	/*
	 * resize obj->staged or allocate if it doesn't exist,
	 * noop if already correct size
	 */
	fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
			GFP_KERNEL);
	if (!fobj)
		return -ENOMEM;

	obj->staged = fobj;
	fobj->shared_max = max;
	return 0;
}
EXPORT_SYMBOL(reservation_object_reserve_shared);

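/*
 * Add the fence to a slot in the existing fence list: a fence from the
 * same context is replaced in place, otherwise the fence is appended to
 * the reserved free slot.
 */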
static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	u32 i;

	fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);

	for (i = 0; i < fobj->shared_count; ++i) {
		struct fence *old_fence;

		old_fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(obj));

		if (old_fence->context == fence->context) {
			/* memory barrier is added by write_seqcount_begin */
			RCU_INIT_POINTER(fobj->shared[i], fence);
			write_seqcount_end(&obj->seq);
			preempt_enable();

			fence_put(old_fence);
			return;
		}
	}

	/*
	 * memory barrier is added by write_seqcount_begin,
	 * fobj->shared_count is protected by this lock too
	 */
	RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
	fobj->shared_count++;

	write_seqcount_end(&obj->seq);
	preempt_enable();
}

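/*
 * Publish the staged, larger fence list: carry the old entries over
 * (replacing one from the same context, if any), append the new fence
 * if no slot was replaced, then swap in the new list and free the old
 * one after a grace period.
 */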
static void
reservation_object_add_shared_replace(struct reservation_object *obj,
				      struct reservation_object_list *old,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	unsigned i;
	struct fence *old_fence = NULL;

	fence_get(fence);

	if (!old) {
		RCU_INIT_POINTER(fobj->shared[0], fence);
		fobj->shared_count = 1;
		goto done;
	}

	/*
	 * No need to bump the fence refcounts here: RCU readers must use
	 * kref_get_unless_zero() anyway, and the references held by the
	 * old struct are simply carried over to the new one.
	 */
	fobj->shared_count = old->shared_count;

	for (i = 0; i < old->shared_count; ++i) {
		struct fence *check;

		check = rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj));

		if (!old_fence && check->context == fence->context) {
			old_fence = check;
			RCU_INIT_POINTER(fobj->shared[i], fence);
		} else
			RCU_INIT_POINTER(fobj->shared[i], check);
	}
	if (!old_fence) {
		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
		fobj->shared_count++;
	}

done:
	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/*
	 * RCU_INIT_POINTER can be used here,
	 * seqcount provides the necessary barriers
	 */
	RCU_INIT_POINTER(obj->fence, fobj);
	write_seqcount_end(&obj->seq);
	preempt_enable();

	if (old)
		kfree_rcu(old, rcu);

	if (old_fence)
		fence_put(old_fence);
}

/*
 * Add a fence to a shared slot; obj->lock must be held, and
 * reservation_object_reserve_shared() must have been called first
 * to ensure a free slot is available (see the usage sketch below).
 */
void reservation_object_add_shared_fence(struct reservation_object *obj,
					 struct fence *fence)
{
	struct reservation_object_list *old, *fobj = obj->staged;

	old = reservation_object_get_list(obj);
	obj->staged = NULL;

	if (!fobj) {
		BUG_ON(old->shared_count >= old->shared_max);
		reservation_object_add_shared_inplace(obj, old, fence);
	} else
		reservation_object_add_shared_replace(obj, old, fobj, fence);
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);
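
/*
 * Typical caller-side usage, as a sketch only ("resv" and "fence" are
 * hypothetical caller-side names, not part of this file):
 *
 *	ww_mutex_lock(&resv->lock, NULL);
 *	ret = reservation_object_reserve_shared(resv);
 *	if (!ret)
 *		reservation_object_add_shared_fence(resv, fence);
 *	ww_mutex_unlock(&resv->lock);
 */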
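/*
 * Set the exclusive fence (which may be NULL), replacing any previous
 * exclusive fence and dropping all shared fences; obj->lock must be
 * held. References to the dropped fences are released only after the
 * update is visible to readers.
 */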
void reservation_object_add_excl_fence(struct reservation_object *obj,
				       struct fence *fence)
{
	struct fence *old_fence = reservation_object_get_excl(obj);
	struct reservation_object_list *old;
	u32 i = 0;

	old = reservation_object_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);
	preempt_enable();

	/* inplace update, no shared fences */
	while (i--)
		fence_put(rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj)));

	if (old_fence)
		fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);

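/*
 * Get a consistent snapshot of the current fences without taking
 * obj->lock: the exclusive fence is stored in *pfence_excl, the shared
 * fences in a newly allocated *pshared array of *pshared_count entries.
 * On success (0) the caller holds a reference on every returned fence
 * and must kfree() *pshared when done; returns -ENOMEM on allocation
 * failure.
 */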
int reservation_object_get_fences_rcu(struct reservation_object *obj,
				      struct fence **pfence_excl,
				      unsigned *pshared_count,
				      struct fence ***pshared)
{
	unsigned shared_count = 0;
	unsigned retry = 1;
	struct fence **shared = NULL, *fence_excl = NULL;
	int ret = 0;

	while (retry) {
		struct reservation_object_list *fobj;
		unsigned seq;

		seq = read_seqcount_begin(&obj->seq);

		rcu_read_lock();

		fobj = rcu_dereference(obj->fence);
		if (fobj) {
			struct fence **nshared;
			size_t sz = sizeof(*shared) * fobj->shared_max;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();
				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				shared_count = 0;
				break;
			}
			shared = nshared;
			memcpy(shared, fobj->shared, sz);
			shared_count = fobj->shared_count;
		} else
			shared_count = 0;
		fence_excl = rcu_dereference(obj->fence_excl);

		retry = read_seqcount_retry(&obj->seq, seq);
		if (retry)
			goto unlock;

		if (!fence_excl || fence_get_rcu(fence_excl)) {
			unsigned i;

			for (i = 0; i < shared_count; ++i) {
				if (fence_get_rcu(shared[i]))
					continue;

				/* uh oh, refcount failed, abort and retry */
				while (i--)
					fence_put(shared[i]);

				if (fence_excl) {
					fence_put(fence_excl);
					fence_excl = NULL;
				}

				retry = 1;
				break;
			}
		} else
			retry = 1;

unlock:
		rcu_read_unlock();
	}
	*pshared_count = shared_count;
	if (shared_count)
		*pshared = shared;
	else {
		*pshared = NULL;
		kfree(shared);
	}
	*pfence_excl = fence_excl;

	return ret;
}
EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);

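/*
 * Wait until the fences are signaled or until timeout jiffies have
 * passed, without holding obj->lock: all shared fences if wait_all is
 * set, otherwise only the exclusive fence. A zero timeout just polls
 * via reservation_object_test_signaled_rcu(). Returns the remaining
 * timeout on success, 0 on timeout, or a negative error such as
 * -ERESTARTSYS when intr is set and the wait was interrupted.
 */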
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
					 bool wait_all, bool intr,
					 unsigned long timeout)
{
	struct fence *fence;
	unsigned seq, shared_count, i = 0;
	long ret = timeout;

	if (!timeout)
		return reservation_object_test_signaled_rcu(obj, wait_all);

retry:
	fence = NULL;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();

	if (wait_all) {
		struct reservation_object_list *fobj =
						rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		for (i = 0; i < shared_count; ++i) {
			struct fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
				continue;

			if (!fence_get_rcu(lfence))
				goto unlock_retry;

			if (fence_is_signaled(lfence)) {
				fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	if (!shared_count) {
		struct fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		if (fence_excl &&
		    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
			if (!fence_get_rcu(fence_excl))
				goto unlock_retry;

			if (fence_is_signaled(fence_excl))
				fence_put(fence_excl);
			else
				fence = fence_excl;
		}
	}

	rcu_read_unlock();
	if (fence) {
		ret = fence_wait_timeout(fence, intr, ret);
		fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);

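/*
 * Returns 1 if the fence is signaled, 0 if not, and -1 if the refcount
 * race with fence release was lost and the caller must retry with
 * freshly read fence pointers.
 */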
static inline int
reservation_object_test_signaled_single(struct fence *passed_fence)
{
	struct fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!fence_is_signaled(fence);
		fence_put(fence);
	}
	return ret;
}

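/*
 * Test, without taking obj->lock, whether the fences are signaled:
 * with test_all, every shared fence (falling back to the exclusive
 * fence when there are none); otherwise just the exclusive fence.
 * Returns true if everything tested is signaled.
 */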
bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
					  bool test_all)
{
	unsigned seq, shared_count;
	int ret = true;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();

	if (test_all) {
		unsigned i;

		struct reservation_object_list *fobj =
						rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		for (i = 0; i < shared_count; ++i) {
			struct fence *fence = rcu_dereference(fobj->shared[i]);

			ret = reservation_object_test_signaled_single(fence);
			if (ret < 0)
				goto unlock_retry;
			else if (!ret)
				break;
		}

		/*
		 * There could be a read_seqcount_retry here, but nothing cares
		 * about whether it's the old or newer fence pointers that are
		 * signaled. That race could still have happened after checking
		 * read_seqcount_retry. If you care, use ww_mutex_lock.
		 */
	}

	if (!shared_count) {
		struct fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		if (fence_excl) {
			ret = reservation_object_test_signaled_single(
								fence_excl);
			if (ret < 0)
				goto unlock_retry;
		}
	}

	rcu_read_unlock();
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);