/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

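/* One hash table entry per fence context; only the latest fence seen
 * from each context is kept.
 */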
struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct fence		*fence;
};

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	unsigned i;

	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
		sync->semaphores[i] = NULL;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		sync->sync_to[i] = NULL;

	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}

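/* check if the fence was emitted by a ring of this device, either
 * directly as an amdgpu fence or through the GPU scheduler
 */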
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
{
	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (a_fence)
		return a_fence->ring->adev == adev;

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}

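/* check if the fence belongs to the given owner */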
static bool amdgpu_sync_test_owner(struct fence *f, void *owner)
{
	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence)
		return s_fence->owner == owner;
	if (a_fence)
		return a_fence->owner == owner;
	return false;
}

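/* replace *keep with the later of the two fences, dropping the old reference */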
static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
{
	if (*keep && fence_is_later(*keep, fence))
		return;

	fence_put(*keep);
	*keep = fence_get(fence);
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device
 * @sync: sync object to add fence to
 * @f: fence to sync to
 *
 * Add the fence to the sync object. Returns 0 on success or -ENOMEM if
 * a new hash table entry couldn't be allocated.
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f)
{
	struct amdgpu_sync_entry *e;
	struct amdgpu_fence *fence;

	if (!f)
		return 0;

	if (amdgpu_sync_same_dev(adev, f) &&
	    amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM))
		amdgpu_sync_keep_later(&sync->last_vm_update, f);

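	/* Fences from other devices or from the scheduler are tracked
	 * per fence context in the hash table; only the latest fence of
	 * each context is remembered.
	 */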
	fence = to_amdgpu_fence(f);
	if (!fence || fence->ring->adev != adev) {
		hash_for_each_possible(sync->fences, e, node, f->context) {
			if (unlikely(e->fence->context != f->context))
				continue;

			amdgpu_sync_keep_later(&e->fence, f);
			return 0;
		}

		e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
		if (!e)
			return -ENOMEM;

		hash_add(sync->fences, &e->node, f->context);
		e->fence = fence_get(f);
		return 0;
	}

	amdgpu_sync_keep_later(&sync->sync_to[fence->ring->idx], f);

	return 0;
}

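/* return the owner of the fence, or AMDGPU_FENCE_OWNER_UNDEFINED if unknown */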
static void *amdgpu_sync_get_owner(struct fence *f)
{
	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence)
		return s_fence->owner;
	else if (a_fence)
		return a_fence->owner;
	return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fences
 * @owner: owner we want to sync to, used to filter out fences we
 *	don't actually need to wait for
 *
 * Add the exclusive fence and all relevant shared fences to the sync object.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner)
{
	struct reservation_object_list *flist;
	struct fence *f;
	void *fence_owner;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	r = amdgpu_sync_fence(adev, sync, f);

	flist = reservation_object_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		if (amdgpu_sync_same_dev(adev, f)) {
			/* VM updates are only interesting
			 * for other VM updates and moves.
			 */
			fence_owner = amdgpu_sync_get_owner(f);
			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
				continue;

			/* Ignore fences from the same owner as
			 * long as it isn't undefined.
			 */
			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
			    fence_owner == owner)
				continue;
		}

		r = amdgpu_sync_fence(adev, sync, f);
		if (r)
			break;
	}
	return r;
}

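/**
 * amdgpu_sync_get_fence - get the next fence to wait for
 *
 * @sync: sync object to get the fence from
 *
 * Remove entries from the hash table until an unsignaled fence is found
 * and return it, or NULL when all tracked fences have signaled already.
 * The caller takes over the fence reference.
 */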
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;

		hash_del(&e->node);
		kfree(e);

		if (!fence_is_signaled(f))
			return f;

		fence_put(f);
	}
	return NULL;
}

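/**
 * amdgpu_sync_wait - block on the CPU until all fences are signaled
 *
 * @sync: sync object to wait for
 *
 * Returns 0 on success or the first error from fence_wait().
 */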
int amdgpu_sync_wait(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = fence_wait(e->fence, false);
		if (r)
			return r;

		hash_del(&e->node);
		fence_put(e->fence);
		kfree(e);
	}

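	/* With semaphores enabled the per-ring fences are handled by
	 * amdgpu_sync_rings(), so there is no need to wait for them here.
	 */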
	if (amdgpu_enable_semaphores)
		return 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct fence *fence = sync->sync_to[i];

		if (!fence)
			continue;

		r = fence_wait(fence, false);
		if (r)
			return r;
	}

	return 0;
}

/**
 * amdgpu_sync_rings - sync ring to all registered fences
 *
 * @sync: sync object to use
 * @ring: ring that needs sync
 *
 * Ensure that all registered fences are signaled before letting
 * the ring continue. The caller must hold the ring lock.
 */
int amdgpu_sync_rings(struct amdgpu_sync *sync,
		      struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned count = 0;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *other = adev->rings[i];
		struct amdgpu_semaphore *semaphore;
		struct amdgpu_fence *fence;

		if (!sync->sync_to[i])
			continue;

		fence = to_amdgpu_fence(sync->sync_to[i]);

		/* check if we really need to sync */
		if (!amdgpu_enable_scheduler &&
		    !amdgpu_fence_need_sync(fence, ring))
			continue;

		/* prevent GPU deadlocks */
		if (!other->ready) {
			dev_err(adev->dev, "Syncing to a disabled ring!\n");
			return -EINVAL;
		}

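		/* With the scheduler enabled, or with semaphores disabled,
		 * just wait for the fence on the CPU instead of emitting
		 * semaphore commands.
		 */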
		if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
			r = fence_wait(sync->sync_to[i], true);
			if (r)
				return r;
			continue;
		}

		if (count >= AMDGPU_NUM_SYNCS) {
			/* not enough room, wait manually */
			r = fence_wait(&fence->base, false);
			if (r)
				return r;
			continue;
		}
		r = amdgpu_semaphore_create(adev, &semaphore);
		if (r)
			return r;

		sync->semaphores[count++] = semaphore;

		/* allocate enough space for sync command */
		r = amdgpu_ring_alloc(other, 16);
		if (r)
			return r;

		/* emit the signal semaphore */
		if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
			/* signaling wasn't successful, wait manually */
			amdgpu_ring_undo(other);
			r = fence_wait(&fence->base, false);
			if (r)
				return r;
			continue;
		}

		/* we assume the caller has already allocated space on the waiter's ring */
		if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
			/* waiting wasn't successful, wait manually */
			amdgpu_ring_undo(other);
			r = fence_wait(&fence->base, false);
			if (r)
				return r;
			continue;
		}

		amdgpu_ring_commit(other);
		amdgpu_fence_note_sync(fence, ring);
	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @adev: amdgpu_device pointer
 * @sync: sync object to use
 * @fence: fence which protects the freed up semaphores
 *
 * Free the sync object by dropping all fence references and freeing
 * all semaphores in it.
 */
void amdgpu_sync_free(struct amdgpu_device *adev,
		      struct amdgpu_sync *sync,
		      struct fence *fence)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		fence_put(e->fence);
		kfree(e);
	}

	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
		amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		fence_put(sync->sync_to[i]);

	fence_put(sync->last_vm_update);
}