/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

struct amdgpu_sync_entry {
        struct hlist_node       node;
        struct fence            *fence;
};

static struct kmem_cache *amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
        hash_init(sync->fences);
        sync->last_vm_update = NULL;
}
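
/*
 * A minimal usage sketch (illustration only, not code from this driver;
 * some_fence, adev and the error handling are assumed context):
 *
 *        struct amdgpu_sync sync;
 *        int r;
 *
 *        amdgpu_sync_create(&sync);
 *        r = amdgpu_sync_fence(adev, &sync, some_fence);
 *        if (r)
 *                goto error;
 *        ...
 *        amdgpu_sync_free(&sync);
 *
 * Fences are collected with amdgpu_sync_fence() and amdgpu_sync_resv()
 * and later consumed with amdgpu_sync_get_fence() or tested with
 * amdgpu_sync_is_idle().
 */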

/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
{
        struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

        if (s_fence) {
                struct amdgpu_ring *ring;

                ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
                return ring->adev == adev;
        }

        return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct fence *f)
{
        struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

        if (s_fence)
                return s_fence->owner;

        return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one is
 * later.
 */
static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
{
        if (*keep && fence_is_later(*keep, fence))
                return;

        fence_put(*keep);
        *keep = fence_get(fence);
}

/**
 * amdgpu_sync_add_later - add the fence to an existing hash entry
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 *
 * Tries to add the fence to an existing hash entry. Returns true when an
 * entry was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
{
        struct amdgpu_sync_entry *e;

        hash_for_each_possible(sync->fences, e, node, f->context) {
                if (unlikely(e->fence->context != f->context))
                        continue;

                amdgpu_sync_keep_later(&e->fence, f);
                return true;
        }
        return false;
}
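
/*
 * Note that the hash is keyed by fence context, so a sync object keeps at
 * most one fence per context: adding a fence from an already known context
 * simply replaces the stored fence when the new one is later.
 */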

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device
 * @sync: sync object to add the fence to
 * @f: fence to sync to
 *
 * Add the fence to the sync object. Returns 0 on success or -ENOMEM when
 * allocating a new hash entry fails.
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                      struct fence *f)
{
        struct amdgpu_sync_entry *e;

        if (!f)
                return 0;

        /* remember the latest VM update fence issued by our device */
        if (amdgpu_sync_same_dev(adev, f) &&
            amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
                amdgpu_sync_keep_later(&sync->last_vm_update, f);

        if (amdgpu_sync_add_later(sync, f))
                return 0;

        e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        hash_add(sync->fences, &e->node, f->context);
        e->fence = fence_get(f);
        return 0;
}
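
/*
 * Syncing to a single fence is then a one-liner (sketch; prev_fence is
 * assumed context):
 *
 *        r = amdgpu_sync_fence(adev, &sync, prev_fence);
 *
 * A NULL fence is silently accepted, so callers don't need to check for
 * that case themselves.
 */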

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fence
 * @owner: owner doing the synchronization, used to decide which shared
 *        fences can be skipped
 *
 * Sync to the exclusive fence and to those shared fences that are relevant
 * for the given owner.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
                     struct amdgpu_sync *sync,
                     struct reservation_object *resv,
                     void *owner)
{
        struct reservation_object_list *flist;
        struct fence *f;
        void *fence_owner;
        unsigned i;
        int r = 0;

        if (resv == NULL)
                return -EINVAL;

        /* always sync to the exclusive fence */
        f = reservation_object_get_excl(resv);
        r = amdgpu_sync_fence(adev, sync, f);

        flist = reservation_object_get_list(resv);
        if (!flist || r)
                return r;

        for (i = 0; i < flist->shared_count; ++i) {
                f = rcu_dereference_protected(flist->shared[i],
                                              reservation_object_held(resv));
                if (amdgpu_sync_same_dev(adev, f)) {
                        /* VM updates are only interesting
                         * for other VM updates and moves.
                         */
                        fence_owner = amdgpu_sync_get_owner(f);
                        if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
                            (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
                            ((owner == AMDGPU_FENCE_OWNER_VM) !=
                             (fence_owner == AMDGPU_FENCE_OWNER_VM)))
                                continue;

                        /* Ignore fences from the same owner as
                         * long as it isn't undefined.
                         */
                        if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
                            fence_owner == owner)
                                continue;
                }

                r = amdgpu_sync_fence(adev, sync, f);
                if (r)
                        break;
        }
        return r;
}
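
/*
 * A usage sketch (illustration only; bo and the surrounding locking and
 * error handling are assumed context, the reservation object must be
 * held):
 *
 *        struct amdgpu_bo *bo = ...;
 *
 *        r = amdgpu_sync_resv(adev, &sync, bo->tbo.resv,
 *                             AMDGPU_FENCE_OWNER_UNDEFINED);
 *
 * With AMDGPU_FENCE_OWNER_UNDEFINED nothing is filtered out and we sync
 * to every fence in the reservation object.
 */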

/**
 * amdgpu_sync_is_idle - test if all fences are signaled
 *
 * @sync: the sync object
 * @ring: optional ring to use for the test
 *
 * Returns true if all fences in the sync object are signaled or scheduled
 * to the ring (if provided).
 */
bool amdgpu_sync_is_idle(struct amdgpu_sync *sync,
                         struct amdgpu_ring *ring)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        int i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                struct fence *f = e->fence;
                struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

                if (ring && s_fence) {
                        /* For fences from the same ring it is sufficient
                         * when they are scheduled.
                         */
                        if (s_fence->sched == &ring->sched &&
                            fence_is_signaled(&s_fence->scheduled))
                                continue;
                }

                /* drop fences that already signaled as a side effect */
                if (fence_is_signaled(f)) {
                        hash_del(&e->node);
                        fence_put(f);
                        kmem_cache_free(amdgpu_sync_slab, e);
                        continue;
                }

                return false;
        }

        return true;
}
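
/*
 * The scheduled-fence shortcut above works because a ring executes jobs in
 * submission order: once a fence's job is scheduled to the same ring, a
 * later job on that ring can never overtake it, so the fence does not have
 * to be signaled yet.
 */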

/**
 * amdgpu_sync_cycle_fences - move fences from one sync object into another
 *
 * @dst: the destination sync object
 * @src: the source sync object
 * @fence: fence to add to source
 *
 * Moves all unsignaled fences from source into destination and makes fence
 * the only fence tracked by source. Returns 0 on success or -ENOMEM when
 * allocating the new source entry fails.
 */
int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
                             struct fence *fence)
{
        struct amdgpu_sync_entry *e, *newone;
        struct hlist_node *tmp;
        int i;

        /* Allocate the new entry before moving the old ones */
        newone = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
        if (!newone)
                return -ENOMEM;

        hash_for_each_safe(src->fences, i, tmp, e, node) {
                struct fence *f = e->fence;

                hash_del(&e->node);
                if (fence_is_signaled(f)) {
                        fence_put(f);
                        kmem_cache_free(amdgpu_sync_slab, e);
                        continue;
                }

                /* dst already tracks this context; amdgpu_sync_add_later()
                 * took its own reference, so drop the one held by the now
                 * superfluous entry
                 */
                if (amdgpu_sync_add_later(dst, f)) {
                        fence_put(f);
                        kmem_cache_free(amdgpu_sync_slab, e);
                        continue;
                }

                hash_add(dst->fences, &e->node, f->context);
        }

        hash_add(src->fences, &newone->node, fence->context);
        newone->fence = fence_get(fence);

        return 0;
}
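
/*
 * A usage sketch (illustration only; job_fence is assumed context): move
 * everything we still depend on into dst and let src track only the new
 * fence from now on:
 *
 *        r = amdgpu_sync_cycle_fences(&dst, &src, job_fence);
 *        if (r)
 *                return r;
 */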

/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 *
 * Gets and removes the next fence from the sync object that is not yet
 * signaled, or returns NULL when all fences are signaled or the sync
 * object is empty. The caller owns the returned reference.
 */
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        struct fence *f;
        int i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                f = e->fence;

                hash_del(&e->node);
                kmem_cache_free(amdgpu_sync_slab, e);

                if (!fence_is_signaled(f))
                        return f;

                fence_put(f);
        }
        return NULL;
}
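
/*
 * The usual drain loop looks like this (a sketch, not code from this file;
 * how to wait is caller policy and the returned reference must be
 * dropped):
 *
 *        struct fence *f;
 *        long r;
 *
 *        while ((f = amdgpu_sync_get_fence(&sync))) {
 *                r = fence_wait(f, false);
 *                fence_put(f);
 *                if (r)
 *                        break;
 *        }
 */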

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object by dropping all remaining fence references.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        unsigned i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                hash_del(&e->node);
                fence_put(e->fence);
                kmem_cache_free(amdgpu_sync_slab, e);
        }

        fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
        amdgpu_sync_slab = kmem_cache_create(
                "amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
                SLAB_HWCACHE_ALIGN, NULL);
        if (!amdgpu_sync_slab)
                return -ENOMEM;

        return 0;
}

/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
        kmem_cache_destroy(amdgpu_sync_slab);
}