/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_dbgmgr.h"

/*
 * Initial size for the array of queues.
 * The allocated size is doubled each time
 * it is exceeded up to MAX_PROCESS_QUEUES.
 */
#define INITIAL_QUEUE_ARRAY_SIZE 16

/*
 * Hash table of struct kfd_process (linked via field kfd_processes),
 * unique/indexed by mm_struct *.
 */
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_STATIC_SRCU(kfd_processes_srcu);

static struct workqueue_struct *kfd_process_wq;

struct kfd_process_release_work {
	struct work_struct kfd_work;
	struct kfd_process *p;
};

static struct kfd_process *find_process(const struct task_struct *thread);
static struct kfd_process *create_process(const struct task_struct *thread);

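/*
 * kfd_process_wq defers the final teardown of a kfd_process out of the
 * SRCU callback in kfd_process_destroy_delayed(): the release work needs
 * to take mutexes, which is not allowed in (atomic) callback context.
 */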
void kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
}

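/*
 * Look up the kfd_process for the given task or create a new one.
 * Creation is serialized with kfd_processes_mutex and registers an MMU
 * notifier, which is why mmap_sem is taken for writing here.
 */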
struct kfd_process *kfd_create_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/* Take mmap_sem because we call __mmu_notifier_register inside */
	down_write(&thread->mm->mmap_sem);

	/*
	 * Take the KFD processes mutex before starting process creation so
	 * that two threads of the same process cannot create two kfd_process
	 * structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process)
		pr_debug("Process already found\n");

	if (!process)
		process = create_process(thread);

	mutex_unlock(&kfd_processes_mutex);

	up_write(&thread->mm->mmap_sem);

	return process;
}

struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);

	return process;
}

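/*
 * Hash-table lookup keyed by the mm_struct pointer. The caller must hold
 * the kfd_processes_srcu read lock (or kfd_processes_mutex) so that the
 * returned entry cannot be freed while it is in use.
 */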
static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
					kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

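/*
 * Final teardown of a kfd_process, run from kfd_process_wq: unbind any
 * still-bound PASIDs from the IOMMU, free the per-device data, events,
 * PASID, doorbells, queue array and finally the process structure itself.
 */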
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process_release_work *my_work;
	struct kfd_process_device *pdd, *temp;
	struct kfd_process *p;

	my_work = (struct kfd_process_release_work *) work;

	p = my_work->p;

	pr_debug("Releasing process (pasid %d) in workqueue\n",
			p->pasid);

	mutex_lock(&p->mutex);

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				 per_device_list) {
		pr_debug("Releasing pdd (topology id %d) for process (pasid %d) in workqueue\n",
				pdd->dev->id, p->pasid);

		if (pdd->bound == PDD_BOUND)
			amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);

		list_del(&pdd->per_device_list);
		kfree(pdd);
	}

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	kfd_free_process_doorbells(p);

	mutex_unlock(&p->mutex);

	mutex_destroy(&p->mutex);

	kfree(p->queues);

	kfree(p);

	kfree(work);
}

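/*
 * SRCU callback scheduled from kfd_process_notifier_release(): drop the
 * extra mm reference taken there and hand the rest of the teardown to
 * kfd_process_wq. GFP_ATOMIC is needed because this runs in callback
 * (atomic) context.
 */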
static void kfd_process_destroy_delayed(struct rcu_head *rcu)
{
	struct kfd_process_release_work *work;
	struct kfd_process *p;

	p = container_of(rcu, struct kfd_process, rcu);
	WARN_ON(atomic_read(&p->mm->mm_count) <= 0);

	mmdrop(p->mm);

	work = kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC);

	if (work) {
		INIT_WORK((struct work_struct *) work, kfd_process_wq_release);
		work->p = p;
		queue_work(kfd_process_wq, (struct work_struct *) work);
	}
}

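/*
 * MMU notifier .release hook, called when the process address space is
 * torn down: unhash the process, drain its queues and any debug manager
 * attached to it, then schedule the delayed destruction via SRCU.
 */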
static void kfd_process_notifier_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd = NULL;

	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier srcu is read locked
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	mutex_lock(&p->mutex);

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Iterate over all process device data structures and check
	 * if we should delete debug managers
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if ((pdd->dev->dbgmgr) &&
				(pdd->dev->dbgmgr->pasid == p->pasid))
			kfd_dbgmgr_destroy(pdd->dev->dbgmgr);

	mutex_unlock(&p->mutex);

	/*
	 * Because we drop mm_count inside kfd_process_destroy_delayed
	 * and because the mmu_notifier_unregister function also drops
	 * mm_count, we need to take an extra count here.
	 */
	mmgrab(p->mm);
	mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm);
	mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
};

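/*
 * Allocate and initialize a new kfd_process for the given task: queue
 * array, PASID, doorbells, MMU notifier registration, event state,
 * process queue manager and apertures. Called with kfd_processes_mutex
 * and the task's mmap_sem held (see kfd_create_process()).
 */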
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);

	if (!process)
		goto err_alloc_process;

	process->queues = kmalloc_array(INITIAL_QUEUE_ARRAY_SIZE,
					sizeof(process->queues[0]), GFP_KERNEL);
	if (!process->queues)
		goto err_alloc_queues;

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	if (kfd_alloc_process_doorbells(process) < 0)
		goto err_alloc_doorbells;

	mutex_init(&process->mutex);

	process->mm = thread->mm;

	/* register notifier */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = __mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_mmu_notifier;

	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
			(uintptr_t)process->mm);

	process->lead_thread = thread->group_leader;

	process->queue_array_size = INITIAL_QUEUE_ARRAY_SIZE;

	INIT_LIST_HEAD(&process->per_device_data);

	kfd_event_init_process(process);

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	process->is_32bit_user_mode = in_compat_syscall();
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	return process;

err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	hash_del_rcu(&process->kfd_processes);
	synchronize_rcu();
	mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
	mutex_destroy(&process->mutex);
	kfd_free_process_doorbells(process);
err_alloc_doorbells:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfree(process->queues);
err_alloc_queues:
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

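/*
 * Return the per-device data (pdd) linking this process to the given
 * device, or NULL if the process has not been associated with it yet.
 */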
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	return NULL;
}

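/*
 * Allocate the per-device data for a (process, device) pair, initialize
 * its queue lists and add it to the process's per_device_data list.
 * Returns NULL if the allocation fails.
 */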
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (pdd != NULL) {
		pdd->dev = dev;
		INIT_LIST_HEAD(&pdd->qpd.queues_list);
		INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
		pdd->qpd.dqm = dev->dqm;
		pdd->process = p;
		pdd->bound = PDD_UNBOUND;
		pdd->already_dequeued = false;
		list_add(&pdd->per_device_list, &p->per_device_data);
	}

	return pdd;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	if (pdd->bound == PDD_BOUND) {
		return pdd;
	} else if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
		pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n");
		return ERR_PTR(-EINVAL);
	}

	err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
	if (err < 0)
		return ERR_PTR(err);

	pdd->bound = PDD_BOUND;

	return pdd;
}

/*
 * Bind processes to the device that have been temporarily unbound
 * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
 */
int kfd_bind_processes_to_device(struct kfd_dev *dev)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;
	int err = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(dev, p);
		if (pdd->bound != PDD_BOUND_SUSPENDED) {
			mutex_unlock(&p->mutex);
			continue;
		}

		err = amd_iommu_bind_pasid(dev->pdev, p->pasid,
				p->lead_thread);
		if (err < 0) {
			pr_err("unexpected pasid %d binding failure\n",
					p->pasid);
			mutex_unlock(&p->mutex);
			break;
		}

		pdd->bound = PDD_BOUND;
		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return err;
}

/*
 * Temporarily unbind currently bound processes from the device and
 * mark them as PDD_BOUND_SUSPENDED. These processes will be restored
 * to PDD_BOUND state in kfd_bind_processes_to_device.
 */
void kfd_unbind_processes_from_device(struct kfd_dev *dev)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp, temp_bound, temp_pasid;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(dev, p);
		temp_bound = pdd->bound;
		temp_pasid = p->pasid;
		if (pdd->bound == PDD_BOUND)
			pdd->bound = PDD_BOUND_SUSPENDED;
		mutex_unlock(&p->mutex);

		if (temp_bound == PDD_BOUND)
			amd_iommu_unbind_pasid(dev->pdev, temp_pasid);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);
}

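/*
 * Tear down per-device state for the process identified by pasid while
 * the PASID binding still exists: destroy its debug manager on this
 * device and dequeue its queues. Note that kfd_lookup_process_by_pasid()
 * returns with p->mutex held, hence the mutex_unlock() at the end.
 */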
void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd;

	/*
	 * Look for the process that matches the pasid. If there is no such
	 * process, we either released it in amdkfd's own notifier, or there
	 * is a bug. Unfortunately, there is no way to tell...
	 */
	p = kfd_lookup_process_by_pasid(pasid);
	if (!p)
		return;

	pr_debug("Unbinding process %d from IOMMU\n", pasid);

	if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
		kfd_dbgmgr_destroy(dev->dbgmgr);

	pdd = kfd_get_process_device_data(dev, p);
	if (pdd)
		/* For a GPU relying on the IOMMU, we need to dequeue here
		 * while the PASID is still bound.
		 */
		kfd_process_dequeue_from_device(pdd);

	mutex_unlock(&p->mutex);
}

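/*
 * Iterator helpers over a process's per-device data list; typically used
 * together as first/next, with kfd_has_process_device_data() as the
 * emptiness check.
 */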
struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !(list_empty(&p->per_device_data));
}

/* This returns with process->mutex locked, or NULL if no process with
 * the given pasid was found.
 */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
	struct kfd_process *p;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			mutex_lock(&p->mutex);
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}