/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_dbgmgr.h"

/*
 * Hash table of struct kfd_process (linked via the kfd_processes node).
 * Unique/indexed by mm_struct*
 */
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_STATIC_SRCU(kfd_processes_srcu);

static struct workqueue_struct *kfd_process_wq;

struct kfd_process_release_work {
	struct work_struct kfd_work;
	struct kfd_process *p;
};

static struct kfd_process *find_process(const struct task_struct *thread);
static struct kfd_process *create_process(const struct task_struct *thread);
static int kfd_process_init_cwsr(struct kfd_process *p, struct file *filep);

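/*
 * Work queue on which kfd_process_destroy_delayed() queues
 * kfd_process_wq_release() to tear down a process asynchronously.
 */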
void kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
}

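/*
 * Return the kfd_process for the calling task, creating it if a prior open of
 * /dev/kfd has not already done so, and set up the CWSR trap handler mapping
 * on each supporting device.
 */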
struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/* Take mmap_sem because we call __mmu_notifier_register inside */
	down_write(&thread->mm->mmap_sem);

	/*
	 * Take the kfd_processes mutex before starting process creation so
	 * that two threads of the same process cannot create two kfd_process
	 * structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process)
		pr_debug("Process already found\n");

	if (!process)
		process = create_process(thread);

	mutex_unlock(&kfd_processes_mutex);

	up_write(&thread->mm->mmap_sem);

	kfd_process_init_cwsr(process, filep);

	return process;
}

struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);

	return process;
}

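/*
 * Look up the kfd_process registered for @mm in kfd_processes_table.
 * Callers hold kfd_processes_mutex or an SRCU read-side critical section.
 */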
static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
					kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

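/*
 * Deferred process teardown, run on kfd_process_wq: unbind each bound device
 * PASID, free the CWSR pages and per-device data, release events, the PASID
 * and doorbells, and finally free the kfd_process itself.
 */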
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process_release_work *my_work;
	struct kfd_process_device *pdd, *temp;
	struct kfd_process *p;

	my_work = (struct kfd_process_release_work *) work;

	p = my_work->p;

	pr_debug("Releasing process (pasid %d) in workqueue\n",
			p->pasid);

	mutex_lock(&p->mutex);

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
							per_device_list) {
		pr_debug("Releasing pdd (topology id %d) for process (pasid %d) in workqueue\n",
				pdd->dev->id, p->pasid);

		if (pdd->bound == PDD_BOUND)
			amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);

		list_del(&pdd->per_device_list);

		if (pdd->qpd.cwsr_kaddr)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				get_order(KFD_CWSR_TBA_TMA_SIZE));

		kfree(pdd);
	}

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	kfd_free_process_doorbells(p);

	mutex_unlock(&p->mutex);

	mutex_destroy(&p->mutex);

	kfree(p);

	kfree(work);
}

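/*
 * SRCU callback scheduled from the release notifier: drop the extra mm
 * reference taken there and queue the final release work.
 */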
static void kfd_process_destroy_delayed(struct rcu_head *rcu)
{
	struct kfd_process_release_work *work;
	struct kfd_process *p;

	p = container_of(rcu, struct kfd_process, rcu);

	mmdrop(p->mm);

	work = kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC);

	if (work) {
		INIT_WORK((struct work_struct *) work, kfd_process_wq_release);
		work->p = p;
		queue_work(kfd_process_wq, (struct work_struct *) work);
	}
}

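/*
 * mmu_notifier release callback, invoked when the process address space goes
 * away: unhash the process, tear down debug registration and queues, and
 * schedule the deferred release.
 */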
static void kfd_process_notifier_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd = NULL;

	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier srcu is read locked
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	mutex_lock(&p->mutex);

	/* Iterate over all process device data structures. If a pdd is in
	 * debug mode, force unregistration first so that the queues can be
	 * destroyed.
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;

		mutex_lock(kfd_get_dbgmgr_mutex());
		if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
			if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
				kfd_dbgmgr_destroy(dev->dbgmgr);
				dev->dbgmgr = NULL;
			}
		}
		mutex_unlock(kfd_get_dbgmgr_mutex());
	}

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	mutex_unlock(&p->mutex);

	/*
	 * Because we drop mm_count inside kfd_process_destroy_delayed
	 * and because the mmu_notifier_unregister function also drops
	 * mm_count we need to take an extra count here.
	 */
	mmgrab(p->mm);
	mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm);
	mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
};

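/*
 * Map the CWSR (compute wave save/restore) trap handler for every device that
 * has CWSR enabled and copy the trap handler ISA into the per-process buffer
 * (qpd->cwsr_kaddr) that backs the mapping.
 */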
static int kfd_process_init_cwsr(struct kfd_process *p, struct file *filep)
{
	int err = 0;
	unsigned long offset;
	struct kfd_process_device *temp, *pdd = NULL;
	struct kfd_dev *dev = NULL;
	struct qcm_process_device *qpd = NULL;

	mutex_lock(&p->mutex);
	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				per_device_list) {
		dev = pdd->dev;
		qpd = &pdd->qpd;
		if (!dev->cwsr_enabled || qpd->cwsr_kaddr)
			continue;
		offset = (dev->id | KFD_MMAP_RESERVED_MEM_MASK) << PAGE_SHIFT;
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			pr_err("Failure to set tba address. error -%d.\n",
				(int)qpd->tba_addr);
			err = qpd->tba_addr;
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			goto out;
		}

		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}
out:
	mutex_unlock(&p->mutex);
	return err;
}

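/*
 * Allocate and initialize a new kfd_process for @thread and add it to the
 * per-mm process hash table.
 */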
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);

	if (!process)
		goto err_alloc_process;

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	if (kfd_alloc_process_doorbells(process) < 0)
		goto err_alloc_doorbells;

	mutex_init(&process->mutex);

	process->mm = thread->mm;

	/* register notifier */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = __mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_mmu_notifier;

	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
			(uintptr_t)process->mm);

	process->lead_thread = thread->group_leader;

	INIT_LIST_HEAD(&process->per_device_data);

	kfd_event_init_process(process);

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* Init process apertures */
	process->is_32bit_user_mode = in_compat_syscall();
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	return process;

err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	hash_del_rcu(&process->kfd_processes);
	synchronize_rcu();
	mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
	mutex_destroy(&process->mutex);
	kfd_free_process_doorbells(process);
err_alloc_doorbells:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	return NULL;
}

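/*
 * Allocate per-device data (pdd) for @p on @dev and link it into the
 * process's device list.
 */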
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (pdd != NULL) {
		pdd->dev = dev;
		INIT_LIST_HEAD(&pdd->qpd.queues_list);
		INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
		pdd->qpd.dqm = dev->dqm;
		pdd->qpd.pqm = &p->pqm;
		pdd->process = p;
		pdd->bound = PDD_UNBOUND;
		pdd->already_dequeued = false;
		list_add(&pdd->per_device_list, &p->per_device_data);
	}

	return pdd;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	if (pdd->bound == PDD_BOUND) {
		return pdd;
	} else if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
		pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n");
		return ERR_PTR(-EINVAL);
	}

	err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
	if (err < 0)
		return ERR_PTR(err);

	pdd->bound = PDD_BOUND;

	return pdd;
}

/*
 * Bind processes to the device that have been temporarily unbound
 * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
 */
int kfd_bind_processes_to_device(struct kfd_dev *dev)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;
	int err = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(dev, p);
		if (pdd->bound != PDD_BOUND_SUSPENDED) {
			mutex_unlock(&p->mutex);
			continue;
		}

		err = amd_iommu_bind_pasid(dev->pdev, p->pasid,
				p->lead_thread);
		if (err < 0) {
			pr_err("Unexpected pasid %d binding failure\n",
					p->pasid);
			mutex_unlock(&p->mutex);
			break;
		}

		pdd->bound = PDD_BOUND;
		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return err;
}

/*
 * Mark currently bound processes as PDD_BOUND_SUSPENDED. These
 * processes will be restored to PDD_BOUND state in
 * kfd_bind_processes_to_device.
 */
void kfd_unbind_processes_from_device(struct kfd_dev *dev)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(dev, p);

		if (pdd->bound == PDD_BOUND)
			pdd->bound = PDD_BOUND_SUSPENDED;
		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);
}

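/*
 * Called when the IOMMU unbinds a pasid from @dev: tear down any debug
 * registration for the process and dequeue its queues on that device.
 * kfd_lookup_process_by_pasid() returns with p->mutex held; it is dropped
 * at the end of this function.
 */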
void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd;

	/*
	 * Look for the process that matches the pasid. If there is no such
	 * process, we either released it in amdkfd's own notifier, or there
	 * is a bug. Unfortunately, there is no way to tell...
	 */
	p = kfd_lookup_process_by_pasid(pasid);
	if (!p)
		return;

	pr_debug("Unbinding process %d from IOMMU\n", pasid);

	mutex_lock(kfd_get_dbgmgr_mutex());

	if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
		if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
			kfd_dbgmgr_destroy(dev->dbgmgr);
			dev->dbgmgr = NULL;
		}
	}

	mutex_unlock(kfd_get_dbgmgr_mutex());

	pdd = kfd_get_process_device_data(dev, p);
	if (pdd)
		/* For GPUs relying on the IOMMU, we need to dequeue here
		 * while the PASID is still bound.
		 */
		kfd_process_dequeue_from_device(pdd);

	mutex_unlock(&p->mutex);
}

struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !(list_empty(&p->per_device_data));
}

/* This returns with process->mutex locked. */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
	struct kfd_process *p;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			mutex_lock(&p->mutex);
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

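/*
 * Back a reserved-memory mmap with a freshly allocated, zeroed per-process
 * CWSR buffer; vma->vm_pgoff carries the device id.
 */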
int kfd_reserved_mem_mmap(struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_dev *dev = kfd_device_by_id(vma->vm_pgoff);
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if (!dev)
		return -EINVAL;
	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
			get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}