/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_dbgmgr.h"

/*
 * List of struct kfd_process (field kfd_process).
 * Unique/indexed by mm_struct*
 */
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_STATIC_SRCU(kfd_processes_srcu);

static struct workqueue_struct *kfd_process_wq;

struct kfd_process_release_work {
	struct work_struct kfd_work;
	struct kfd_process *p;
};

static struct kfd_process *find_process(const struct task_struct *thread);
static struct kfd_process *create_process(const struct task_struct *thread);
static int kfd_process_init_cwsr(struct kfd_process *p, struct file *filep);

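/*
 * kfd_process_wq runs kfd_process_wq_release(): final process teardown
 * is deferred from the (atomic) SRCU callback in
 * kfd_process_destroy_delayed() to process context, where unbinding
 * PASIDs and taking mutexes is allowed. Creation is idempotent.
 */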
void kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
}

struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/* Take mmap_sem because we call __mmu_notifier_register inside */
	down_write(&thread->mm->mmap_sem);

	/*
	 * Take the kfd_processes_mutex before starting process creation
	 * so that two threads of the same process cannot race and create
	 * two kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process)
		pr_debug("Process already found\n");
	else
		process = create_process(thread);

	mutex_unlock(&kfd_processes_mutex);

	up_write(&thread->mm->mmap_sem);

	/* create_process() may have failed; don't dereference an ERR_PTR */
	if (!IS_ERR(process))
		kfd_process_init_cwsr(process, filep);

	return process;
}

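/* Look up an existing kfd_process for @thread; does not create one */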
struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);

	return process;
}

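/* Caller must hold kfd_processes_mutex or the kfd_processes_srcu read lock */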
static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
				   kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

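/*
 * Final stage of process teardown, deferred to the workqueue so it may
 * sleep: unbind still-bound PASIDs and free per-device data, CWSR
 * pages, events, the PASID and doorbells.
 */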
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process_release_work *my_work;
	struct kfd_process_device *pdd, *temp;
	struct kfd_process *p;

	my_work = (struct kfd_process_release_work *) work;

	p = my_work->p;

	pr_debug("Releasing process (pasid %d) in workqueue\n",
			p->pasid);

	mutex_lock(&p->mutex);

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				 per_device_list) {
		pr_debug("Releasing pdd (topology id %d) for process (pasid %d) in workqueue\n",
				pdd->dev->id, p->pasid);

		if (pdd->bound == PDD_BOUND)
			amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);

		list_del(&pdd->per_device_list);

		if (pdd->qpd.cwsr_kaddr)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				get_order(KFD_CWSR_TBA_TMA_SIZE));

		kfree(pdd);
	}

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	kfd_free_process_doorbells(p);

	mutex_unlock(&p->mutex);

	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);

	kfree(work);
}

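/*
 * SRCU callback scheduled by kfd_process_notifier_release(). Runs in
 * atomic (RCU callback) context, hence the GFP_ATOMIC allocation; the
 * sleeping parts of the teardown are queued on kfd_process_wq.
 */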
static void kfd_process_destroy_delayed(struct rcu_head *rcu)
{
	struct kfd_process_release_work *work;
	struct kfd_process *p;

	p = container_of(rcu, struct kfd_process, rcu);

	mmdrop(p->mm);

	work = kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC);

	if (work) {
		INIT_WORK((struct work_struct *) work, kfd_process_wq_release);
		work->p = p;
		queue_work(kfd_process_wq, (struct work_struct *) work);
	}
}

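/*
 * MMU-notifier release hook: the process address space is being torn
 * down (exit or exec). Destroy debug state and all queues now, and
 * schedule the final release after an SRCU grace period.
 */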
static void kfd_process_notifier_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd = NULL;

	/*
	 * The kfd_process structure cannot be freed here because the
	 * mmu_notifier SRCU is read-locked.
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	mutex_lock(&p->mutex);

	/* Iterate over all process device data structures. If a pdd is
	 * in debug mode, force unregistration first so that the queues
	 * can be destroyed afterwards.
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;

		mutex_lock(kfd_get_dbgmgr_mutex());
		if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
			if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
				kfd_dbgmgr_destroy(dev->dbgmgr);
				dev->dbgmgr = NULL;
			}
		}
		mutex_unlock(kfd_get_dbgmgr_mutex());
	}

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	mutex_unlock(&p->mutex);

	/*
	 * Because we drop mm_count inside kfd_process_destroy_delayed
	 * and because mmu_notifier_unregister also drops mm_count, we
	 * need to take an extra reference here.
	 */
	mmgrab(p->mm);
	mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm);
	mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
};

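/*
 * Map the CWSR (compute wave save/restore) trap-handler pages into the
 * process address space for every CWSR-enabled device that lacks a
 * mapping, then copy the trap-handler ISA into the kernel-side buffer
 * (cwsr_kaddr) set up by kfd_reserved_mem_mmap().
 */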
static int kfd_process_init_cwsr(struct kfd_process *p, struct file *filep)
{
	int err = 0;
	unsigned long offset;
	struct kfd_process_device *temp, *pdd = NULL;
	struct kfd_dev *dev = NULL;
	struct qcm_process_device *qpd = NULL;

	mutex_lock(&p->mutex);
	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				 per_device_list) {
		dev = pdd->dev;
		qpd = &pdd->qpd;
		if (!dev->cwsr_enabled || qpd->cwsr_kaddr)
			continue;
		offset = (dev->id | KFD_MMAP_RESERVED_MEM_MASK) << PAGE_SHIFT;
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			pr_err("Failure to set tba address, error %d\n",
			       (int)qpd->tba_addr);
			err = qpd->tba_addr;
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			goto out;
		}

		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("Set tba: 0x%llx, tma: 0x%llx, cwsr_kaddr: %p for pqm\n",
			qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}
out:
	mutex_unlock(&p->mutex);
	return err;
}

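/*
 * Allocate and initialize a new kfd_process for @thread: PASID,
 * doorbells, MMU notifier, events, process queue manager and
 * apertures. Called with kfd_processes_mutex and the task's mmap_sem
 * held (__mmu_notifier_register() requires mmap_sem).
 */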
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);

	if (!process)
		goto err_alloc_process;

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	if (kfd_alloc_process_doorbells(process) < 0)
		goto err_alloc_doorbells;

	mutex_init(&process->mutex);

	process->mm = thread->mm;

	/* register notifier */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = __mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_mmu_notifier;

	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
			(uintptr_t)process->mm);

	process->lead_thread = thread->group_leader;
	get_task_struct(process->lead_thread);

	INIT_LIST_HEAD(&process->per_device_data);

	kfd_event_init_process(process);

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	process->is_32bit_user_mode = in_compat_syscall();
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	return process;

err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	hash_del_rcu(&process->kfd_processes);
	synchronize_rcu();
	mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
	mutex_destroy(&process->mutex);
	kfd_free_process_doorbells(process);
err_alloc_doorbells:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	return NULL;
}

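/* Allocate the per-device data (pdd) linking @p to @dev, initially unbound */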
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (pdd != NULL) {
		pdd->dev = dev;
		INIT_LIST_HEAD(&pdd->qpd.queues_list);
		INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
		pdd->qpd.dqm = dev->dqm;
		pdd->qpd.pqm = &p->pqm;
		pdd->process = p;
		pdd->bound = PDD_UNBOUND;
		pdd->already_dequeued = false;
		list_add(&pdd->per_device_list, &p->per_device_data);
	}

	return pdd;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	if (pdd->bound == PDD_BOUND) {
		return pdd;
	} else if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
		pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n");
		return ERR_PTR(-EINVAL);
	}

	err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
	if (err < 0)
		return ERR_PTR(err);

	pdd->bound = PDD_BOUND;

	return pdd;
}

/*
 * Bind processes to the device that have been temporarily unbound
 * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
 */
int kfd_bind_processes_to_device(struct kfd_dev *dev)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;
	int err = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(dev, p);
		/* pdd may be NULL if the process never opened this device */
		if (!pdd || pdd->bound != PDD_BOUND_SUSPENDED) {
			mutex_unlock(&p->mutex);
			continue;
		}

		err = amd_iommu_bind_pasid(dev->pdev, p->pasid,
				p->lead_thread);
		if (err < 0) {
			pr_err("Unexpected pasid %d binding failure\n",
					p->pasid);
			mutex_unlock(&p->mutex);
			break;
		}

		pdd->bound = PDD_BOUND;
		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return err;
}

/*
 * Mark currently bound processes as PDD_BOUND_SUSPENDED. These
 * processes will be restored to PDD_BOUND state in
 * kfd_bind_processes_to_device.
 */
void kfd_unbind_processes_from_device(struct kfd_dev *dev)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(dev, p);

		/* pdd may be NULL if the process never opened this device */
		if (pdd && pdd->bound == PDD_BOUND)
			pdd->bound = PDD_BOUND_SUSPENDED;
		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);
}

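/*
 * Callback invoked when the IOMMU driver unbinds a PASID from the
 * device behind amdkfd's back: tear down any debug session and dequeue
 * the process from the device while the PASID is still usable.
 */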
void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd;

	/*
	 * Look for the process that matches the pasid. If there is no such
	 * process, we either released it in amdkfd's own notifier, or there
	 * is a bug. Unfortunately, there is no way to tell...
	 */
	p = kfd_lookup_process_by_pasid(pasid);
	if (!p)
		return;

	pr_debug("Unbinding process %d from IOMMU\n", pasid);

	mutex_lock(kfd_get_dbgmgr_mutex());

	if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
		if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
			kfd_dbgmgr_destroy(dev->dbgmgr);
			dev->dbgmgr = NULL;
		}
	}

	mutex_unlock(kfd_get_dbgmgr_mutex());

	pdd = kfd_get_process_device_data(dev, p);
	if (pdd)
		/* For GPUs relying on the IOMMU, we need to dequeue here
		 * while the PASID is still bound.
		 */
		kfd_process_dequeue_from_device(pdd);

	/* kfd_lookup_process_by_pasid() returned with the mutex held */
	mutex_unlock(&p->mutex);
}

struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !(list_empty(&p->per_device_data));
}

/* This returns with process->mutex locked. */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
	struct kfd_process *p;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			mutex_lock(&p->mutex);
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

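/*
 * mmap handler for the reserved (CWSR) range of /dev/kfd: allocates
 * the kernel-side CWSR buffer (cwsr_kaddr) and maps it into the
 * calling process. Reached via the vm_mmap() call in
 * kfd_process_init_cwsr().
 */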
int kfd_reserved_mem_mmap(struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_dev *dev = kfd_device_by_id(vma->vm_pgoff);
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if (!dev)
		return -EINVAL;
	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}

#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID %d:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif