/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_dbgmgr.h"

/*
 * Hash table of struct kfd_process (field kfd_processes),
 * unique/indexed by mm_struct *.
 */
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_STATIC_SRCU(kfd_processes_srcu);

static struct workqueue_struct *kfd_process_wq;

static struct kfd_process *find_process(const struct task_struct *thread);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread);
static int kfd_process_init_cwsr(struct kfd_process *p, struct file *filep);

void kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
}

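/*
 * Called on open of /dev/kfd. Returns the kfd_process of the calling
 * task, creating it on first open. The struct file is needed so that
 * kfd_process_init_cwsr() can vm_mmap() the CWSR trap handler pages
 * through it.
 */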
struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/* Take mmap_sem because we call __mmu_notifier_register inside */
	down_write(&thread->mm->mmap_sem);

	/*
	 * Take the kfd processes mutex before starting process creation
	 * so that two threads of the same process cannot race and create
	 * two kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process)
		pr_debug("Process already found\n");

	if (!process)
		process = create_process(thread);

	mutex_unlock(&kfd_processes_mutex);

	up_write(&thread->mm->mmap_sem);

	/* create_process() may have failed; don't dereference an ERR_PTR */
	if (!IS_ERR(process))
		kfd_process_init_cwsr(process, filep);

	return process;
}

struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);

	return process;
}

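/*
 * Look up a kfd_process by mm_struct. The caller must be in an SRCU
 * read-side critical section of kfd_processes_srcu, or hold
 * kfd_processes_mutex, to keep the hash table entry stable.
 */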
static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
				   kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

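/*
 * Release a process reference. References are taken by kref_init() in
 * create_process() and kref_get() in kfd_lookup_process_by_pasid();
 * the final put frees the process asynchronously on kfd_process_wq.
 */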
void kfd_unref_process(struct kfd_process *p)
{
	kref_put(&p->ref, kfd_process_ref_release);
}

/* No process locking is needed in this function, because the process
 * is no longer findable. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);
	struct kfd_process_device *pdd, *temp;

	pr_debug("Releasing process (pasid %d) in workqueue\n",
		 p->pasid);

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				 per_device_list) {
		pr_debug("Releasing pdd (topology id %d) for process (pasid %d) in workqueue\n",
			 pdd->dev->id, p->pasid);

		if (pdd->bound == PDD_BOUND)
			amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);

		list_del(&pdd->per_device_list);

		if (pdd->qpd.cwsr_kaddr)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				   get_order(KFD_CWSR_TBA_TMA_SIZE));

		kfree(pdd);
	}

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	kfd_free_process_doorbells(p);

	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}

static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}

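/*
 * RCU callback used by the MMU notifier release path below; runs
 * after an SRCU grace period and drops the initial process reference
 * taken in create_process().
 */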
static void kfd_process_destroy_delayed(struct rcu_head *rcu)
{
	struct kfd_process *p = container_of(rcu, struct kfd_process, rcu);

	kfd_unref_process(p);
}

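/*
 * MMU notifier .release hook, called when the process address space
 * goes away (exit or exec). Unpublishes the process from the hash
 * table, tears down its queues and any debugger registration, and
 * defers the final free until all SRCU readers are done.
 */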
static void kfd_process_notifier_release(struct mmu_notifier *mn,
					 struct mm_struct *mm)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd = NULL;

	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier srcu is read locked
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	mutex_lock(&p->mutex);

	/* Iterate over all process device data structures; if a pdd is in
	 * debug mode, force unregistration first so that the queues can
	 * then be destroyed.
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;

		mutex_lock(kfd_get_dbgmgr_mutex());
		if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
			if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
				kfd_dbgmgr_destroy(dev->dbgmgr);
				dev->dbgmgr = NULL;
			}
		}
		mutex_unlock(kfd_get_dbgmgr_mutex());
	}

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;

	mutex_unlock(&p->mutex);

	mmu_notifier_unregister_no_release(&p->mmu_notifier, mm);
	mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
};

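/*
 * Set up CWSR (compute wave save/restore) for every device of the
 * process that supports it: vm_mmap() /dev/kfd at the reserved-memory
 * offset for the device (handled by kfd_reserved_mem_mmap() below) to
 * create the user mapping of the trap handler, then copy the trap
 * handler ISA into it through the kernel mapping. TBA/TMA are the
 * trap base address and trap memory area used by the hardware.
 */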
static int kfd_process_init_cwsr(struct kfd_process *p, struct file *filep)
{
	int err = 0;
	unsigned long offset;
	struct kfd_process_device *temp, *pdd = NULL;
	struct kfd_dev *dev = NULL;
	struct qcm_process_device *qpd = NULL;

	mutex_lock(&p->mutex);
	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				 per_device_list) {
		dev = pdd->dev;
		qpd = &pdd->qpd;
		if (!dev->cwsr_enabled || qpd->cwsr_kaddr)
			continue;
		offset = (dev->id | KFD_MMAP_RESERVED_MEM_MASK) << PAGE_SHIFT;
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			pr_err("Failure to set tba address. error %d.\n",
			       (int)qpd->tba_addr);
			err = qpd->tba_addr;
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			goto out;
		}

		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}
out:
	mutex_unlock(&p->mutex);
	return err;
}

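/*
 * Allocate a new kfd_process for @thread, set up its PASID, doorbell
 * pages, reference count, MMU notifier, event state, process queue
 * manager and apertures, and publish it in kfd_processes_table.
 * Called with kfd_processes_mutex and mmap_sem held.
 */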
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);

	if (!process)
		goto err_alloc_process;

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	if (kfd_alloc_process_doorbells(process) < 0)
		goto err_alloc_doorbells;

	kref_init(&process->ref);

	mutex_init(&process->mutex);

	process->mm = thread->mm;

	/* register notifier */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = __mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_mmu_notifier;

	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
		     (uintptr_t)process->mm);

	process->lead_thread = thread->group_leader;
	get_task_struct(process->lead_thread);

	INIT_LIST_HEAD(&process->per_device_data);

	kfd_event_init_process(process);

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	process->is_32bit_user_mode = in_compat_syscall();
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	return process;

err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	hash_del_rcu(&process->kfd_processes);
	synchronize_rcu();
	mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
	mutex_destroy(&process->mutex);
	kfd_free_process_doorbells(process);
err_alloc_doorbells:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	return NULL;
}

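/*
 * Allocate the per-process-per-device data (pdd) for @dev and link it
 * into @p's device list. Returns NULL on allocation failure.
 */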
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							   struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (!pdd)
		return NULL;

	pdd->dev = dev;
	INIT_LIST_HEAD(&pdd->qpd.queues_list);
	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
	pdd->qpd.dqm = dev->dqm;
	pdd->qpd.pqm = &p->pqm;
	pdd->process = p;
	pdd->bound = PDD_UNBOUND;
	pdd->already_dequeued = false;
	list_add(&pdd->per_device_list, &p->per_device_data);

	return pdd;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						      struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	if (pdd->bound == PDD_BOUND) {
		return pdd;
	} else if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
		pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n");
		return ERR_PTR(-EINVAL);
	}

	err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
	if (err < 0)
		return ERR_PTR(err);

	pdd->bound = PDD_BOUND;

	return pdd;
}

/*
 * Bind processes back to the device if they were temporarily unbound
 * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
 */
int kfd_bind_processes_to_device(struct kfd_dev *dev)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;
	int err = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(dev, p);
		/* pdd may be NULL if the process never used this device */
		if (!pdd || pdd->bound != PDD_BOUND_SUSPENDED) {
			mutex_unlock(&p->mutex);
			continue;
		}

		err = amd_iommu_bind_pasid(dev->pdev, p->pasid,
					   p->lead_thread);
		if (err < 0) {
			pr_err("Unexpected pasid %d binding failure\n",
			       p->pasid);
			mutex_unlock(&p->mutex);
			break;
		}

		pdd->bound = PDD_BOUND;
		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return err;
}

/*
 * Mark currently bound processes as PDD_BOUND_SUSPENDED. These
 * processes will be restored to PDD_BOUND state in
 * kfd_bind_processes_to_device.
 */
void kfd_unbind_processes_from_device(struct kfd_dev *dev)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(dev, p);

		/* pdd may be NULL if the process never used this device */
		if (pdd && pdd->bound == PDD_BOUND)
			pdd->bound = PDD_BOUND_SUSPENDED;
		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);
}

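/*
 * Callback presumably invoked by the IOMMU driver when it tears down
 * a PASID binding for @dev behind amdkfd's back. Destroys any
 * debugger registration and dequeues the process from the device
 * while the PASID binding still exists.
 */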
void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd;

	/*
	 * Look for the process that matches the pasid. If there is no such
	 * process, we either released it in amdkfd's own notifier, or there
	 * is a bug. Unfortunately, there is no way to tell...
	 */
	p = kfd_lookup_process_by_pasid(pasid);
	if (!p)
		return;

	pr_debug("Unbinding process %d from IOMMU\n", pasid);

	mutex_lock(kfd_get_dbgmgr_mutex());

	if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
		if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
			kfd_dbgmgr_destroy(dev->dbgmgr);
			dev->dbgmgr = NULL;
		}
	}

	mutex_unlock(kfd_get_dbgmgr_mutex());

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(dev, p);
	if (pdd)
		/* For GPUs relying on the IOMMU, we need to dequeue here
		 * while the PASID is still bound.
		 */
		kfd_process_dequeue_from_device(pdd);

	mutex_unlock(&p->mutex);

	kfd_unref_process(p);
}

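/* Iterator helpers over a process's per-device data list */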
struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !(list_empty(&p->per_device_data));
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
	struct kfd_process *p, *ret_p = NULL;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			kref_get(&p->ref);
			ret_p = p;
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return ret_p;
}

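/*
 * mmap handler for the per-device reserved memory range of /dev/kfd:
 * allocates zeroed pages for the CWSR trap handler and trap memory
 * area, remaps them into the user VMA created by
 * kfd_process_init_cwsr(), and keeps the kernel virtual address in
 * qpd->cwsr_kaddr so the trap handler ISA can be copied in.
 */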
int kfd_reserved_mem_mmap(struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_dev *dev = kfd_device_by_id(vma->vm_pgoff);
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if (!dev)
		return -EINVAL;
	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}

#if defined(CONFIG_DEBUG_FS)

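/* debugfs: dump the MQDs of all queues of all known processes */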
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID %d:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif