/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_dbgmgr.h"

/*
 * List of struct kfd_process (field kfd_processes).
 * Unique/indexed by mm_struct*.
 */
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

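/*
 * Readers look up the table under kfd_processes_srcu (see find_process());
 * the removal path in kfd_process_notifier_release() unhashes the entry
 * and calls synchronize_srcu() before the process can be torn down.
 */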
DEFINE_STATIC_SRCU(kfd_processes_srcu);

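/* Runs the deferred process teardown, kfd_process_wq_release() */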
static struct kfd_process_wq_placeholder *__unused_marker; /* removed below */
static struct kfd_process *find_process(const struct task_struct *thread);
static struct kfd_process *create_process(const struct task_struct *thread);
static int kfd_process_init_cwsr(struct kfd_process *p, struct file *filep);

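/*
 * Allocate the workqueue used for deferred process teardown. The NULL
 * check makes repeated calls harmless; kfd_process_destroy_wq() undoes it.
 */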
void kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
}

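/*
 * Create (or find) the kfd_process for the current task. Called when a
 * process opens /dev/kfd; a caller would typically look like this sketch
 * (the surrounding open handler lives in kfd_chardev.c):
 *
 *	struct kfd_process *process = kfd_create_process(filep);
 *
 *	if (IS_ERR(process))
 *		return PTR_ERR(process);
 *
 * Returns a valid pointer or an ERR_PTR() value, never NULL.
 */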
struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/* Take mmap_sem because we call __mmu_notifier_register inside */
	down_write(&thread->mm->mmap_sem);

	/*
	 * Take the kfd processes mutex before starting process creation
	 * so there won't be a case where two threads of the same process
	 * create two kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process)
		pr_debug("Process already found\n");
	else
		process = create_process(thread);

	mutex_unlock(&kfd_processes_mutex);

	up_write(&thread->mm->mmap_sem);

	/* create_process() may have failed; don't dereference an ERR_PTR */
	if (!IS_ERR(process))
		kfd_process_init_cwsr(process, filep);

	return process;
}

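/*
 * Look up an existing kfd_process for a task without creating one.
 * Returns ERR_PTR(-EINVAL) for unsupported threading models, or NULL
 * if the process has not opened /dev/kfd yet.
 */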
struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);

	return process;
}

static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
				   kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);
	struct kfd_process_device *pdd, *temp;

	pr_debug("Releasing process (pasid %d) in workqueue\n", p->pasid);

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				 per_device_list) {
		pr_debug("Releasing pdd (topology id %d) for process (pasid %d) in workqueue\n",
			 pdd->dev->id, p->pasid);

		if (pdd->bound == PDD_BOUND)
			amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);

		list_del(&pdd->per_device_list);

		if (pdd->qpd.cwsr_kaddr)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				   get_order(KFD_CWSR_TBA_TMA_SIZE));

		kfree(pdd);
	}

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	kfd_free_process_doorbells(p);

	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}

static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}

static void kfd_process_destroy_delayed(struct rcu_head *rcu)
{
	struct kfd_process *p = container_of(rcu, struct kfd_process, rcu);

	kref_put(&p->ref, kfd_process_ref_release);
}

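/*
 * MMU-notifier release handler: runs when the process address space is
 * torn down (exit or exec). Unhashes the process, tears down its debug
 * registration and queues, then drops the reference from SRCU context
 * so the final free happens later in kfd_process_wq_release().
 */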
static void kfd_process_notifier_release(struct mmu_notifier *mn,
					 struct mm_struct *mm)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd = NULL;

	/*
	 * The kfd_process structure cannot be freed here because the
	 * mmu_notifier srcu is read-locked.
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	mutex_lock(&p->mutex);

	/* Iterate over all process device data structures and if the
	 * pdd is in debug mode, we should first force unregistration,
	 * then we will be able to destroy the queues.
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;

		mutex_lock(kfd_get_dbgmgr_mutex());
		if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
			if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
				kfd_dbgmgr_destroy(dev->dbgmgr);
				dev->dbgmgr = NULL;
			}
		}
		mutex_unlock(kfd_get_dbgmgr_mutex());
	}

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;

	mutex_unlock(&p->mutex);

	mmu_notifier_unregister_no_release(&p->mmu_notifier, mm);
	mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
};

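/*
 * Set up CWSR (compute wave save/restore) for each CWSR-capable device:
 * map the trap-handler region (TBA/TMA) into the process address space
 * via the reserved-memory mmap (see kfd_reserved_mem_mmap()) and copy
 * the device's CWSR trap-handler ISA into it.
 */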
static int kfd_process_init_cwsr(struct kfd_process *p, struct file *filep)
{
	int err = 0;
	unsigned long offset;
	struct kfd_process_device *temp, *pdd = NULL;
	struct kfd_dev *dev = NULL;
	struct qcm_process_device *qpd = NULL;

	mutex_lock(&p->mutex);
	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				 per_device_list) {
		dev = pdd->dev;
		qpd = &pdd->qpd;
		if (!dev->cwsr_enabled || qpd->cwsr_kaddr)
			continue;
		offset = (dev->id | KFD_MMAP_RESERVED_MEM_MASK) << PAGE_SHIFT;
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			pr_err("Failure to set tba address, error %d\n",
			       (int)qpd->tba_addr);
			err = qpd->tba_addr;
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			goto out;
		}

		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}
out:
	mutex_unlock(&p->mutex);
	return err;
}

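/*
 * Allocate and initialize a new kfd_process: PASID, doorbells, reference
 * count, MMU notifier, process queue manager and apertures, then hash it
 * into kfd_processes_table. The caller, kfd_create_process(), holds both
 * kfd_processes_mutex and the mm's mmap_sem for __mmu_notifier_register().
 */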
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);

	if (!process)
		goto err_alloc_process;

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	if (kfd_alloc_process_doorbells(process) < 0)
		goto err_alloc_doorbells;

	kref_init(&process->ref);

	mutex_init(&process->mutex);

	process->mm = thread->mm;

	/* register notifier */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = __mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_mmu_notifier;

	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
		     (uintptr_t)process->mm);

	process->lead_thread = thread->group_leader;
	get_task_struct(process->lead_thread);

	INIT_LIST_HEAD(&process->per_device_data);

	kfd_event_init_process(process);

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	process->is_32bit_user_mode = in_compat_syscall();
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	return process;

err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	hash_del_rcu(&process->kfd_processes);
	synchronize_rcu();
	mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
	mutex_destroy(&process->mutex);
	kfd_free_process_doorbells(process);
err_alloc_doorbells:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	return NULL;
}

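/*
 * Allocate per-device state (queue lists, qpd, bind status) for a process
 * and link it into per_device_data. Returns NULL on allocation failure.
 */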
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (pdd != NULL) {
		pdd->dev = dev;
		INIT_LIST_HEAD(&pdd->qpd.queues_list);
		INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
		pdd->qpd.dqm = dev->dqm;
		pdd->qpd.pqm = &p->pqm;
		pdd->process = p;
		pdd->bound = PDD_UNBOUND;
		pdd->already_dequeued = false;
		list_add(&pdd->per_device_list, &p->per_device_data);
	}

	return pdd;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						      struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	if (pdd->bound == PDD_BOUND) {
		return pdd;
	} else if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
		pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n");
		return ERR_PTR(-EINVAL);
	}

	err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
	if (err < 0)
		return ERR_PTR(err);

	pdd->bound = PDD_BOUND;

	return pdd;
}

/*
 * Bind processes to the device that have been temporarily unbound
 * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
 */
int kfd_bind_processes_to_device(struct kfd_dev *dev)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;
	int err = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(dev, p);
		/* pdd may be missing if the process never used this device */
		if (!pdd || pdd->bound != PDD_BOUND_SUSPENDED) {
			mutex_unlock(&p->mutex);
			continue;
		}

		err = amd_iommu_bind_pasid(dev->pdev, p->pasid,
					   p->lead_thread);
		if (err < 0) {
			pr_err("Unexpected pasid %d binding failure\n",
			       p->pasid);
			mutex_unlock(&p->mutex);
			break;
		}

		pdd->bound = PDD_BOUND;
		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return err;
}

/*
 * Mark currently bound processes as PDD_BOUND_SUSPENDED. These
 * processes will be restored to PDD_BOUND state in
 * kfd_bind_processes_to_device.
 */
void kfd_unbind_processes_from_device(struct kfd_dev *dev)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(dev, p);

		if (pdd && pdd->bound == PDD_BOUND)
			pdd->bound = PDD_BOUND_SUSPENDED;
		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);
}

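/*
 * Invoked when the IOMMU unbinds a PASID from the device behind KFD's
 * back (e.g. on process shutdown). Note that kfd_lookup_process_by_pasid()
 * returns with the process mutex held, which is why this function ends
 * with mutex_unlock().
 */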
void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd;

	/*
	 * Look for the process that matches the pasid. If there is no such
	 * process, we either released it in amdkfd's own notifier, or there
	 * is a bug. Unfortunately, there is no way to tell...
	 */
	p = kfd_lookup_process_by_pasid(pasid);
	if (!p)
		return;

	pr_debug("Unbinding process %d from IOMMU\n", pasid);

	mutex_lock(kfd_get_dbgmgr_mutex());

	if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
		if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
			kfd_dbgmgr_destroy(dev->dbgmgr);
			dev->dbgmgr = NULL;
		}
	}

	mutex_unlock(kfd_get_dbgmgr_mutex());

	pdd = kfd_get_process_device_data(dev, p);
	if (pdd)
		/* For GPUs relying on the IOMMU, we need to dequeue here
		 * while the PASID is still bound.
		 */
		kfd_process_dequeue_from_device(pdd);

	mutex_unlock(&p->mutex);
}

struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !(list_empty(&p->per_device_data));
}

/*
 * Look up a process by PASID. Returns NULL if no process matches;
 * otherwise this returns with process->mutex locked.
 */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
	struct kfd_process *p;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			mutex_lock(&p->mutex);
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

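/*
 * Handle the mmap() of per-device reserved (CWSR) memory: back it with
 * zeroed kernel pages, remember the kernel address in qpd->cwsr_kaddr and
 * map the pages into the user VMA. Reached via the vm_mmap() call in
 * kfd_process_init_cwsr(), whose offset encodes the device ID together
 * with KFD_MMAP_RESERVED_MEM_MASK.
 */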
int kfd_reserved_mem_mmap(struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_dev *dev = kfd_device_by_id(vma->vm_pgoff);
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if (!dev)
		return -EINVAL;
	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}

#if defined(CONFIG_DEBUG_FS)

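/* Dump the MQDs (memory queue descriptors) of every process's queues */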
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID %d:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif