/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_dbgmgr.h"

/*
 * List of struct kfd_process (field kfd_processes).
 * Unique/indexed by mm_struct*
 */
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_STATIC_SRCU(kfd_processes_srcu);

static struct workqueue_struct *kfd_process_wq;

static struct kfd_process *find_process(const struct task_struct *thread);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread,
					struct file *filep);
static int kfd_process_init_cwsr(struct kfd_process *p, struct file *filep);

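/*
 * kfd_process_wq runs the deferred process-release work queued by
 * kfd_process_ref_release() below, so the heavyweight teardown happens in
 * worker context rather than in whatever context drops the last reference.
 */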
void kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
}

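/*
 * kfd_create_process() is called when a process opens /dev/kfd: it returns
 * the existing kfd_process for the caller's mm if a prior open already
 * created one, otherwise it creates a new one under kfd_processes_mutex.
 */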
struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/*
	 * Take the KFD processes mutex before starting process creation, so
	 * that two threads of the same process cannot race to create two
	 * kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process)
		pr_debug("Process already found\n");
	else
		process = create_process(thread, filep);

	mutex_unlock(&kfd_processes_mutex);

	return process;
}

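/*
 * kfd_get_process() looks up the kfd_process for an existing task without
 * creating one. Unlike kfd_lookup_process_by_pasid() below, it does not take
 * a reference on the returned process.
 */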
struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);

	return process;
}

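/*
 * find_process_by_mm() walks the hash bucket for the given mm_struct. It
 * relies on the caller holding the kfd_processes_srcu read lock, which
 * find_process() takes around the lookup.
 */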
static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
					kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

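/*
 * kfd_unref_process() drops a reference taken with kref_get(), e.g. by
 * kfd_lookup_process_by_pasid(). The final put schedules the release work
 * through kfd_process_ref_release().
 */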
void kfd_unref_process(struct kfd_process *p)
{
	kref_put(&p->ref, kfd_process_ref_release);
}

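/*
 * kfd_process_destroy_pdds() tears down all per-device data of a process,
 * including the CWSR trap-handler pages set up by kfd_process_init_cwsr().
 */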
static void kfd_process_destroy_pdds(struct kfd_process *p)
{
	struct kfd_process_device *pdd, *temp;

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				 per_device_list) {
		pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
				pdd->dev->id, p->pasid);

		list_del(&pdd->per_device_list);

		if (pdd->qpd.cwsr_kaddr)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				get_order(KFD_CWSR_TBA_TMA_SIZE));

		kfree(pdd);
	}
}

/* No process locking is needed in this function, because the process is no
 * longer findable. We must assume that no other thread is still using it,
 * otherwise we couldn't safely free the process structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);
	struct kfd_process_device *pdd;

	pr_debug("Releasing process (pasid %d) in workqueue\n", p->pasid);

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		if (pdd->bound == PDD_BOUND)
			amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
	}

	kfd_process_destroy_pdds(p);

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	kfd_free_process_doorbells(p);

	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}

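/*
 * kfd_process_ref_release() runs when the last reference to the process is
 * dropped. It only queues the release work, deferring the actual teardown
 * (kfd_process_wq_release() above) to kfd_process_wq.
 */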
static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}

static void kfd_process_destroy_delayed(struct rcu_head *rcu)
{
	struct kfd_process *p = container_of(rcu, struct kfd_process, rcu);

	kfd_unref_process(p);
}

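/*
 * kfd_process_notifier_release() is the MMU-notifier .release hook, invoked
 * when the process's address space goes away. It unhashes the process,
 * destroys its queues and debug-manager state, and hands the final teardown
 * to an SRCU callback that drops the process's original reference.
 */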
static void kfd_process_notifier_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd = NULL;

	/*
	 * The kfd_process structure cannot be freed here because the
	 * mmu_notifier srcu is read locked
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	mutex_lock(&p->mutex);

	/* Iterate over all process device data structures; if a pdd is in
	 * debug mode, force unregistration first so that its queues can be
	 * destroyed afterwards.
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;

		mutex_lock(kfd_get_dbgmgr_mutex());
		if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
			if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
				kfd_dbgmgr_destroy(dev->dbgmgr);
				dev->dbgmgr = NULL;
			}
		}
		mutex_unlock(kfd_get_dbgmgr_mutex());
	}

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;

	mutex_unlock(&p->mutex);

	mmu_notifier_unregister_no_release(&p->mmu_notifier, mm);
	mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
};

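/*
 * kfd_process_init_cwsr() sets up the CWSR (compute wave save/restore) trap
 * handler for every CWSR-capable device the process uses. The vm_mmap()
 * offset is tagged with KFD_MMAP_RESERVED_MEM_MASK so that the driver's mmap
 * handler routes the mapping to kfd_reserved_mem_mmap() further down in this
 * file.
 */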
static int kfd_process_init_cwsr(struct kfd_process *p, struct file *filep)
{
	unsigned long offset;
	struct kfd_process_device *pdd = NULL;
	struct kfd_dev *dev = NULL;
	struct qcm_process_device *qpd = NULL;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		dev = pdd->dev;
		qpd = &pdd->qpd;
		if (!dev->cwsr_enabled || qpd->cwsr_kaddr)
			continue;
		offset = (dev->id | KFD_MMAP_RESERVED_MEM_MASK) << PAGE_SHIFT;
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			int err = qpd->tba_addr;

			pr_err("Failed to set TBA address, error %d\n", err);
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			return err;
		}

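		/*
		 * The vm_mmap() above ends up in kfd_reserved_mem_mmap(),
		 * which allocates the CWSR pages and records their kernel
		 * address in qpd->cwsr_kaddr, so the trap-handler ISA can be
		 * copied in directly here.
		 */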
		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba: 0x%llx, tma: 0x%llx, cwsr_kaddr: %p for pqm\n",
			qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}

	return 0;
}

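/*
 * create_process() builds a new kfd_process for the calling thread's group
 * leader: PASID and doorbell allocation, MMU-notifier registration, process
 * queue manager and aperture setup, then CWSR initialization, unwinding in
 * reverse order on failure.
 */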
static struct kfd_process *create_process(const struct task_struct *thread,
					struct file *filep)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);

	if (!process)
		goto err_alloc_process;

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	if (kfd_alloc_process_doorbells(process) < 0)
		goto err_alloc_doorbells;

	kref_init(&process->ref);

	mutex_init(&process->mutex);

	process->mm = thread->mm;

	/* register notifier */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_mmu_notifier;

	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
			(uintptr_t)process->mm);

	process->lead_thread = thread->group_leader;
	get_task_struct(process->lead_thread);

	INIT_LIST_HEAD(&process->per_device_data);

	kfd_event_init_process(process);

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	process->is_32bit_user_mode = in_compat_syscall();
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	err = kfd_process_init_cwsr(process, filep);
	if (err)
		goto err_init_cwsr;

	return process;

err_init_cwsr:
	kfd_process_destroy_pdds(process);
err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	hash_del_rcu(&process->kfd_processes);
	synchronize_rcu();
	mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
	mutex_destroy(&process->mutex);
	kfd_free_process_doorbells(process);
err_alloc_doorbells:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

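/*
 * kfd_get_process_device_data() returns the per-device data (pdd) previously
 * created for this device by kfd_create_process_device_data(), or NULL if
 * none exists.
 */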
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	return NULL;
}

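/*
 * kfd_create_process_device_data() allocates the pdd that links a process to
 * one device and initializes its queue-management state. The pdd starts out
 * PDD_UNBOUND; the IOMMU binding happens later in
 * kfd_bind_process_to_device().
 */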
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (!pdd)
		return NULL;

	pdd->dev = dev;
	INIT_LIST_HEAD(&pdd->qpd.queues_list);
	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
	pdd->qpd.dqm = dev->dqm;
	pdd->qpd.pqm = &p->pqm;
	pdd->process = p;
	pdd->bound = PDD_UNBOUND;
	pdd->already_dequeued = false;
	list_add(&pdd->per_device_list, &p->per_device_data);

	return pdd;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	if (pdd->bound == PDD_BOUND) {
		return pdd;
	} else if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
		pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n");
		return ERR_PTR(-EINVAL);
	}

	err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
	if (err < 0)
		return ERR_PTR(err);

	pdd->bound = PDD_BOUND;

	return pdd;
}

/*
 * Rebind processes that were temporarily unbound from the device
 * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
 */
int kfd_bind_processes_to_device(struct kfd_dev *dev)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;
	int err = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(dev, p);
		/* Processes that never opened this device have no pdd */
		if (!pdd || pdd->bound != PDD_BOUND_SUSPENDED) {
			mutex_unlock(&p->mutex);
			continue;
		}

		err = amd_iommu_bind_pasid(dev->pdev, p->pasid,
				p->lead_thread);
		if (err < 0) {
			pr_err("Unexpected pasid %d binding failure\n",
					p->pasid);
			mutex_unlock(&p->mutex);
			break;
		}

		pdd->bound = PDD_BOUND;
		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return err;
}

/*
 * Mark currently bound processes as PDD_BOUND_SUSPENDED. These
 * processes will be restored to PDD_BOUND state in
 * kfd_bind_processes_to_device.
 */
void kfd_unbind_processes_from_device(struct kfd_dev *dev)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(dev, p);

		if (pdd && pdd->bound == PDD_BOUND)
			pdd->bound = PDD_BOUND_SUSPENDED;
		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);
}

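/*
 * kfd_process_iommu_unbind_callback() is invoked when the IOMMU driver tears
 * down a PASID binding for this device, e.g. because the process is exiting.
 * The process's queues must be dequeued here, while the PASID is still bound.
 */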
void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd;

	/*
	 * Look for the process that matches the pasid. If there is no such
	 * process, we either released it in amdkfd's own notifier, or there
	 * is a bug. Unfortunately, there is no way to tell...
	 */
	p = kfd_lookup_process_by_pasid(pasid);
	if (!p)
		return;

	pr_debug("Unbinding process %d from IOMMU\n", pasid);

	mutex_lock(kfd_get_dbgmgr_mutex());

	if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
		if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
			kfd_dbgmgr_destroy(dev->dbgmgr);
			dev->dbgmgr = NULL;
		}
	}

	mutex_unlock(kfd_get_dbgmgr_mutex());

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(dev, p);
	if (pdd)
		/* For a GPU relying on the IOMMU, we need to dequeue here
		 * while the PASID is still bound.
		 */
		kfd_process_dequeue_from_device(pdd);

	mutex_unlock(&p->mutex);

	kfd_unref_process(p);
}

struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !list_empty(&p->per_device_data);
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
	struct kfd_process *p, *ret_p = NULL;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			kref_get(&p->ref);
			ret_p = p;
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return ret_p;
}

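/*
 * kfd_reserved_mem_mmap() backs the vm_mmap() call made in
 * kfd_process_init_cwsr(): it allocates zeroed pages for the CWSR
 * trap-handler region, records their kernel address in qpd->cwsr_kaddr, and
 * maps them into the calling process.
 */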
int kfd_reserved_mem_mmap(struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_dev *dev = kfd_device_by_id(vma->vm_pgoff);
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if (!dev)
		return -EINVAL;
	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}

#if defined(CONFIG_DEBUG_FS)

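/*
 * kfd_debugfs_mqds_by_process() dumps the memory queue descriptors (MQDs) of
 * all known processes, stopping at the first pqm_debugfs_mqds() error.
 */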
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID %d:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif