/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_dbgmgr.h"

/*
 * Initial size for the array of queues.
 * The allocated size is doubled each time
 * it is exceeded up to MAX_PROCESS_QUEUES.
 */
#define INITIAL_QUEUE_ARRAY_SIZE 16

/*
 * List of struct kfd_process (field kfd_process).
 * Unique/indexed by mm_struct*
 */
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_STATIC_SRCU(kfd_processes_srcu);

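/*
 * kfd_processes_table is updated (hash_add_rcu()/hash_del_rcu()) under
 * kfd_processes_mutex, while lookups walk it under kfd_processes_srcu;
 * removal paths call synchronize_srcu() before the entry is freed.
 */
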
static struct workqueue_struct *kfd_process_wq;

struct kfd_process_release_work {
	struct work_struct kfd_work;
	struct kfd_process *p;
};

static struct kfd_process *find_process(const struct task_struct *thread);
static struct kfd_process *create_process(const struct task_struct *thread);

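/*
 * kfd_process_wq must exist before the first kfd_create_process() call
 * (enforced by the BUG_ON there); kfd_process_destroy_wq() flushes any
 * pending release work before destroying it.
 */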
void kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = create_workqueue("kfd_process_wq");
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		flush_workqueue(kfd_process_wq);
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
}

struct kfd_process *kfd_create_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	BUG_ON(!kfd_process_wq);

	if (thread->mm == NULL)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/* Take mmap_sem because we call __mmu_notifier_register inside */
	down_write(&thread->mm->mmap_sem);

	/*
	 * Take the kfd processes mutex before starting process creation
	 * so that two threads of the same process cannot each create a
	 * kfd_process structure.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process)
		pr_debug("kfd: process already found\n");

	if (!process)
		process = create_process(thread);

	mutex_unlock(&kfd_processes_mutex);

	up_write(&thread->mm->mmap_sem);

	return process;
}

struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (thread->mm == NULL)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);

	return process;
}

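/* Must be called under the kfd_processes_srcu read lock (see find_process()). */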
static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
					kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

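/*
 * Final teardown of a kfd_process. This runs on kfd_process_wq rather than
 * directly from the RCU callback in kfd_process_destroy_delayed() because it
 * takes p->mutex and calls into the IOMMU driver, which may sleep.
 */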
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process_release_work *my_work;
	struct kfd_process_device *pdd, *temp;
	struct kfd_process *p;

	my_work = (struct kfd_process_release_work *) work;

	p = my_work->p;

	pr_debug("Releasing process (pasid %d) in workqueue\n",
			p->pasid);

	mutex_lock(&p->mutex);

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
							per_device_list) {
		pr_debug("Releasing pdd (topology id %d) for process (pasid %d) in workqueue\n",
				pdd->dev->id, p->pasid);

		if (p->reset_wavefronts)
			dbgdev_wave_reset_wavefronts(pdd->dev, p);

		amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
		list_del(&pdd->per_device_list);

		kfree(pdd);
	}

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);

	mutex_unlock(&p->mutex);

	mutex_destroy(&p->mutex);

	kfree(p->queues);

	kfree(p);

	kfree((void *)work);
}

static void kfd_process_destroy_delayed(struct rcu_head *rcu)
{
	struct kfd_process_release_work *work;
	struct kfd_process *p;

	BUG_ON(!kfd_process_wq);

	p = container_of(rcu, struct kfd_process, rcu);
	BUG_ON(atomic_read(&p->mm->mm_count) <= 0);

	mmdrop(p->mm);

	work = kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC);

	if (work) {
		INIT_WORK((struct work_struct *) work, kfd_process_wq_release);
		work->p = p;
		queue_work(kfd_process_wq, (struct work_struct *) work);
	}
}

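/*
 * mmu_notifier release hook: invoked when the process address space is being
 * torn down, i.e. on process exit, before the mm itself goes away.
 */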
static void kfd_process_notifier_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	struct kfd_process *p;

	/*
	 * The kfd_process structure cannot be freed here because the
	 * mmu_notifier srcu is read locked.
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	BUG_ON(p->mm != mm);

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	mutex_lock(&p->mutex);

	/* In case our notifier is called before the IOMMU notifier */
	pqm_uninit(&p->pqm);

	mutex_unlock(&p->mutex);

	/*
	 * Because we drop mm_count inside kfd_process_destroy_delayed
	 * and because the mmu_notifier_unregister function also drops
	 * mm_count, we need to take an extra count here.
	 */
	atomic_inc(&p->mm->mm_count);
	mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm);
	mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
};

static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);

	if (!process)
		goto err_alloc_process;

	process->queues = kmalloc_array(INITIAL_QUEUE_ARRAY_SIZE,
				sizeof(process->queues[0]), GFP_KERNEL);
	if (!process->queues)
		goto err_alloc_queues;

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	mutex_init(&process->mutex);

	process->mm = thread->mm;

	/* register notifier */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = __mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_mmu_notifier;

	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
			(uintptr_t)process->mm);

	process->lead_thread = thread->group_leader;

	process->queue_array_size = INITIAL_QUEUE_ARRAY_SIZE;

	INIT_LIST_HEAD(&process->per_device_data);

	kfd_event_init_process(process);

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	process->is_32bit_user_mode = is_compat_task();
	if (kfd_init_apertures(process) != 0)
		goto err_init_apertures;

	process->reset_wavefronts = false;

	return process;

err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	hash_del_rcu(&process->kfd_processes);
	synchronize_rcu();
	mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfree(process->queues);
err_alloc_queues:
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

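/*
 * Return the per-device data this process holds for @dev, or NULL if
 * kfd_create_process_device_data() has not been called for that device yet.
 */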
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	return NULL;
}

struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (pdd != NULL) {
		pdd->dev = dev;
		INIT_LIST_HEAD(&pdd->qpd.queues_list);
		INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
		pdd->qpd.dqm = dev->dqm;
		list_add(&pdd->per_device_list, &p->per_device_data);
	}

	return pdd;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	if (pdd->bound)
		return pdd;

	err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
	if (err < 0)
		return ERR_PTR(err);

	pdd->bound = true;

	return pdd;
}

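/*
 * Called on the IOMMU's behalf when the pasid is being unbound from @dev
 * (e.g. because the process is exiting): tear down the process's queues and
 * just mark the pdd unbound, since the pasid unbind is already in progress.
 */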
void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd;
	int idx, i;

	BUG_ON(dev == NULL);

	idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
		if (p->pasid == pasid)
			break;

	srcu_read_unlock(&kfd_processes_srcu, idx);

	BUG_ON(p->pasid != pasid);

	mutex_lock(&p->mutex);

	if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
		kfd_dbgmgr_destroy(dev->dbgmgr);

	pqm_uninit(&p->pqm);
	if (p->reset_wavefronts)
		dbgdev_wave_reset_wavefronts(dev, p);

	pdd = kfd_get_process_device_data(dev, p);

	/*
	 * Just mark pdd as unbound, because we still need it to call
	 * amd_iommu_unbind_pasid() when the process exits.
	 * We don't call amd_iommu_unbind_pasid() here
	 * because the IOMMU called us.
	 */
	if (pdd)
		pdd->bound = false;

	mutex_unlock(&p->mutex);
}

struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !(list_empty(&p->per_device_data));
}

/* This returns with process->mutex locked. */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
	struct kfd_process *p;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			mutex_lock(&p->mutex);
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}