/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/notifier.h>
struct mm_struct;

#include "kfd_priv.h"

/*
 * Initial size for the array of queues.
 * The allocated size is doubled each time
 * it is exceeded, up to MAX_PROCESS_QUEUES.
 */
#define INITIAL_QUEUE_ARRAY_SIZE 16

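/*
 * Growth-policy sketch (illustrative only; the resize itself is not in
 * this file, and the names new_size/needed are hypothetical): doubling
 * until the requested queue count fits, as described above:
 *
 *	while (new_size < needed && new_size < MAX_PROCESS_QUEUES)
 *		new_size <<= 1;
 */
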
/*
 * Hashtable of struct kfd_process (field: kfd_processes),
 * unique/indexed by mm_struct*.
 */
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_STATIC_SRCU(kfd_processes_srcu);

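/*
 * Lookups walk kfd_processes_table under kfd_processes_srcu (see
 * find_process() below), while updaters hold kfd_processes_mutex and
 * wait out a grace period with synchronize_srcu() before a process can
 * be freed; SRCU (rather than plain RCU) lets readers sleep.
 */
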
static struct workqueue_struct *kfd_process_wq;

struct kfd_process_release_work {
	struct work_struct kfd_work;
	struct kfd_process *p;
};

static struct kfd_process *find_process(const struct task_struct *thread);
static struct kfd_process *create_process(const struct task_struct *thread);

void kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = create_workqueue("kfd_process_wq");
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		flush_workqueue(kfd_process_wq);
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
}

struct kfd_process *kfd_create_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	BUG_ON(!kfd_process_wq);

	if (thread->mm == NULL)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/* Take mmap_sem because we call __mmu_notifier_register inside. */
	down_write(&thread->mm->mmap_sem);

	/*
	 * Take the kfd_processes_mutex before creating the process so
	 * that two threads of the same process cannot race to create
	 * two kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process)
		pr_debug("kfd: process already found\n");
	else
		process = create_process(thread);

	mutex_unlock(&kfd_processes_mutex);

	up_write(&thread->mm->mmap_sem);

	return process;
}

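/*
 * Usage sketch (for illustration only, not the real handler): the
 * expected entry point is the /dev/kfd open path mentioned above,
 * roughly:
 *
 *	static int kfd_open(struct inode *inode, struct file *filep)
 *	{
 *		struct kfd_process *process;
 *
 *		process = kfd_create_process(current);
 *		if (IS_ERR(process))
 *			return PTR_ERR(process);
 *		...
 *	}
 */
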
struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	if (thread->mm == NULL)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	return find_process(thread);
}

static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
				   kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process_release_work *my_work;
	struct kfd_process_device *pdd, *temp;
	struct kfd_process *p;

	my_work = container_of(work, struct kfd_process_release_work,
				kfd_work);

	p = my_work->p;

	mutex_lock(&p->mutex);

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				 per_device_list) {
		list_del(&pdd->per_device_list);

		kfree(pdd);
	}

	kfd_pasid_free(p->pasid);

	mutex_unlock(&p->mutex);

	mutex_destroy(&p->mutex);

	kfree(p->queues);

	kfree(p);

	kfree(my_work);
}

static void kfd_process_destroy_delayed(struct rcu_head *rcu)
{
	struct kfd_process_release_work *work;
	struct kfd_process *p;

	BUG_ON(!kfd_process_wq);

	p = container_of(rcu, struct kfd_process, rcu);
	BUG_ON(atomic_read(&p->mm->mm_count) <= 0);

	mmdrop(p->mm);

	work = kmalloc(sizeof(*work), GFP_KERNEL);

	if (work) {
		INIT_WORK(&work->kfd_work, kfd_process_wq_release);
		work->p = p;
		queue_work(kfd_process_wq, &work->kfd_work);
	}
}

static void kfd_process_notifier_release(struct mmu_notifier *mn,
					 struct mm_struct *mm)
{
	struct kfd_process *p;

	/*
	 * The kfd_process structure cannot be freed here because the
	 * mmu_notifier SRCU is still read-locked.
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	BUG_ON(p->mm != mm);

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	/*
	 * Because we drop mm_count inside kfd_process_destroy_delayed
	 * and because mmu_notifier_unregister also drops mm_count, we
	 * need to take an extra reference here.
	 */
	atomic_inc(&p->mm->mm_count);
	mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm);
	mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}

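/*
 * Teardown ordering, for reference: the .release notifier above
 * unhashes the process and waits out the lookup grace period; then
 * kfd_process_destroy_delayed() runs after the mmu_notifier SRCU
 * grace period and queues kfd_process_wq_release() on kfd_process_wq,
 * which does the actual freeing in a context that may sleep (it takes
 * p->mutex).
 */
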
static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
};

static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);

	if (!process)
		goto err_alloc_process;

	process->queues = kmalloc_array(INITIAL_QUEUE_ARRAY_SIZE,
					sizeof(process->queues[0]), GFP_KERNEL);
	if (!process->queues)
		goto err_alloc_queues;

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	mutex_init(&process->mutex);

	process->mm = thread->mm;

	/* register notifier */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = __mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_mmu_notifier;

	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
		     (uintptr_t)process->mm);

	process->lead_thread = thread->group_leader;

	process->queue_array_size = INITIAL_QUEUE_ARRAY_SIZE;

	INIT_LIST_HEAD(&process->per_device_data);

	return process;

err_mmu_notifier:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfree(process->queues);
err_alloc_queues:
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p,
							int create_pdd)
{
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	if (create_pdd) {
		pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
		if (pdd != NULL) {
			pdd->dev = dev;
			list_add(&pdd->per_device_list, &p->per_device_data);
		}
		return pdd;
	}

	/*
	 * Don't return the loop cursor here: after a full traversal it
	 * points at the list head, not at a valid entry.
	 */
	return NULL;
}

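/*
 * Usage sketch (illustrative only): kfd_bind_process_to_device() below
 * passes create_pdd == 1 so per-device data is allocated on first use,
 * while the unbind path passes 0 because it must only look up state
 * that already exists:
 *
 *	pdd = kfd_get_process_device_data(dev, p, 0);
 *	if (pdd)
 *		pdd->bound = false;
 */
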
/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						      struct kfd_process *p)
{
	struct kfd_process_device *pdd = kfd_get_process_device_data(dev, p, 1);

	if (pdd == NULL)
		return ERR_PTR(-ENOMEM);

	if (pdd->bound)
		return pdd;

	pdd->bound = true;

	return pdd;
}

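/*
 * Hypothetical caller sketch (for illustration; the real callers are
 * not in this file): the process mutex must be held across the bind,
 * per the comment above:
 *
 *	mutex_lock(&p->mutex);
 *	pdd = kfd_bind_process_to_device(dev, p);
 *	if (IS_ERR(pdd)) {
 *		mutex_unlock(&p->mutex);
 *		return PTR_ERR(pdd);
 *	}
 *	...
 *	mutex_unlock(&p->mutex);
 */
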
void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd;
	int idx, i;

	BUG_ON(dev == NULL);

	idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
		if (p->pasid == pasid)
			break;

	srcu_read_unlock(&kfd_processes_srcu, idx);

	/* p is NULL if the pasid was not found in the table. */
	BUG_ON(p == NULL || p->pasid != pasid);

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(dev, p, 0);

	/*
	 * Just mark pdd as unbound, because we still need it to call
	 * amd_iommu_unbind_pasid() when the process exits.
	 * We don't call amd_iommu_unbind_pasid() here
	 * because the IOMMU called us.
	 */
	if (pdd)
		pdd->bound = false;

	mutex_unlock(&p->mutex);
}

struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !list_empty(&p->per_device_data);
}
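
/*
 * Iteration sketch (illustrative only; do_something() is a hypothetical
 * placeholder): walking all per-device data of a process with the
 * helpers above. Callers should check kfd_has_process_device_data()
 * first, since list_first_entry() assumes a non-empty list:
 *
 *	struct kfd_process_device *pdd;
 *
 *	if (!kfd_has_process_device_data(p))
 *		return;
 *	for (pdd = kfd_get_first_process_device_data(p); pdd != NULL;
 *	     pdd = kfd_get_next_process_device_data(p, pdd))
 *		do_something(pdd);
 */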