/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/memory.h>
#include "kfd_priv.h"
#include "kfd_events.h"
#include <linux/device.h>

/*
 * Wrapper around wait_queue_entry_t
 */
struct kfd_event_waiter {
	wait_queue_entry_t wait;
	struct kfd_event *event; /* Event to wait for */
	bool activated;		 /* Becomes true when event is signaled */
};

/*
 * Each signal event needs a 64-bit signal slot where the signaler will write
 * a 1 before sending an interrupt. (This is needed because some interrupts
 * do not contain enough spare data bits to identify an event.)
 * We get whole pages and map them to the process VA.
 * Individual signal events use their event_id as slot index.
 */
struct kfd_signal_page {
	uint64_t *kernel_address;
	uint64_t __user *user_address;
};

static uint64_t *page_slots(struct kfd_signal_page *page)
{
	return page->kernel_address;
}

static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
{
	void *backing_store;
	struct kfd_signal_page *page;

	page = kzalloc(sizeof(*page), GFP_KERNEL);
	if (!page)
		return NULL;

	backing_store = (void *) __get_free_pages(GFP_KERNEL,
					get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
	if (!backing_store)
		goto fail_alloc_signal_store;

	/* Initialize all events to unsignaled */
	memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
	       KFD_SIGNAL_EVENT_LIMIT * 8);

	page->kernel_address = backing_store;
	pr_debug("Allocated new event signal page at %p, for process %p\n",
			page, p);

	return page;

fail_alloc_signal_store:
	kfree(page);
	return NULL;
}

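/*
 * Reserve a signal slot for a new event. The signal page is allocated
 * on first use, and the slot index is handed out by the event IDR, so
 * the event ID doubles as the slot index.
 */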
static int allocate_event_notification_slot(struct kfd_process *p,
					    struct kfd_event *ev)
{
	int id;

	if (!p->signal_page) {
		p->signal_page = allocate_signal_page(p);
		if (!p->signal_page)
			return -ENOMEM;
		/* Oldest user mode expects 256 event slots */
		p->signal_mapped_size = 256*8;
	}

	/*
	 * Compatibility with old user mode: Only use signal slots
	 * user mode has mapped, may be less than
	 * KFD_SIGNAL_EVENT_LIMIT. This also allows future increase
	 * of the event limit without breaking user mode.
	 */
	id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
		       GFP_KERNEL);
	if (id < 0)
		return id;

	ev->event_id = id;
	page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT;

	return 0;
}

/*
 * Assumes that p->event_mutex is held and of course that p is not going
 * away (current or locked).
 */
static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
{
	return idr_find(&p->event_idr, id);
}

/**
 * lookup_signaled_event_by_partial_id - Lookup signaled event from partial ID
 * @p:     Pointer to struct kfd_process
 * @id:    ID to look up
 * @bits:  Number of valid bits in @id
 *
 * Finds the first signaled event with a matching partial ID. If no
 * matching signaled event is found, returns NULL. In that case the
 * caller should assume that the partial ID is invalid and do an
 * exhaustive search of all signaled events.
 *
 * If multiple events with the same partial ID signal at the same
 * time, they will be found one interrupt at a time, not necessarily
 * in the same order the interrupts occurred. As long as the number of
 * interrupts is correct, all signaled events will be seen by the
 * driver.
 */
static struct kfd_event *lookup_signaled_event_by_partial_id(
	struct kfd_process *p, uint32_t id, uint32_t bits)
{
	struct kfd_event *ev;

	if (!p->signal_page || id >= KFD_SIGNAL_EVENT_LIMIT)
		return NULL;

	/* Fast path for the common case that @id is not a partial ID
	 * and we only need a single lookup.
	 */
	if (bits > 31 || (1U << bits) >= KFD_SIGNAL_EVENT_LIMIT) {
		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
			return NULL;

		return idr_find(&p->event_idr, id);
	}

	/* General case for partial IDs: Iterate over all matching IDs
	 * and find the first one that has signaled.
	 */
	for (ev = NULL; id < KFD_SIGNAL_EVENT_LIMIT && !ev; id += 1U << bits) {
		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
			continue;

		ev = idr_find(&p->event_idr, id);
	}

	return ev;
}

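/*
 * Create a GPU-signalable event: reserve a signal slot for it and
 * remember its user-space slot address. Fails with -ENOSPC once all
 * slots mapped by user mode are in use.
 */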
static int create_signal_event(struct file *devkfd,
				struct kfd_process *p,
				struct kfd_event *ev)
{
	int ret;

	if (p->signal_mapped_size &&
	    p->signal_event_count == p->signal_mapped_size / 8) {
		if (!p->signal_event_limit_reached) {
			pr_warn("Signal event wasn't created because limit was reached\n");
			p->signal_event_limit_reached = true;
		}
		return -ENOSPC;
	}

	ret = allocate_event_notification_slot(p, ev);
	if (ret) {
		pr_warn("Signal event wasn't created because out of kernel memory\n");
		return ret;
	}

	p->signal_event_count++;

	ev->user_signal_address = &p->signal_page->user_address[ev->event_id];
	pr_debug("Signal event number %zu created with id %d, address %p\n",
			p->signal_event_count, ev->event_id,
			ev->user_signal_address);

	return 0;
}

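/*
 * Create an event that is only signaled by the CPU/driver and needs no
 * signal slot. IDs for these events come from the non-signal ID range.
 */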
Andrew Lewyckyf3a39812015-05-10 12:15:46 +0300209static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
210{
Felix Kuehling482f0772017-10-27 19:35:27 -0400211 /* Cast KFD_LAST_NONSIGNAL_EVENT to uint32_t. This allows an
212 * intentional integer overflow to -1 without a compiler
213 * warning. idr_alloc treats a negative value as "maximum
214 * signed integer".
215 */
216 int id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
217 (uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,
218 GFP_KERNEL);
219
220 if (id < 0)
221 return id;
222 ev->event_id = id;
Andrew Lewyckyf3a39812015-05-10 12:15:46 +0300223
224 return 0;
225}
226
void kfd_event_init_process(struct kfd_process *p)
{
	mutex_init(&p->event_mutex);
	idr_init(&p->event_idr);
	p->signal_page = NULL;
	p->signal_event_count = 0;
}

static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;

	/* Wake up pending waiters. They will return failure */
	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		waiter->event = NULL;
	wake_up_all(&ev->wq);

	if (ev->type == KFD_EVENT_TYPE_SIGNAL ||
	    ev->type == KFD_EVENT_TYPE_DEBUG)
		p->signal_event_count--;

	idr_remove(&p->event_idr, ev->event_id);
	kfree(ev);
}

static void destroy_events(struct kfd_process *p)
{
	struct kfd_event *ev;
	uint32_t id;

	idr_for_each_entry(&p->event_idr, ev, id)
		destroy_event(p, ev);
	idr_destroy(&p->event_idr);
}

/*
 * We assume that the process is being destroyed and there is no need to
 * unmap the pages or keep bookkeeping data in order.
 */
static void shutdown_signal_page(struct kfd_process *p)
{
	struct kfd_signal_page *page = p->signal_page;

	if (page) {
		free_pages((unsigned long)page->kernel_address,
				get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
		kfree(page);
	}
}

void kfd_event_free_process(struct kfd_process *p)
{
	destroy_events(p);
	shutdown_signal_page(p);
}

static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL ||
					ev->type == KFD_EVENT_TYPE_DEBUG;
}

static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL;
}

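/*
 * Create a new event on behalf of the create-event ioctl. Signal and
 * debug events get a slot in the signal page and report the mmap offset
 * of that page; all other event types only get an ID.
 */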
int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index)
{
	int ret = 0;
	struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;

	ev->type = event_type;
	ev->auto_reset = auto_reset;
	ev->signaled = false;

	init_waitqueue_head(&ev->wq);

	*event_page_offset = 0;

	mutex_lock(&p->event_mutex);

	switch (event_type) {
	case KFD_EVENT_TYPE_SIGNAL:
	case KFD_EVENT_TYPE_DEBUG:
		ret = create_signal_event(devkfd, p, ev);
		if (!ret) {
			*event_page_offset = KFD_MMAP_EVENTS_MASK;
			*event_page_offset <<= PAGE_SHIFT;
			*event_slot_index = ev->event_id;
		}
		break;
	default:
		ret = create_other_event(p, ev);
		break;
	}

	if (!ret) {
		*event_id = ev->event_id;
		*event_trigger_data = ev->event_id;
	} else {
		kfree(ev);
	}

	mutex_unlock(&p->event_mutex);

	return ret;
}

/* Assumes that p is current. */
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
{
	struct kfd_event *ev;
	int ret = 0;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev)
		destroy_event(p, ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

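/*
 * Mark an event signaled and wake up everyone waiting on it. For
 * auto-reset events the signaled state is consumed immediately when
 * there are waiters to wake. Caller must hold p->event_mutex.
 */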
static void set_event(struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;

	/* Auto reset if the list is non-empty and we're waking
	 * someone. waitqueue_active is safe here because we're
	 * protected by the p->event_mutex, which is also held when
	 * updating the wait queues in kfd_wait_on_events.
	 */
	ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);

	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		waiter->activated = true;

	wake_up_all(&ev->wq);
}

/* Assumes that p is current. */
int kfd_set_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		set_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void reset_event(struct kfd_event *ev)
{
	ev->signaled = false;
}

/* Assumes that p is current. */
int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		reset_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
{
	page_slots(p->signal_page)[ev->event_id] = UNSIGNALED_EVENT_SLOT;
}

static void set_event_from_interrupt(struct kfd_process *p,
					struct kfd_event *ev)
{
	if (ev && event_can_be_gpu_signaled(ev)) {
		acknowledge_signal(p, ev);
		set_event(ev);
	}
}
void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits)
{
	struct kfd_event *ev = NULL;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	if (valid_id_bits)
		ev = lookup_signaled_event_by_partial_id(p, partial_id,
							 valid_id_bits);
	if (ev) {
		set_event_from_interrupt(p, ev);
	} else if (p->signal_page) {
		/*
		 * Partial ID lookup failed. Assume that the event ID
		 * in the interrupt payload was invalid and do an
		 * exhaustive search of signaled events.
		 */
		uint64_t *slots = page_slots(p->signal_page);
		uint32_t id;

		if (valid_id_bits)
			pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
					     partial_id, valid_id_bits);

		if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT/2) {
			/* With relatively few events, it's faster to
			 * iterate over the event IDR
			 */
			idr_for_each_entry(&p->event_idr, ev, id) {
				if (id >= KFD_SIGNAL_EVENT_LIMIT)
					break;

				if (slots[id] != UNSIGNALED_EVENT_SLOT)
					set_event_from_interrupt(p, ev);
			}
		} else {
			/* With relatively many events, it's faster to
			 * iterate over the signal slots and lookup
			 * only signaled events from the IDR.
			 */
			for (id = 0; id < KFD_SIGNAL_EVENT_LIMIT; id++)
				if (slots[id] != UNSIGNALED_EVENT_SLOT) {
					ev = lookup_event_by_id(p, id);
					set_event_from_interrupt(p, ev);
				}
		}
	}

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}

static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
{
	struct kfd_event_waiter *event_waiters;
	uint32_t i;

	event_waiters = kmalloc_array(num_events,
					sizeof(struct kfd_event_waiter),
					GFP_KERNEL);

	for (i = 0; (event_waiters) && (i < num_events); i++) {
		init_wait(&event_waiters[i].wait);
		event_waiters[i].activated = false;
	}

	return event_waiters;
}

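/*
 * Initialize one waiter with the current status of its event. If the
 * event is an already-signaled auto-reset event, the signal is consumed
 * here so other waiters don't see it again.
 */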
static int init_event_waiter_get_status(struct kfd_process *p,
		struct kfd_event_waiter *waiter,
		uint32_t event_id)
{
	struct kfd_event *ev = lookup_event_by_id(p, event_id);

	if (!ev)
		return -EINVAL;

	waiter->event = ev;
	waiter->activated = ev->signaled;
	ev->signaled = ev->signaled && !ev->auto_reset;

	return 0;
}

static void init_event_waiter_add_to_waitlist(struct kfd_event_waiter *waiter)
{
	struct kfd_event *ev = waiter->event;

	/* Only add to the wait list if we actually need to
	 * wait on this event.
	 */
	if (!waiter->activated)
		add_wait_queue(&ev->wq, &waiter->wait);
}

/* test_event_condition - Test condition of events being waited for
 * @all:           Return completion only if all events have signaled
 * @num_events:    Number of events to wait for
 * @event_waiters: Array of event waiters, one per event
 *
 * Returns KFD_IOC_WAIT_RESULT_COMPLETE if all (or one) event(s) have
 * signaled. Returns KFD_IOC_WAIT_RESULT_TIMEOUT if no (or not all)
 * events have signaled. Returns KFD_IOC_WAIT_RESULT_FAIL if any of
 * the events have been destroyed.
 */
static uint32_t test_event_condition(bool all, uint32_t num_events,
			struct kfd_event_waiter *event_waiters)
{
	uint32_t i;
	uint32_t activated_count = 0;

	for (i = 0; i < num_events; i++) {
		if (!event_waiters[i].event)
			return KFD_IOC_WAIT_RESULT_FAIL;

		if (event_waiters[i].activated) {
			if (!all)
				return KFD_IOC_WAIT_RESULT_COMPLETE;

			activated_count++;
		}
	}

	return activated_count == num_events ?
		KFD_IOC_WAIT_RESULT_COMPLETE : KFD_IOC_WAIT_RESULT_TIMEOUT;
}

/*
 * Copy event specific data, if defined.
 * Currently only memory exception events have additional data to copy to user
 */
static int copy_signaled_event_data(uint32_t num_events,
		struct kfd_event_waiter *event_waiters,
		struct kfd_event_data __user *data)
{
	struct kfd_hsa_memory_exception_data *src;
	struct kfd_hsa_memory_exception_data __user *dst;
	struct kfd_event_waiter *waiter;
	struct kfd_event *event;
	uint32_t i;

	for (i = 0; i < num_events; i++) {
		waiter = &event_waiters[i];
		event = waiter->event;
		if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
			dst = &data[i].memory_exception_data;
			src = &event->memory_exception_data;
			if (copy_to_user(dst, src,
				sizeof(struct kfd_hsa_memory_exception_data)))
				return -EFAULT;
		}
	}

	return 0;
}

static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
{
	if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
		return 0;

	if (user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
		return MAX_SCHEDULE_TIMEOUT;

	/*
	 * msecs_to_jiffies interprets all values above 2^31-1 as infinite,
	 * but we consider them finite.
	 * This hack is wrong, but nobody is likely to notice.
	 */
	user_timeout_ms = min_t(uint32_t, user_timeout_ms, 0x7FFFFFFF);

	return msecs_to_jiffies(user_timeout_ms) + 1;
}

static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
{
	uint32_t i;

	for (i = 0; i < num_events; i++)
		if (waiters[i].event)
			remove_wait_queue(&waiters[i].event->wq,
					  &waiters[i].wait);

	kfree(waiters);
}

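/*
 * Wait for one or all of num_events events described by the user-space
 * array in @data. The wait loop re-checks the event condition after
 * setting TASK_INTERRUPTIBLE so a concurrent set_event cannot be missed,
 * and the outcome of the wait is returned through *wait_result.
 */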
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       uint32_t *wait_result)
{
	struct kfd_event_data __user *events =
			(struct kfd_event_data __user *) data;
	uint32_t i;
	int ret = 0;

	struct kfd_event_waiter *event_waiters = NULL;
	long timeout = user_timeout_to_jiffies(user_timeout_ms);

	event_waiters = alloc_event_waiters(num_events);
	if (!event_waiters) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&p->event_mutex);

	for (i = 0; i < num_events; i++) {
		struct kfd_event_data event_data;

		if (copy_from_user(&event_data, &events[i],
				sizeof(struct kfd_event_data))) {
			ret = -EFAULT;
			goto out_unlock;
		}

		ret = init_event_waiter_get_status(p, &event_waiters[i],
				event_data.event_id);
		if (ret)
			goto out_unlock;
	}

	/* Check condition once. */
	*wait_result = test_event_condition(all, num_events, event_waiters);
	if (*wait_result == KFD_IOC_WAIT_RESULT_COMPLETE) {
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);
		goto out_unlock;
	} else if (WARN_ON(*wait_result == KFD_IOC_WAIT_RESULT_FAIL)) {
		/* This should not happen. Events shouldn't be
		 * destroyed while we're holding the event_mutex
		 */
		goto out_unlock;
	}

	/* Add to wait lists if we need to wait. */
	for (i = 0; i < num_events; i++)
		init_event_waiter_add_to_waitlist(&event_waiters[i]);

	mutex_unlock(&p->event_mutex);

	while (true) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (signal_pending(current)) {
			/*
			 * This is wrong when a nonzero, non-infinite timeout
			 * is specified. We need to use
			 * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
			 * contains a union with data for each user and it's
			 * in generic kernel code that I don't want to
			 * touch yet.
			 */
			ret = -ERESTARTSYS;
			break;
		}

		/* Set task state to interruptible sleep before
		 * checking wake-up conditions. A concurrent wake-up
		 * will put the task back into runnable state. In that
		 * case schedule_timeout will not put the task to
		 * sleep and we'll get a chance to re-check the
		 * updated conditions almost immediately. Otherwise,
		 * this race condition would lead to a soft hang or a
		 * very long sleep.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		*wait_result = test_event_condition(all, num_events,
						    event_waiters);
		if (*wait_result != KFD_IOC_WAIT_RESULT_TIMEOUT)
			break;

		if (timeout <= 0)
			break;

		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	/* copy_signaled_event_data may sleep. So this has to happen
	 * after the task state is set back to RUNNING.
	 */
	if (!ret && *wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);

	mutex_lock(&p->event_mutex);
out_unlock:
	free_waiters(num_events, event_waiters);
	mutex_unlock(&p->event_mutex);
out:
	if (ret)
		*wait_result = KFD_IOC_WAIT_RESULT_FAIL;
	else if (*wait_result == KFD_IOC_WAIT_RESULT_FAIL)
		ret = -EIO;

	return ret;
}

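/*
 * Map the process' signal page into user space. The mapped size is
 * remembered in p->signal_mapped_size and limits how many signal slots
 * new events may use.
 */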
int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
{
	unsigned long pfn;
	struct kfd_signal_page *page;
	int ret;

	/* check required size doesn't exceed the allocated size */
	if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) <
			get_order(vma->vm_end - vma->vm_start)) {
		pr_err("Event page mmap requested illegal size\n");
		return -EINVAL;
	}

	page = p->signal_page;
	if (!page) {
		/* Probably KFD bug, but mmap is user-accessible. */
		pr_debug("Signal page could not be found\n");
		return -EINVAL;
	}

	pfn = __pa(page->kernel_address);
	pfn >>= PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
		       | VM_DONTDUMP | VM_PFNMAP;

	pr_debug("Mapping signal page\n");
	pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
	pr_debug("     end user address    == 0x%08lx\n", vma->vm_end);
	pr_debug("     pfn                 == 0x%016lX\n", pfn);
	pr_debug("     vm_flags            == 0x%08lX\n", vma->vm_flags);
	pr_debug("     size                == 0x%08lX\n",
			vma->vm_end - vma->vm_start);

	page->user_address = (uint64_t __user *)vma->vm_start;

	/* mapping the page to user process */
	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			vma->vm_end - vma->vm_start, vma->vm_page_prot);
	if (!ret)
		p->signal_mapped_size = vma->vm_end - vma->vm_start;

	return ret;
}

/*
 * Assumes that p->event_mutex is held and of course
 * that p is not going away (current or locked).
 */
static void lookup_events_by_type_and_signal(struct kfd_process *p,
		int type, void *event_data)
{
	struct kfd_hsa_memory_exception_data *ev_data;
	struct kfd_event *ev;
	uint32_t id;
	bool send_signal = true;

	ev_data = (struct kfd_hsa_memory_exception_data *) event_data;

	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	idr_for_each_entry_continue(&p->event_idr, ev, id)
		if (ev->type == type) {
			send_signal = false;
			dev_dbg(kfd_device,
					"Event found: id %X type %d",
					ev->event_id, ev->type);
			set_event(ev);
			if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
				ev->memory_exception_data = *ev_data;
		}

	/* Send SIGTERM if no event of type "type" has been found */
	if (send_signal) {
		if (send_sigterm) {
			dev_warn(kfd_device,
				"Sending SIGTERM to HSA Process with PID %d ",
					p->lead_thread->pid);
			send_sig(SIGTERM, p->lead_thread, 0);
		} else {
			dev_err(kfd_device,
				"HSA Process (PID %d) got unhandled exception",
				p->lead_thread->pid);
		}
	}
}

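/*
 * Signal memory-exception events for an IOMMU fault. The faulting VMA
 * (if any) determines the reported failure reason; if no event of the
 * memory type exists, the helper above falls back to SIGTERM or an
 * error message depending on the send_sigterm setting.
 */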
void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
		unsigned long address, bool is_write_requested,
		bool is_execute_requested)
{
	struct kfd_hsa_memory_exception_data memory_exception_data;
	struct vm_area_struct *vma;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	struct mm_struct *mm;

	if (!p)
		return; /* Presumably process exited. */

	/* Take a safe reference to the mm_struct, which may otherwise
	 * disappear even while the kfd_process is still referenced.
	 */
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		mutex_unlock(&p->mutex);
		return; /* Process is exiting */
	}

	memset(&memory_exception_data, 0, sizeof(memory_exception_data));

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	memory_exception_data.gpu_id = dev->id;
	memory_exception_data.va = address;
	/* Set failure reason */
	memory_exception_data.failure.NotPresent = 1;
	memory_exception_data.failure.NoExecute = 0;
	memory_exception_data.failure.ReadOnly = 0;
	if (vma) {
		if (vma->vm_start > address) {
			memory_exception_data.failure.NotPresent = 1;
			memory_exception_data.failure.NoExecute = 0;
			memory_exception_data.failure.ReadOnly = 0;
		} else {
			memory_exception_data.failure.NotPresent = 0;
			if (is_write_requested && !(vma->vm_flags & VM_WRITE))
				memory_exception_data.failure.ReadOnly = 1;
			else
				memory_exception_data.failure.ReadOnly = 0;
			if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
				memory_exception_data.failure.NoExecute = 1;
			else
				memory_exception_data.failure.NoExecute = 0;
		}
	}

	up_read(&mm->mmap_sem);
	mmput(mm);

	mutex_lock(&p->event_mutex);

	/* Lookup events by type and signal them */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
			&memory_exception_data);

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}

void kfd_signal_hw_exception_event(unsigned int pasid)
{
	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	/* Lookup events by type and signal them */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}