/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/memory.h>
#include "kfd_priv.h"
#include "kfd_events.h"
#include <linux/device.h>

/*
 * A task can only be on a single wait_queue at a time, but we need to support
 * waiting on multiple events (any/all).
 * Instead of each event simply having a wait_queue with sleeping tasks, it
 * has a singly-linked list of tasks.
 * A thread that wants to sleep creates an array of these, one for each event,
 * and adds one to each event's waiter chain.
 */
struct kfd_event_waiter {
	struct list_head waiters;
	struct task_struct *sleeping_task;

	/* Transitions to true when the event this belongs to is signaled. */
	bool activated;

	/* Event being waited on; cleared to NULL if the event is destroyed. */
	struct kfd_event *event;
	uint32_t input_index;
};

/*
 * Over-complicated pooled allocator for event notification slots.
 *
 * Each signal event needs a 64-bit signal slot where the signaler will write
 * a 1 before sending an interrupt. (This is needed because some interrupts
 * do not contain enough spare data bits to identify an event.)
 * We allocate whole pages and map them into the process VA.
 * Individual signal events are then allocated a slot in a page.
 */

struct signal_page {
	struct list_head event_pages;	/* kfd_process.signal_event_pages */
	uint64_t *kernel_address;
	uint64_t __user *user_address;
	uint32_t page_index;		/* Index into the mmap aperture. */
	unsigned int free_slots;
	unsigned long used_slot_bitmap[0];
};

#define SLOTS_PER_PAGE KFD_SIGNAL_EVENT_LIMIT
#define SLOT_BITMAP_SIZE BITS_TO_LONGS(SLOTS_PER_PAGE)
#define BITS_PER_PAGE (ilog2(SLOTS_PER_PAGE)+1)
#define SIGNAL_PAGE_SIZE (sizeof(struct signal_page) + \
				SLOT_BITMAP_SIZE * sizeof(long))

/*
 * For signal events, the event ID is used as the interrupt user data.
 * For SQ s_sendmsg interrupts, this is limited to 8 bits.
 */

#define INTERRUPT_DATA_BITS 8
#define SIGNAL_EVENT_ID_SLOT_SHIFT 0

static uint64_t *page_slots(struct signal_page *page)
{
	return page->kernel_address;
}

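/*
 * Find the first free slot in an existing signal page, mark it used and
 * reset it to the unsignaled state. Returns false when every page is full.
 * Called with the process's event_mutex held (see kfd_event_create()).
 */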
static bool allocate_free_slot(struct kfd_process *process,
				struct signal_page **out_page,
				unsigned int *out_slot_index)
{
	struct signal_page *page;

	list_for_each_entry(page, &process->signal_event_pages, event_pages) {
		if (page->free_slots > 0) {
			unsigned int slot =
				find_first_zero_bit(page->used_slot_bitmap,
							SLOTS_PER_PAGE);

			__set_bit(slot, page->used_slot_bitmap);
			page->free_slots--;

			page_slots(page)[slot] = UNSIGNALED_EVENT_SLOT;

			*out_page = page;
			*out_slot_index = slot;

			pr_debug("Allocated event signal slot in page %p, slot %d\n",
					page, slot);

			return true;
		}
	}

	pr_debug("No free event signal slots were found for process %p\n",
			process);

	return false;
}

#define list_tail_entry(head, type, member) \
	list_entry((head)->prev, type, member)

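/*
 * Allocate a new signal page for the process: the bookkeeping struct plus a
 * physically contiguous backing store with room for KFD_SIGNAL_EVENT_LIMIT
 * 64-bit slots, all preset to the unsignaled pattern. The page is linked
 * into the process's signal_event_pages list with the next free page_index.
 */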
static bool allocate_signal_page(struct file *devkfd, struct kfd_process *p)
{
	void *backing_store;
	struct signal_page *page;

	page = kzalloc(SIGNAL_PAGE_SIZE, GFP_KERNEL);
	if (!page)
		goto fail_alloc_signal_page;

	page->free_slots = SLOTS_PER_PAGE;

	backing_store = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
	if (!backing_store)
		goto fail_alloc_signal_store;

	/* prevent user-mode info leaks */
	memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
		KFD_SIGNAL_EVENT_LIMIT * 8);

	page->kernel_address = backing_store;

	if (list_empty(&p->signal_event_pages))
		page->page_index = 0;
	else
		page->page_index = list_tail_entry(&p->signal_event_pages,
						   struct signal_page,
						   event_pages)->page_index + 1;

	pr_debug("Allocated new event signal page at %p, for process %p\n",
			page, p);
	pr_debug("Page index is %d\n", page->page_index);

	list_add(&page->event_pages, &p->signal_event_pages);

	return true;

fail_alloc_signal_store:
	kfree(page);
fail_alloc_signal_page:
	return false;
}

static bool allocate_event_notification_slot(struct file *devkfd,
					struct kfd_process *p,
					struct signal_page **page,
					unsigned int *signal_slot_index)
{
	bool ret;

	ret = allocate_free_slot(p, page, signal_slot_index);
	if (!ret) {
		ret = allocate_signal_page(devkfd, p);
		if (ret)
			ret = allocate_free_slot(p, page, signal_slot_index);
	}

	return ret;
}

/* Assumes that the process's event_mutex is locked. */
static void release_event_notification_slot(struct signal_page *page,
						size_t slot_index)
{
	__clear_bit(slot_index, page->used_slot_bitmap);
	page->free_slots++;

	/* We don't free signal pages, they are retained by the process
	 * and reused until it exits.
	 */
}

static struct signal_page *lookup_signal_page_by_index(struct kfd_process *p,
						unsigned int page_index)
{
	struct signal_page *page;

	/*
	 * This is safe because we don't delete signal pages until the
	 * process exits.
	 */
	list_for_each_entry(page, &p->signal_event_pages, event_pages)
		if (page->page_index == page_index)
			return page;

	return NULL;
}

/*
 * Assumes that p->event_mutex is held and of course that p is not going
 * away (current or locked).
 */
static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
{
	struct kfd_event *ev;

	hash_for_each_possible(p->events, ev, events, id)
		if (ev->event_id == id)
			return ev;

	return NULL;
}

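/*
 * Build a signal event ID from the page index and the slot index within
 * that page. This ID is also what arrives as interrupt user data
 * (see INTERRUPT_DATA_BITS above).
 */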
static u32 make_signal_event_id(struct signal_page *page,
					 unsigned int signal_slot_index)
{
	return page->page_index |
			(signal_slot_index << SIGNAL_EVENT_ID_SLOT_SHIFT);
}

/*
 * Produce a kfd event id for a nonsignal event.
 * These are arbitrary numbers, so we do a sequential search through
 * the hash table for an unused number.
 */
static u32 make_nonsignal_event_id(struct kfd_process *p)
{
	u32 id;

	for (id = p->next_nonsignal_event_id;
		id < KFD_LAST_NONSIGNAL_EVENT_ID &&
		lookup_event_by_id(p, id);
		id++)
		;

	if (id < KFD_LAST_NONSIGNAL_EVENT_ID) {

		/*
		 * What if id == LAST_NONSIGNAL_EVENT_ID - 1?
		 * Then next_nonsignal_event_id = LAST_NONSIGNAL_EVENT_ID so
		 * the first loop fails immediately and we proceed with the
		 * wraparound loop below.
		 */
		p->next_nonsignal_event_id = id + 1;

		return id;
	}

	for (id = KFD_FIRST_NONSIGNAL_EVENT_ID;
		id < KFD_LAST_NONSIGNAL_EVENT_ID &&
		lookup_event_by_id(p, id);
		id++)
		;

	if (id < KFD_LAST_NONSIGNAL_EVENT_ID) {
		p->next_nonsignal_event_id = id + 1;
		return id;
	}

	p->next_nonsignal_event_id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	return 0;
}

static struct kfd_event *lookup_event_by_page_slot(struct kfd_process *p,
						struct signal_page *page,
						unsigned int signal_slot)
{
	return lookup_event_by_id(p, make_signal_event_id(page, signal_slot));
}

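/*
 * Set up a signal event: reserve a notification slot, record the user-mode
 * address of that slot and derive the event ID from the page/slot pair.
 * Fails with -ENOMEM once KFD_SIGNAL_EVENT_LIMIT events exist or when no
 * slot can be allocated.
 */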
static int create_signal_event(struct file *devkfd,
				struct kfd_process *p,
				struct kfd_event *ev)
{
	if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
		if (!p->signal_event_limit_reached) {
			pr_warn("Signal event wasn't created because limit was reached\n");
			p->signal_event_limit_reached = true;
		}
		return -ENOMEM;
	}

	if (!allocate_event_notification_slot(devkfd, p, &ev->signal_page,
						&ev->signal_slot_index)) {
		pr_warn("Signal event wasn't created because out of kernel memory\n");
		return -ENOMEM;
	}

	p->signal_event_count++;

	ev->user_signal_address =
			&ev->signal_page->user_address[ev->signal_slot_index];

	ev->event_id = make_signal_event_id(ev->signal_page,
						ev->signal_slot_index);

	pr_debug("Signal event number %zu created with id %d, address %p\n",
			p->signal_event_count, ev->event_id,
			ev->user_signal_address);

	return 0;
}

/*
 * No non-signal events are supported yet.
 * We create them as events that never signal.
 * Set-event calls from user mode fail for them.
 */
static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
{
	ev->event_id = make_nonsignal_event_id(p);
	if (ev->event_id == 0)
		return -ENOMEM;

	return 0;
}

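/*
 * Initialize the per-process event state: the event mutex, the event hash
 * table, the signal page list and the non-signal event ID counter.
 */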
void kfd_event_init_process(struct kfd_process *p)
{
	mutex_init(&p->event_mutex);
	hash_init(p->events);
	INIT_LIST_HEAD(&p->signal_event_pages);
	p->next_nonsignal_event_id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	p->signal_event_count = 0;
}

static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
	/* Wake up pending waiters. They will return failure */
	while (!list_empty(&ev->waiters)) {
		struct kfd_event_waiter *waiter =
			list_first_entry(&ev->waiters, struct kfd_event_waiter,
					waiters);

		waiter->event = NULL;
		/* _init because free_waiters will call list_del */
		list_del_init(&waiter->waiters);
		wake_up_process(waiter->sleeping_task);
	}

	if (ev->signal_page) {
		release_event_notification_slot(ev->signal_page,
						ev->signal_slot_index);
		p->signal_event_count--;
	}

	hash_del(&ev->events);
	kfree(ev);
}

static void destroy_events(struct kfd_process *p)
{
	struct kfd_event *ev;
	struct hlist_node *tmp;
	unsigned int hash_bkt;

	hash_for_each_safe(p->events, hash_bkt, tmp, ev, events)
		destroy_event(p, ev);
}

/*
 * We assume that the process is being destroyed and there is no need to
 * unmap the pages or keep bookkeeping data in order.
 */
static void shutdown_signal_pages(struct kfd_process *p)
{
	struct signal_page *page, *tmp;

	list_for_each_entry_safe(page, tmp, &p->signal_event_pages,
					event_pages) {
		free_pages((unsigned long)page->kernel_address,
				get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
		kfree(page);
	}
}

void kfd_event_free_process(struct kfd_process *p)
{
	destroy_events(p);
	shutdown_signal_pages(p);
}

static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL ||
					ev->type == KFD_EVENT_TYPE_DEBUG;
}

static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL;
}

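/*
 * Create a new event for the process. For signal and debug events this also
 * reports the mmap offset of the signal page and the slot index within it,
 * so that user mode can map the page and poll the signal slot directly.
 */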
int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index)
{
	int ret = 0;
	struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;

	ev->type = event_type;
	ev->auto_reset = auto_reset;
	ev->signaled = false;

	INIT_LIST_HEAD(&ev->waiters);

	*event_page_offset = 0;

	mutex_lock(&p->event_mutex);

	switch (event_type) {
	case KFD_EVENT_TYPE_SIGNAL:
	case KFD_EVENT_TYPE_DEBUG:
		ret = create_signal_event(devkfd, p, ev);
		if (!ret) {
			*event_page_offset = (ev->signal_page->page_index |
					KFD_MMAP_EVENTS_MASK);
			*event_page_offset <<= PAGE_SHIFT;
			*event_slot_index = ev->signal_slot_index;
		}
		break;
	default:
		ret = create_other_event(p, ev);
		break;
	}

	if (!ret) {
		hash_add(p->events, &ev->events, ev->event_id);

		*event_id = ev->event_id;
		*event_trigger_data = ev->event_id;
	} else {
		kfree(ev);
	}

	mutex_unlock(&p->event_mutex);

	return ret;
}

/* Assumes that p is current. */
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
{
	struct kfd_event *ev;
	int ret = 0;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev)
		destroy_event(p, ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

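/*
 * Signal an event and wake every waiter on its list. With auto_reset the
 * event only stays signaled if nobody was waiting; otherwise the wake-up
 * itself consumes the signal.
 */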
static void set_event(struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;
	struct kfd_event_waiter *next;

	/* Auto reset if the list is non-empty and we're waking someone. */
	ev->signaled = !ev->auto_reset || list_empty(&ev->waiters);

	list_for_each_entry_safe(waiter, next, &ev->waiters, waiters) {
		waiter->activated = true;

		/* _init because free_waiters will call list_del */
		list_del_init(&waiter->waiters);

		wake_up_process(waiter->sleeping_task);
	}
}

/* Assumes that p is current. */
int kfd_set_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		set_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void reset_event(struct kfd_event *ev)
{
	ev->signaled = false;
}

/* Assumes that p is current. */
int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		reset_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

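/*
 * Clear the event's signal slot back to the unsignaled pattern so that the
 * next GPU signal can be detected.
 */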
static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
{
	page_slots(ev->signal_page)[ev->signal_slot_index] =
						UNSIGNALED_EVENT_SLOT;
}

static bool is_slot_signaled(struct signal_page *page, unsigned int index)
{
	return page_slots(page)[index] != UNSIGNALED_EVENT_SLOT;
}

static void set_event_from_interrupt(struct kfd_process *p,
					struct kfd_event *ev)
{
	if (ev && event_can_be_gpu_signaled(ev)) {
		acknowledge_signal(p, ev);
		set_event(ev);
	}
}

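/*
 * Interrupt handling path: signal the event identified by the interrupt
 * payload. If fewer than INTERRUPT_DATA_BITS of the ID are valid, fall back
 * to scanning every signal slot of the process for signaled events.
 */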
void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits)
{
	struct kfd_event *ev;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	if (valid_id_bits >= INTERRUPT_DATA_BITS) {
		/* Partial ID is a full ID. */
		ev = lookup_event_by_id(p, partial_id);
		set_event_from_interrupt(p, ev);
	} else {
		/*
		 * Partial ID is in fact partial. For now we completely
		 * ignore it, but we could use any bits we did receive to
		 * search faster.
		 */
		struct signal_page *page;
		unsigned int i;

		list_for_each_entry(page, &p->signal_event_pages, event_pages)
			for (i = 0; i < SLOTS_PER_PAGE; i++)
				if (is_slot_signaled(page, i)) {
					ev = lookup_event_by_page_slot(p,
								page, i);
					set_event_from_interrupt(p, ev);
				}
	}

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}

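/*
 * Allocate and initialize one kfd_event_waiter per event being waited on;
 * each waiter records the current task so that a signaler can wake it.
 */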
static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
{
	struct kfd_event_waiter *event_waiters;
	uint32_t i;

	event_waiters = kmalloc_array(num_events,
					sizeof(struct kfd_event_waiter),
					GFP_KERNEL);

	for (i = 0; (event_waiters) && (i < num_events) ; i++) {
		INIT_LIST_HEAD(&event_waiters[i].waiters);
		event_waiters[i].sleeping_task = current;
		event_waiters[i].activated = false;
	}

	return event_waiters;
}

static int init_event_waiter_get_status(struct kfd_process *p,
		struct kfd_event_waiter *waiter,
		uint32_t event_id,
		uint32_t input_index)
{
	struct kfd_event *ev = lookup_event_by_id(p, event_id);

	if (!ev)
		return -EINVAL;

	waiter->event = ev;
	waiter->input_index = input_index;
	waiter->activated = ev->signaled;
	ev->signaled = ev->signaled && !ev->auto_reset;

	return 0;
}

static void init_event_waiter_add_to_waitlist(struct kfd_event_waiter *waiter)
{
	struct kfd_event *ev = waiter->event;

	/* Only add to the wait list if we actually need to
	 * wait on this event.
	 */
	if (!waiter->activated)
		list_add(&waiter->waiters, &ev->waiters);
}

/* test_event_condition - Test condition of events being waited for
 * @all:           Return completion only if all events have signaled
 * @num_events:    Number of events to wait for
 * @event_waiters: Array of event waiters, one per event
 *
 * Returns KFD_IOC_WAIT_RESULT_COMPLETE if all (or one) event(s) have
 * signaled. Returns KFD_IOC_WAIT_RESULT_TIMEOUT if no (or not all)
 * events have signaled. Returns KFD_IOC_WAIT_RESULT_FAIL if any of
 * the events have been destroyed.
 */
static uint32_t test_event_condition(bool all, uint32_t num_events,
				struct kfd_event_waiter *event_waiters)
{
	uint32_t i;
	uint32_t activated_count = 0;

	for (i = 0; i < num_events; i++) {
		if (!event_waiters[i].event)
			return KFD_IOC_WAIT_RESULT_FAIL;

		if (event_waiters[i].activated) {
			if (!all)
				return KFD_IOC_WAIT_RESULT_COMPLETE;

			activated_count++;
		}
	}

	return activated_count == num_events ?
		KFD_IOC_WAIT_RESULT_COMPLETE : KFD_IOC_WAIT_RESULT_TIMEOUT;
}

/*
 * Copy event specific data, if defined.
 * Currently only memory exception events have additional data to copy to user
 */
static int copy_signaled_event_data(uint32_t num_events,
		struct kfd_event_waiter *event_waiters,
		struct kfd_event_data __user *data)
{
	struct kfd_hsa_memory_exception_data *src;
	struct kfd_hsa_memory_exception_data __user *dst;
	struct kfd_event_waiter *waiter;
	struct kfd_event *event;
	uint32_t i;

	for (i = 0; i < num_events; i++) {
		waiter = &event_waiters[i];
		event = waiter->event;
		if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
			dst = &data[waiter->input_index].memory_exception_data;
			src = &event->memory_exception_data;
			if (copy_to_user(dst, src,
				sizeof(struct kfd_hsa_memory_exception_data)))
				return -EFAULT;
		}
	}

	return 0;
}

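/*
 * Convert the ioctl timeout to jiffies, mapping the special "immediate" and
 * "infinite" values to 0 and MAX_SCHEDULE_TIMEOUT respectively.
 */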
static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
{
	if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
		return 0;

	if (user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
		return MAX_SCHEDULE_TIMEOUT;

	/*
	 * msecs_to_jiffies interprets all values above 2^31-1 as infinite,
	 * but we consider them finite.
	 * This hack is wrong, but nobody is likely to notice.
	 */
	user_timeout_ms = min_t(uint32_t, user_timeout_ms, 0x7FFFFFFF);

	return msecs_to_jiffies(user_timeout_ms) + 1;
}

static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
{
	uint32_t i;

	for (i = 0; i < num_events; i++)
		list_del(&waiters[i].waiters);

	kfree(waiters);
}

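/*
 * Wait for one (or, with @all, every one) of @num_events events to signal,
 * or until the timeout expires. Waiters are registered under the
 * event_mutex, then the task sleeps in TASK_INTERRUPTIBLE and re-checks the
 * wait condition each time it is woken. *wait_result reports
 * complete/timeout/fail back to the caller.
 */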
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       uint32_t *wait_result)
{
	struct kfd_event_data __user *events =
			(struct kfd_event_data __user *) data;
	uint32_t i;
	int ret = 0;

	struct kfd_event_waiter *event_waiters = NULL;
	long timeout = user_timeout_to_jiffies(user_timeout_ms);

	event_waiters = alloc_event_waiters(num_events);
	if (!event_waiters) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&p->event_mutex);

	for (i = 0; i < num_events; i++) {
		struct kfd_event_data event_data;

		if (copy_from_user(&event_data, &events[i],
				sizeof(struct kfd_event_data))) {
			ret = -EFAULT;
			goto out_unlock;
		}

		ret = init_event_waiter_get_status(p, &event_waiters[i],
				event_data.event_id, i);
		if (ret)
			goto out_unlock;
	}

	/* Check condition once. */
	*wait_result = test_event_condition(all, num_events, event_waiters);
	if (*wait_result == KFD_IOC_WAIT_RESULT_COMPLETE) {
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);
		goto out_unlock;
	} else if (WARN_ON(*wait_result == KFD_IOC_WAIT_RESULT_FAIL)) {
		/* This should not happen. Events shouldn't be
		 * destroyed while we're holding the event_mutex
		 */
		goto out_unlock;
	}

	/* Add to wait lists if we need to wait. */
	for (i = 0; i < num_events; i++)
		init_event_waiter_add_to_waitlist(&event_waiters[i]);

	mutex_unlock(&p->event_mutex);

	while (true) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (signal_pending(current)) {
			/*
			 * This is wrong when a nonzero, non-infinite timeout
			 * is specified. We need to use
			 * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
			 * contains a union with data for each user and it's
			 * in generic kernel code that I don't want to
			 * touch yet.
			 */
			ret = -ERESTARTSYS;
			break;
		}

		/* Set task state to interruptible sleep before
		 * checking wake-up conditions. A concurrent wake-up
		 * will put the task back into runnable state. In that
		 * case schedule_timeout will not put the task to
		 * sleep and we'll get a chance to re-check the
		 * updated conditions almost immediately. Otherwise,
		 * this race condition would lead to a soft hang or a
		 * very long sleep.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		*wait_result = test_event_condition(all, num_events,
						    event_waiters);
		if (*wait_result != KFD_IOC_WAIT_RESULT_TIMEOUT)
			break;

		if (timeout <= 0)
			break;

		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	/* copy_signaled_event_data may sleep. So this has to happen
	 * after the task state is set back to RUNNING.
	 */
	if (!ret && *wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);

	mutex_lock(&p->event_mutex);
out_unlock:
	free_waiters(num_events, event_waiters);
	mutex_unlock(&p->event_mutex);
out:
	if (ret)
		*wait_result = KFD_IOC_WAIT_RESULT_FAIL;
	else if (*wait_result == KFD_IOC_WAIT_RESULT_FAIL)
		ret = -EIO;

	return ret;
}

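/*
 * Map a process's signal page into user space. vm_pgoff selects the signal
 * page by index; the requested size must match the signal page size
 * (compared by page order).
 */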
int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
{
	unsigned int page_index;
	unsigned long pfn;
	struct signal_page *page;

	/* Check that the requested size matches the signal page size. */
	if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) !=
			get_order(vma->vm_end - vma->vm_start)) {
		pr_err("Event page mmap requested illegal size\n");
		return -EINVAL;
	}

	page_index = vma->vm_pgoff;

	page = lookup_signal_page_by_index(p, page_index);
	if (!page) {
		/* Probably KFD bug, but mmap is user-accessible. */
		pr_debug("Signal page could not be found for page_index %u\n",
				page_index);
		return -EINVAL;
	}

	pfn = __pa(page->kernel_address);
	pfn >>= PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
		       | VM_DONTDUMP | VM_PFNMAP;

	pr_debug("Mapping signal page\n");
	pr_debug(" start user address == 0x%08lx\n", vma->vm_start);
	pr_debug(" end user address == 0x%08lx\n", vma->vm_end);
	pr_debug(" pfn == 0x%016lX\n", pfn);
	pr_debug(" vm_flags == 0x%08lX\n", vma->vm_flags);
	pr_debug(" size == 0x%08lX\n",
			vma->vm_end - vma->vm_start);

	page->user_address = (uint64_t __user *)vma->vm_start;

	/* Map the page into the user process. */
	return remap_pfn_range(vma, vma->vm_start, pfn,
			vma->vm_end - vma->vm_start, vma->vm_page_prot);
}

/*
 * Assumes that p->event_mutex is held and of course
 * that p is not going away (current or locked).
 */
static void lookup_events_by_type_and_signal(struct kfd_process *p,
		int type, void *event_data)
{
	struct kfd_hsa_memory_exception_data *ev_data;
	struct kfd_event *ev;
	int bkt;
	bool send_signal = true;

	ev_data = (struct kfd_hsa_memory_exception_data *) event_data;

	hash_for_each(p->events, bkt, ev, events)
		if (ev->type == type) {
			send_signal = false;
			dev_dbg(kfd_device,
					"Event found: id %X type %d",
					ev->event_id, ev->type);
			set_event(ev);
			if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
				ev->memory_exception_data = *ev_data;
		}

	/* Send SIGTERM if no event of type "type" has been found. */
	if (send_signal) {
		if (send_sigterm) {
			dev_warn(kfd_device,
				"Sending SIGTERM to HSA Process with PID %d ",
					p->lead_thread->pid);
			send_sig(SIGTERM, p->lead_thread, 0);
		} else {
			dev_err(kfd_device,
				"HSA Process (PID %d) got unhandled exception",
				p->lead_thread->pid);
		}
	}
}

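/*
 * Handle an IOMMU fault notification for a process: build memory exception
 * data from the faulting address and the process's VMAs, then signal any
 * KFD_EVENT_TYPE_MEMORY events the process has registered.
 */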
void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
		unsigned long address, bool is_write_requested,
		bool is_execute_requested)
{
	struct kfd_hsa_memory_exception_data memory_exception_data;
	struct vm_area_struct *vma;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	struct mm_struct *mm;

	if (!p)
		return; /* Presumably process exited. */

	/* Take a safe reference to the mm_struct, which may otherwise
	 * disappear even while the kfd_process is still referenced.
	 */
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		mutex_unlock(&p->mutex);
		return; /* Process is exiting */
	}

	memset(&memory_exception_data, 0, sizeof(memory_exception_data));

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	memory_exception_data.gpu_id = dev->id;
	memory_exception_data.va = address;
	/* Set failure reason */
	memory_exception_data.failure.NotPresent = 1;
	memory_exception_data.failure.NoExecute = 0;
	memory_exception_data.failure.ReadOnly = 0;
	if (vma) {
		if (vma->vm_start > address) {
			memory_exception_data.failure.NotPresent = 1;
			memory_exception_data.failure.NoExecute = 0;
			memory_exception_data.failure.ReadOnly = 0;
		} else {
			memory_exception_data.failure.NotPresent = 0;
			if (is_write_requested && !(vma->vm_flags & VM_WRITE))
				memory_exception_data.failure.ReadOnly = 1;
			else
				memory_exception_data.failure.ReadOnly = 0;
			if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
				memory_exception_data.failure.NoExecute = 1;
			else
				memory_exception_data.failure.NoExecute = 0;
		}
	}

	up_read(&mm->mmap_sem);
	mmput(mm);

	mutex_lock(&p->event_mutex);

	/* Lookup events by type and signal them */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
			&memory_exception_data);

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}

void kfd_signal_hw_exception_event(unsigned int pasid)
{
	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	/* Lookup events by type and signal them */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}