/*
 * SN Platform GRU Driver
 *
 * FAULT HANDLER FOR GRU DETECTED TLB MISSES
 *
 * This file contains code that handles TLB misses within the GRU.
 * These misses are reported either via interrupts or user polling of
 * the user CB.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <asm/pgtable.h>
#include "gru.h"
#include "grutables.h"
#include "grulib.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/*
 * Test if a physical address is a valid GRU GSEG address
 */
static inline int is_gru_paddr(unsigned long paddr)
{
        return paddr >= gru_start_paddr && paddr < gru_end_paddr;
}

/*
 * Find the vma of a GRU segment. Caller must hold mmap_sem.
 */
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
        struct vm_area_struct *vma;

        vma = find_vma(current->mm, vaddr);
        if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
                return vma;
        return NULL;
}

/*
 * Find and lock the gts that contains the specified user vaddr.
 *
 * Returns:
 *      - *gts with the mmap_sem locked for read and the GTS locked.
 *      - NULL if vaddr invalid OR is not a valid GSEG vaddr.
 */
static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct gru_thread_state *gts = NULL;

        down_read(&mm->mmap_sem);
        vma = gru_find_vma(vaddr);
        if (vma)
                gts = gru_find_thread_state(vma, TSID(vaddr, vma));
        if (gts)
                mutex_lock(&gts->ts_ctxlock);
        else
                up_read(&mm->mmap_sem);
        return gts;
}

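/*
 * Same as gru_find_lock_gts(), but allocates the gts if it does not
 * already exist. Returns with the gts locked and mmap_sem held for
 * read (the write lock taken for allocation is downgraded).
 */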
static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct gru_thread_state *gts = NULL;

        down_write(&mm->mmap_sem);
        vma = gru_find_vma(vaddr);
        if (vma)
                gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
        if (gts) {
                mutex_lock(&gts->ts_ctxlock);
                downgrade_write(&mm->mmap_sem);
        } else {
                up_write(&mm->mmap_sem);
        }

        return gts;
}

/*
 * Unlock a GTS that was previously locked with gru_find_lock_gts().
 */
static void gru_unlock_gts(struct gru_thread_state *gts)
{
        mutex_unlock(&gts->ts_ctxlock);
        up_read(&current->mm->mmap_sem);
}

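/*
 * Typical find/lock/unlock pairing (a sketch; see
 * gru_handle_user_call_os() below for a complete example):
 *
 *      gts = gru_find_lock_gts(vaddr);
 *      if (!gts)
 *              return -EINVAL;
 *      ...operate on the locked context...
 *      gru_unlock_gts(gts);
 */
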
/*
 * Set a CB.istatus to active using a user virtual address. This must be done
 * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
 * If the line is evicted, the status may be lost. The in-cache update
 * is necessary to prevent the user from seeing a stale cb.istatus that will
 * change as soon as the TFH restart is complete. Races may cause an
 * occasional failure to clear the cb.istatus, but that is ok.
 *
 * If the cb address is not valid (should not happen, but...), nothing
 * bad will happen. The get_user()/put_user() will fail but there
 * are no bad side-effects.
 */
static void gru_cb_set_istatus_active(unsigned long __user *cb)
{
        union {
                struct gru_instruction_bits bits;
                unsigned long dw;
        } u;

        if (cb) {
                get_user(u.dw, cb);
                u.bits.istatus = CBS_ACTIVE;
                put_user(u.dw, cb);
        }
}

/*
 * Convert an interrupt IRQ to a pointer to the GRU chiplet state for the
 * chiplet that caused the interrupt. Interrupts are always sent to a cpu
 * on the blade that contains the GRU (except for headless blades, which
 * are not currently supported). A blade has N grus; a block of N
 * consecutive IRQs is assigned to the GRUs. The IRQ number uniquely
 * identifies the GRU chiplet on the local blade that caused the
 * interrupt. Always called in interrupt context.
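 *
 * For example, irq == IRQ_GRU + n resolves to bs_grus[n], the nth GRU
 * chiplet on the local blade.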
 */
static inline struct gru_state *irq_to_gru(int irq)
{
        return &gru_base[uv_numa_blade_id()]->bs_grus[irq - IRQ_GRU];
}

/*
 * Read & clear a TFM
 *
 * The GRU has an array of fault maps. A map is private to a cpu.
 * Only one cpu will be accessing a cpu's fault map.
 *
 * This function scans the cpu-private fault map & clears all bits that
 * are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
 * the GRU, atomic operations must be used to clear bits.
 */
static void get_clear_fault_map(struct gru_state *gru,
                                struct gru_tlb_fault_map *imap,
                                struct gru_tlb_fault_map *dmap)
{
        unsigned long i, k;
        struct gru_tlb_fault_map *tfm;

        tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
        prefetchw(tfm);         /* Helps on hardware, required for emulator */
        for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
                k = tfm->fault_bits[i];
                if (k)
                        k = xchg(&tfm->fault_bits[i], 0UL);
                imap->fault_bits[i] = k;
                k = tfm->done_bits[i];
                if (k)
                        k = xchg(&tfm->done_bits[i], 0UL);
                dmap->fault_bits[i] = k;
        }

        /*
         * Not functionally required but helps performance. (Required
         * on emulator)
         */
        gru_flush_cache(tfm);
}

/*
 * Atomic (interrupt context) & non-atomic (user context) functions to
 * convert a vaddr into a physical address. The size of the page
 * is returned in pageshift.
 *      returns:
 *                0 - successful
 *              < 0 - error code
 *                1 - (atomic only) try again in non-atomic context
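 *
 * (non_atomic_pte_lookup() calls get_user_pages() and may sleep;
 * atomic_pte_lookup() only walks the page tables and never sleeps.)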
 */
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
                                 unsigned long vaddr, int write,
                                 unsigned long *paddr, int *pageshift)
{
        struct page *page;

        /* ZZZ Need to handle HUGE pages */
        if (is_vm_hugetlb_page(vma))
                return -EFAULT;
        *pageshift = PAGE_SHIFT;
        if (get_user_pages
            (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
                return -EFAULT;
        *paddr = page_to_phys(page);
        put_page(page);
        return 0;
}

/*
 * atomic_pte_lookup
 *
 * Convert a user virtual address to a physical address.
 * Only supports Intel large pages (2MB only) on x86_64.
 * ZZZ - hugepage support is incomplete.
 *
 * NOTE: mmap_sem is already held on entry to this function. This
 * guarantees existence of the page tables.
 */
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
        int write, unsigned long *paddr, int *pageshift)
{
        pgd_t *pgdp;
        pmd_t *pmdp;
        pud_t *pudp;
        pte_t pte;

        pgdp = pgd_offset(vma->vm_mm, vaddr);
        if (unlikely(pgd_none(*pgdp)))
                goto err;

        pudp = pud_offset(pgdp, vaddr);
        if (unlikely(pud_none(*pudp)))
                goto err;

        pmdp = pmd_offset(pudp, vaddr);
        if (unlikely(pmd_none(*pmdp)))
                goto err;
#ifdef CONFIG_X86_64
        if (unlikely(pmd_large(*pmdp)))
                pte = *(pte_t *) pmdp;
        else
#endif
                pte = *pte_offset_kernel(pmdp, vaddr);

        if (unlikely(!pte_present(pte) ||
                     (write && (!pte_write(pte) || !pte_dirty(pte)))))
                return 1;

        *paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
        *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
        *pageshift = PAGE_SHIFT;
#endif
        return 0;

err:
        local_irq_enable();
        return 1;
}

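/*
 * Resolve a user vaddr to a GRU global physical address (gpa) & pageshift.
 * Returns 0 on success, -1 if the vaddr is invalid, or -2 if the lookup
 * must be retried from non-atomic (UPM) context.
 */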
static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
                    int write, int atomic, unsigned long *gpa, int *pageshift)
{
        struct mm_struct *mm = gts->ts_mm;
        struct vm_area_struct *vma;
        unsigned long paddr;
        int ret, ps;

        vma = find_vma(mm, vaddr);
        if (!vma)
                goto inval;

        /*
         * Atomic lookup is faster & usually works even if called in non-atomic
         * context.
         */
        rmb();  /* Must check ms_range_active before loading PTEs */
        ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
        if (ret) {
                if (atomic)
                        goto upm;
                if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
                        goto inval;
        }
        if (is_gru_paddr(paddr))
                goto inval;
        paddr = paddr & ~((1UL << ps) - 1);
        *gpa = uv_soc_phys_ram_to_gpa(paddr);
        *pageshift = ps;
        return 0;

inval:
        return -1;
upm:
        return -2;
}

/*
 * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
 * Input:
 *      cb      Address of user CBR. Null if not running in user context
 * Return:
 *        0 = dropin, exception, or switch to UPM successful
 *        1 = range invalidate active
 *      < 0 = error code
 */
static int gru_try_dropin(struct gru_thread_state *gts,
                          struct gru_tlb_fault_handle *tfh,
                          unsigned long __user *cb)
{
        int pageshift = 0, asid, write, ret, atomic = !cb;
        unsigned long gpa = 0, vaddr = 0;

        /*
         * NOTE: The GRU contains magic hardware that eliminates races between
         * TLB invalidates and TLB dropins. If an invalidate occurs
         * in the window between reading the TFH and the subsequent TLB dropin,
         * the dropin is ignored. This eliminates the need for additional locks.
         */

        /*
         * Error if TFH state is IDLE or FMM mode & the user is issuing a UPM
         * call. Might be a hardware race OR a stupid user. Ignore FMM because
         * FMM is a transient state.
         */
        if (tfh->status != TFHSTATUS_EXCEPTION)
                goto failnoexception;
        if (tfh->state == TFHSTATE_IDLE)
                goto failidle;
        if (tfh->state == TFHSTATE_MISS_FMM && cb)
                goto failfmm;

        write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
        vaddr = tfh->missvaddr;
        asid = tfh->missasid;
        if (asid == 0)
                goto failnoasid;

        rmb();  /* TFH must be cache resident before reading ms_range_active */

        /*
         * TFH is cache resident - at least briefly. Fail the dropin
         * if a range invalidate is active.
         */
        if (atomic_read(&gts->ts_gms->ms_range_active))
                goto failactive;

        ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
        if (ret == -1)
                goto failinval;
        if (ret == -2)
                goto failupm;

        if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
                gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
                if (atomic || !gru_update_cch(gts, 0)) {
                        gts->ts_force_cch_reload = 1;
                        goto failupm;
                }
        }
        gru_cb_set_istatus_active(cb);
        tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
                          GRU_PAGESIZE(pageshift));
        STAT(tlb_dropin);
        gru_dbg(grudev,
                "%s: tfh 0x%p, vaddr 0x%lx, asid 0x%x, ps %d, gpa 0x%lx\n",
                ret ? "non-atomic" : "atomic", tfh, vaddr, asid,
                pageshift, gpa);
        return 0;

failnoasid:
        /* No asid (delayed unload). */
        STAT(tlb_dropin_fail_no_asid);
        gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
        if (!cb)
                tfh_user_polling_mode(tfh);
        else
                gru_flush_cache(tfh);
        return -EAGAIN;

failupm:
        /* Atomic failure switch CBR to UPM */
        tfh_user_polling_mode(tfh);
        STAT(tlb_dropin_fail_upm);
        gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
        return 1;

failfmm:
        /* FMM state on UPM call */
        gru_flush_cache(tfh);
        STAT(tlb_dropin_fail_fmm);
        gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
        return 0;

failnoexception:
        /* TFH status did not show exception pending */
        gru_flush_cache(tfh);
        if (cb)
                gru_flush_cache(cb);
        STAT(tlb_dropin_fail_no_exception);
        gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
                tfh, tfh->status, tfh->state);
        return 0;

failidle:
        /* TFH state was idle - no miss pending */
        gru_flush_cache(tfh);
        if (cb)
                gru_flush_cache(cb);
        STAT(tlb_dropin_fail_idle);
        gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
        return 0;

failinval:
        /* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
        tfh_exception(tfh);
        STAT(tlb_dropin_fail_invalid);
        gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
        return -EFAULT;

failactive:
        /* Range invalidate active. Switch to UPM iff atomic */
        if (!cb)
                tfh_user_polling_mode(tfh);
        else
                gru_flush_cache(tfh);
        STAT(tlb_dropin_fail_range_active);
        gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
                tfh, vaddr);
        return 1;
}

/*
 * Process an external interrupt from the GRU. This interrupt is
 * caused by a TLB miss.
 * Note that this is the interrupt handler that is registered with the
 * linux interrupt handling subsystem.
 */
irqreturn_t gru_intr(int irq, void *dev_id)
{
        struct gru_state *gru;
        struct gru_tlb_fault_map imap, dmap;
        struct gru_thread_state *gts;
        struct gru_tlb_fault_handle *tfh = NULL;
        int cbrnum, ctxnum;

        STAT(intr);

        gru = irq_to_gru(irq);
        if (!gru) {
                dev_err(grudev, "GRU: invalid interrupt: cpu %d, irq %d\n",
                        raw_smp_processor_id(), irq);
                return IRQ_NONE;
        }
        get_clear_fault_map(gru, &imap, &dmap);

        for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
                complete(gru->gs_blade->bs_async_wq);
                gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
                        gru->gs_gid, cbrnum, gru->gs_blade->bs_async_wq->done);
        }

        for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
                tfh = get_tfh_by_index(gru, cbrnum);
                prefetchw(tfh); /* Helps on hardware, required for emulator */

                /*
                 * When hardware sets a bit in the faultmap, it implicitly
                 * locks the GRU context so that it cannot be unloaded.
                 * The gts cannot change until a TFH start/writestart command
                 * is issued.
                 */
                ctxnum = tfh->ctxnum;
                gts = gru->gs_gts[ctxnum];

                /*
                 * This is running in interrupt context. Trylock the mmap_sem.
                 * If it fails, retry the fault in user context.
                 */
                if (!gts->ts_force_cch_reload &&
                    down_read_trylock(&gts->ts_mm->mmap_sem)) {
                        gru_try_dropin(gts, tfh, NULL);
                        up_read(&gts->ts_mm->mmap_sem);
                } else {
                        tfh_user_polling_mode(tfh);
                        STAT(intr_mm_lock_failed);
                }
        }
        return IRQ_HANDLED;
}

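/*
 * Drop a TLB entry on behalf of a user "call OS" request: wait for any
 * active range invalidates to finish, then retry the dropin until it
 * succeeds or returns an error.
 */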
static int gru_user_dropin(struct gru_thread_state *gts,
                           struct gru_tlb_fault_handle *tfh,
                           unsigned long __user *cb)
{
        struct gru_mm_struct *gms = gts->ts_gms;
        int ret;

        while (1) {
                wait_event(gms->ms_wait_queue,
                           atomic_read(&gms->ms_range_active) == 0);
                prefetchw(tfh); /* Helps on hardware, required for emulator */
                ret = gru_try_dropin(gts, tfh, cb);
                if (ret <= 0)
                        return ret;
                STAT(call_os_wait_queue);
        }
}

/*
 * This interface is called as a result of a user detecting a "call OS" bit
 * in a user CB. Normally this means that a TLB fault has occurred.
 *      cb - user virtual address of the CB
 */
int gru_handle_user_call_os(unsigned long cb)
{
        struct gru_tlb_fault_handle *tfh;
        struct gru_thread_state *gts;
        unsigned long __user *cbp;
        int ucbnum, cbrnum, ret = -EINVAL;

        STAT(call_os);
        gru_dbg(grudev, "address 0x%lx\n", cb);

        /* sanity check the cb pointer */
        ucbnum = get_cb_number((void *)cb);
        if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
                return -EINVAL;
        cbp = (unsigned long *)cb;

        gts = gru_find_lock_gts(cb);
        if (!gts)
                return -EINVAL;

        if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
                goto exit;

        /*
         * If force_unload is set, the UPM TLB fault is phony. The task
         * has migrated to another node and the GSEG must be moved. Just
         * unload the context. The task will page fault and assign a new
         * context.
         */
        if (gts->ts_tgid_owner == current->tgid && gts->ts_blade >= 0 &&
            gts->ts_blade != uv_numa_blade_id()) {
                STAT(call_os_offnode_reference);
                gts->ts_force_unload = 1;
        }

        /*
         * CCH may contain stale data if ts_force_cch_reload is set.
         */
        if (gts->ts_gru && gts->ts_force_cch_reload) {
                gts->ts_force_cch_reload = 0;
                gru_update_cch(gts, 0);
        }

        ret = -EAGAIN;
        cbrnum = thread_cbr_number(gts, ucbnum);
        if (gts->ts_force_unload) {
                gru_unload_context(gts, 1);
        } else if (gts->ts_gru) {
                tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
                ret = gru_user_dropin(gts, tfh, cbp);
        }
exit:
        gru_unlock_gts(gts);
        return ret;
}

/*
 * Fetch the exception detail information for a CB that terminated with
 * an exception.
 */
int gru_get_exception_detail(unsigned long arg)
{
        struct control_block_extended_exc_detail excdet;
        struct gru_control_block_extended *cbe;
        struct gru_thread_state *gts;
        int ucbnum, cbrnum, ret;

        STAT(user_exception);
        if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
                return -EFAULT;

        gru_dbg(grudev, "address 0x%lx\n", excdet.cb);
        gts = gru_find_lock_gts(excdet.cb);
        if (!gts)
                return -EINVAL;

        ucbnum = get_cb_number((void *)excdet.cb);
        if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
                ret = -EINVAL;
        } else if (gts->ts_gru) {
                cbrnum = thread_cbr_number(gts, ucbnum);
                cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
                prefetchw(cbe); /* Harmless on hardware, required for emulator */
                excdet.opc = cbe->opccpy;
                excdet.exopc = cbe->exopccpy;
                excdet.ecause = cbe->ecause;
                excdet.exceptdet0 = cbe->idef1upd;
                excdet.exceptdet1 = cbe->idef3upd;
                excdet.cbrstate = cbe->cbrstate;
                excdet.cbrexecstatus = cbe->cbrexecstatus;
                ret = 0;
        } else {
                ret = -EAGAIN;
        }
        gru_unlock_gts(gts);

        gru_dbg(grudev,
                "cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, "
                "ecause 0x%x, exdet0 0x%lx, exdet1 0x%x\n",
                excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate,
                excdet.cbrexecstatus, excdet.ecause, excdet.exceptdet0,
                excdet.exceptdet1);
        if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
                ret = -EFAULT;
        return ret;
}

/*
 * User request to unload a context. Content is saved for possible reload.
 */
static int gru_unload_all_contexts(void)
{
        struct gru_thread_state *gts;
        struct gru_state *gru;
        int gid, ctxnum;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        foreach_gid(gid) {
                gru = GID_TO_GRU(gid);
                spin_lock(&gru->gs_lock);
                for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
                        gts = gru->gs_gts[ctxnum];
                        if (gts && mutex_trylock(&gts->ts_ctxlock)) {
                                spin_unlock(&gru->gs_lock);
                                gru_unload_context(gts, 1);
                                mutex_unlock(&gts->ts_ctxlock);
                                spin_lock(&gru->gs_lock);
                        }
                }
                spin_unlock(&gru->gs_lock);
        }
        return 0;
}

int gru_user_unload_context(unsigned long arg)
{
        struct gru_thread_state *gts;
        struct gru_unload_context_req req;

        STAT(user_unload_context);
        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;

        gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);

        if (!req.gseg)
                return gru_unload_all_contexts();

        gts = gru_find_lock_gts(req.gseg);
        if (!gts)
                return -EINVAL;

        if (gts->ts_gru)
                gru_unload_context(gts, 1);
        gru_unlock_gts(gts);

        return 0;
}

/*
 * User request to flush a range of virtual addresses from the GRU TLB
 * (Mainly for testing).
 */
int gru_user_flush_tlb(unsigned long arg)
{
        struct gru_thread_state *gts;
        struct gru_flush_tlb_req req;

        STAT(user_flush_tlb);
        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;

        gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
                req.vaddr, req.len);

        gts = gru_find_lock_gts(req.gseg);
        if (!gts)
                return -EINVAL;

        gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.len);
        gru_unlock_gts(gts);

        return 0;
}

/*
 * Register the current task as the user of the GSEG slice.
 * Needed for TLB fault interrupt targeting.
 */
int gru_set_task_slice(long address)
{
        struct gru_thread_state *gts;

        STAT(set_task_slice);
        gru_dbg(grudev, "address 0x%lx\n", address);
        gts = gru_alloc_locked_gts(address);
        if (!gts)
                return -EINVAL;

        gts->ts_tgid_owner = current->tgid;
        gru_unlock_gts(gts);

        return 0;
}