/*
 * SN Platform GRU Driver
 *
 * FAULT HANDLER FOR GRU DETECTED TLB MISSES
 *
 * This file contains code that handles TLB misses within the GRU.
 * These misses are reported either via interrupts or user polling of
 * the user CB.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <asm/pgtable.h>
#include "gru.h"
#include "grutables.h"
#include "grulib.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/*
 * Test if a physical address is a valid GRU GSEG address
 */
static inline int is_gru_paddr(unsigned long paddr)
{
	return paddr >= gru_start_paddr && paddr < gru_end_paddr;
}

/*
 * Find the vma of a GRU segment. Caller must hold mmap_sem.
 */
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, vaddr);
	if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
		return vma;
	return NULL;
}

/*
 * Find and lock the gts that contains the specified user vaddr.
 *
 * Returns:
 *	- *gts with the mmap_sem locked for read and the GTS locked.
 *	- NULL if vaddr is invalid or is not a valid GSEG vaddr.
 */
static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = NULL;

	down_read(&mm->mmap_sem);
	vma = gru_find_vma(vaddr);
	if (vma)
		gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (gts)
		mutex_lock(&gts->ts_ctxlock);
	else
		up_read(&mm->mmap_sem);
	return gts;
}

static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = NULL;

	down_write(&mm->mmap_sem);
	vma = gru_find_vma(vaddr);
	if (vma)
		gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
	if (gts) {
		mutex_lock(&gts->ts_ctxlock);
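		/*
		 * The write lock was needed only for allocation. Callers
		 * expect mmap_sem held for read on return (see
		 * gru_unlock_gts()), so downgrade rather than drop it.
		 */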
		downgrade_write(&mm->mmap_sem);
	} else {
		up_write(&mm->mmap_sem);
	}

	return gts;
}

/*
 * Unlock a GTS that was previously locked with gru_find_lock_gts().
 */
static void gru_unlock_gts(struct gru_thread_state *gts)
{
	mutex_unlock(&gts->ts_ctxlock);
	up_read(&current->mm->mmap_sem);
}

/*
 * Set a CB.istatus to active using a user virtual address. This must be done
 * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
 * If the line is evicted, the status may be lost. The in-cache update
 * is necessary to prevent the user from seeing a stale cb.istatus that will
 * change as soon as the TFH restart is complete. Races may cause an
 * occasional failure to clear the cb.istatus, but that is ok.
 *
 * If the cb address is not valid (should not happen, but...), nothing
 * bad will happen. The get_user()/put_user() will fail but there
 * are no bad side-effects.
 */
static void gru_cb_set_istatus_active(unsigned long __user *cb)
{
	union {
		struct gru_instruction_bits bits;
		unsigned long dw;
	} u;

	if (cb) {
		get_user(u.dw, cb);
		u.bits.istatus = CBS_ACTIVE;
		put_user(u.dw, cb);
	}
}

/*
 * Convert an interrupt IRQ to a pointer to the gru_state of the GRU chiplet
 * that caused the interrupt. Interrupts are always sent to a cpu on the blade
 * that contains the GRU (except for headless blades, which are not currently
 * supported). A blade has N grus; a block of N consecutive IRQs is assigned
 * to the GRUs. The IRQ number uniquely identifies the GRU chiplet on the
 * local blade that caused the interrupt. Always called in interrupt context.
 */
static inline struct gru_state *irq_to_gru(int irq)
{
	return &gru_base[uv_numa_blade_id()]->bs_grus[irq - IRQ_GRU];
}

/*
 * Read & clear a TFM
 *
 * The GRU has an array of fault maps. A map is private to a cpu.
 * Only one cpu will be accessing a cpu's fault map.
 *
 * This function scans the cpu-private fault map & clears all bits that
 * are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
 * the GRU, atomic operations must be used to clear bits.
 */
static void get_clear_fault_map(struct gru_state *gru,
				struct gru_tlb_fault_map *map)
{
	unsigned long i, k;
	struct gru_tlb_fault_map *tfm;

	tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
	prefetchw(tfm);		/* Helps on hardware, required for emulator */
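	/*
	 * xchg() clears each word atomically so that fault bits set by
	 * the GRU after the initial read are not lost.
	 */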
	for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
		k = tfm->fault_bits[i];
		if (k)
			k = xchg(&tfm->fault_bits[i], 0UL);
		map->fault_bits[i] = k;
	}

	/*
	 * Not functionally required but helps performance. (Required
	 * on emulator)
	 */
	gru_flush_cache(tfm);
}

/*
 * Atomic (interrupt context) & non-atomic (user context) functions to
 * convert a vaddr into a physical address. The size of the page
 * is returned in pageshift.
 *	returns:
 *		  0 - successful
 *		< 0 - error code
 *		  1 - (atomic only) try again in non-atomic context
 */
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
				 unsigned long vaddr, int write,
				 unsigned long *paddr, int *pageshift)
{
	struct page *page;

	/* ZZZ Need to handle HUGE pages */
	if (is_vm_hugetlb_page(vma))
		return -EFAULT;
	*pageshift = PAGE_SHIFT;
	if (get_user_pages(current, current->mm, vaddr, 1, write, 0, &page,
			   NULL) <= 0)
		return -EFAULT;
	*paddr = page_to_phys(page);
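	/*
	 * The page reference can be dropped immediately: races with a
	 * later unmap are handled by the GRU TLB invalidate logic (see
	 * the NOTE in gru_try_dropin()).
	 */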
	put_page(page);
	return 0;
}

/*
 * atomic_pte_lookup
 *
 * Convert a user virtual address to a physical address
 * Only supports Intel large pages (2MB only) on x86_64.
 *	ZZZ - hugepage support is incomplete
 *
 * NOTE: mmap_sem is already held on entry to this function. This
 * guarantees existence of the page tables.
 */
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
	int write, unsigned long *paddr, int *pageshift)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pud_t *pudp;
	pte_t pte;

	pgdp = pgd_offset(vma->vm_mm, vaddr);
	if (unlikely(pgd_none(*pgdp)))
		goto err;

	pudp = pud_offset(pgdp, vaddr);
	if (unlikely(pud_none(*pudp)))
		goto err;

	pmdp = pmd_offset(pudp, vaddr);
	if (unlikely(pmd_none(*pmdp)))
		goto err;
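	/* On x86_64, a 2MB large page is mapped directly by the pmd entry */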
#ifdef CONFIG_X86_64
	if (unlikely(pmd_large(*pmdp)))
		pte = *(pte_t *) pmdp;
	else
#endif
		pte = *pte_offset_kernel(pmdp, vaddr);

	if (unlikely(!pte_present(pte) ||
		     (write && (!pte_write(pte) || !pte_dirty(pte)))))
		return 1;

	*paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	return 0;

err:
	local_irq_enable();
	return 1;
}

static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
		    int write, int atomic, unsigned long *gpa, int *pageshift)
{
	struct mm_struct *mm = gts->ts_mm;
	struct vm_area_struct *vma;
	unsigned long paddr;
	int ret, ps;

	vma = find_vma(mm, vaddr);
	if (!vma)
		goto inval;

	/*
	 * Atomic lookup is faster & usually works even if called in non-atomic
	 * context.
	 */
	rmb();	/* Must check ms_range_active before loading PTEs */
	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
	if (ret) {
		if (atomic)
			goto upm;
		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
			goto inval;
	}
	if (is_gru_paddr(paddr))
		goto inval;
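	/* Truncate the paddr to the base of the (possibly large) page */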
	paddr = paddr & ~((1UL << ps) - 1);
	*gpa = uv_soc_phys_ram_to_gpa(paddr);
	*pageshift = ps;
	return 0;

inval:
	return -1;
upm:
	return -2;
}


/*
 * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
 * Input:
 *	cb	Address of user CBR. Null if not running in user context
 * Return:
 *	  0 = dropin, exception, or switch to UPM successful
 *	  1 = range invalidate active
 *	< 0 = error code
 */
static int gru_try_dropin(struct gru_thread_state *gts,
			  struct gru_tlb_fault_handle *tfh,
			  unsigned long __user *cb)
{
	int pageshift = 0, asid, write, ret, atomic = !cb;
	unsigned long gpa = 0, vaddr = 0;

	/*
	 * NOTE: The GRU contains magic hardware that eliminates races between
	 * TLB invalidates and TLB dropins. If an invalidate occurs
	 * in the window between reading the TFH and the subsequent TLB dropin,
	 * the dropin is ignored. This eliminates the need for additional locks.
	 */

	/*
	 * Error if TFH state is IDLE or FMM mode & the user is issuing a UPM
	 * call. Might be a hardware race OR a stupid user. Ignore FMM because
	 * FMM is a transient state.
	 */
	if (tfh->state == TFHSTATE_IDLE)
		goto failidle;
	if (tfh->state == TFHSTATE_MISS_FMM && cb)
		goto failfmm;

	write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
	vaddr = tfh->missvaddr;
	asid = tfh->missasid;
	if (asid == 0)
		goto failnoasid;

	rmb();	/* TFH must be cache resident before reading ms_range_active */

	/*
	 * TFH is cache resident - at least briefly. Fail the dropin
	 * if a range invalidate is active.
	 */
	if (atomic_read(&gts->ts_gms->ms_range_active))
		goto failactive;

	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
	if (ret == -1)
		goto failinval;
	if (ret == -2)
		goto failupm;

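	/*
	 * Fault resolved. Make the new CB status visible to the user, then
	 * write the TLB dropin and restart the CB.
	 */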
	gru_cb_set_istatus_active(cb);
	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
			  GRU_PAGESIZE(pageshift));
	STAT(tlb_dropin);
	gru_dbg(grudev,
		"%s: tfh 0x%p, vaddr 0x%lx, asid 0x%x, ps %d, gpa 0x%lx\n",
		ret ? "non-atomic" : "atomic", tfh, vaddr, asid,
		pageshift, gpa);
	return 0;

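	/*
	 * Failure paths. Depending on the cause and on how the fault was
	 * reported, the CB is switched to user polling mode, marked as an
	 * exception, or the stale TFH is simply flushed from the cache.
	 */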
failnoasid:
	/* No asid (delayed unload). */
	STAT(tlb_dropin_fail_no_asid);
	gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	if (!cb)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	return -EAGAIN;

failupm:
	/* Atomic failure: switch CBR to UPM */
	tfh_user_polling_mode(tfh);
	STAT(tlb_dropin_fail_upm);
	gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return 1;

failfmm:
	/* FMM state on UPM call */
	gru_flush_cache(tfh);
	STAT(tlb_dropin_fail_fmm);
	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failidle:
	/* TFH was idle - no miss pending */
	gru_flush_cache(tfh);
	if (cb)
		gru_flush_cache(cb);
	STAT(tlb_dropin_fail_idle);
	gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failinval:
	/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
	tfh_exception(tfh);
	STAT(tlb_dropin_fail_invalid);
	gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return -EFAULT;

failactive:
	/* Range invalidate active. Switch to UPM iff atomic */
	if (!cb)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	STAT(tlb_dropin_fail_range_active);
	gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
		tfh, vaddr);
	return 1;
}

/*
 * Process an external interrupt from the GRU. This interrupt is
 * caused by a TLB miss.
 * Note that this is the interrupt handler that is registered with the
 * linux interrupt subsystem.
 */
irqreturn_t gru_intr(int irq, void *dev_id)
{
	struct gru_state *gru;
	struct gru_tlb_fault_map map;
	struct gru_thread_state *gts;
	struct gru_tlb_fault_handle *tfh = NULL;
	int cbrnum, ctxnum;

	STAT(intr);

	gru = irq_to_gru(irq);
	if (!gru) {
		dev_err(grudev, "GRU: invalid interrupt: cpu %d, irq %d\n",
			raw_smp_processor_id(), irq);
		return IRQ_NONE;
	}
	get_clear_fault_map(gru, &map);
	gru_dbg(grudev, "irq %d, gru %x, map 0x%lx\n", irq, gru->gs_gid,
		map.fault_bits[0]);

	for_each_cbr_in_tfm(cbrnum, map.fault_bits) {
		tfh = get_tfh_by_index(gru, cbrnum);
		prefetchw(tfh);	/* Helps on hdw, required for emulator */

		/*
		 * When hardware sets a bit in the faultmap, it implicitly
		 * locks the GRU context so that it cannot be unloaded.
		 * The gts cannot change until a TFH start/writestart command
		 * is issued.
		 */
		ctxnum = tfh->ctxnum;
		gts = gru->gs_gts[ctxnum];

		/*
		 * This is running in interrupt context. Trylock the mmap_sem.
		 * If it fails, retry the fault in user context.
		 */
		if (down_read_trylock(&gts->ts_mm->mmap_sem)) {
			gru_try_dropin(gts, tfh, NULL);
			up_read(&gts->ts_mm->mmap_sem);
		} else {
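			/*
			 * Couldn't get mmap_sem. Switch the CB to user
			 * polling mode; the user will detect the UPM status
			 * and retry via gru_handle_user_call_os().
			 */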
			tfh_user_polling_mode(tfh);
			STAT(intr_mm_lock_failed);
		}
	}
	return IRQ_HANDLED;
}


static int gru_user_dropin(struct gru_thread_state *gts,
			   struct gru_tlb_fault_handle *tfh,
			   unsigned long __user *cb)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	int ret;

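	/*
	 * A positive return from gru_try_dropin() means a range invalidate
	 * became active; wait for it to finish, then retry the dropin.
	 */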
	while (1) {
		wait_event(gms->ms_wait_queue,
			   atomic_read(&gms->ms_range_active) == 0);
		prefetchw(tfh);	/* Helps on hdw, required for emulator */
		ret = gru_try_dropin(gts, tfh, cb);
		if (ret <= 0)
			return ret;
		STAT(call_os_wait_queue);
	}
}

/*
 * This interface is called as a result of a user detecting a "call OS" bit
 * in a user CB. It normally means that a TLB fault has occurred.
 *	cb - user virtual address of the CB
 */
int gru_handle_user_call_os(unsigned long cb)
{
	struct gru_tlb_fault_handle *tfh;
	struct gru_thread_state *gts;
	unsigned long __user *cbp;
	int ucbnum, cbrnum, ret = -EINVAL;

	STAT(call_os);
	gru_dbg(grudev, "address 0x%lx\n", cb);

	/* sanity check the cb pointer */
	ucbnum = get_cb_number((void *)cb);
	if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
		return -EINVAL;
	cbp = (unsigned long *)cb;

	gts = gru_find_lock_gts(cb);
	if (!gts)
		return -EINVAL;

	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
		goto exit;

	/*
	 * If force_unload is set, the UPM TLB fault is phony. The task
	 * has migrated to another node and the GSEG must be moved. Just
	 * unload the context. The task will page fault and assign a new
	 * context.
	 */
	if (gts->ts_tgid_owner == current->tgid && gts->ts_blade >= 0 &&
	    gts->ts_blade != uv_numa_blade_id()) {
		STAT(call_os_offnode_reference);
		gts->ts_force_unload = 1;
	}

	ret = -EAGAIN;
	cbrnum = thread_cbr_number(gts, ucbnum);
	if (gts->ts_force_unload) {
		gru_unload_context(gts, 1);
	} else if (gts->ts_gru) {
		tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
		ret = gru_user_dropin(gts, tfh, cbp);
	}
exit:
	gru_unlock_gts(gts);
	return ret;
}

/*
 * Fetch the exception detail information for a CB that terminated with
 * an exception.
 */
int gru_get_exception_detail(unsigned long arg)
{
	struct control_block_extended_exc_detail excdet;
	struct gru_control_block_extended *cbe;
	struct gru_thread_state *gts;
	int ucbnum, cbrnum, ret;

	STAT(user_exception);
	if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
		return -EFAULT;

	gru_dbg(grudev, "address 0x%lx\n", excdet.cb);
	gts = gru_find_lock_gts(excdet.cb);
	if (!gts)
		return -EINVAL;

	ucbnum = get_cb_number((void *)excdet.cb);
	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
		ret = -EINVAL;
	} else if (gts->ts_gru) {
		cbrnum = thread_cbr_number(gts, ucbnum);
		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
		prefetchw(cbe);	/* Harmless on hardware, required for emulator */
		excdet.opc = cbe->opccpy;
		excdet.exopc = cbe->exopccpy;
		excdet.ecause = cbe->ecause;
		excdet.exceptdet0 = cbe->idef1upd;
		excdet.exceptdet1 = cbe->idef3upd;
		ret = 0;
	} else {
		ret = -EAGAIN;
	}
	gru_unlock_gts(gts);

	gru_dbg(grudev, "address 0x%lx, ecause 0x%x\n", excdet.cb,
		excdet.ecause);
	if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
		ret = -EFAULT;
	return ret;
}

/*
 * Unload all contexts on all GRU chiplets. Requires CAP_SYS_ADMIN.
 */
static int gru_unload_all_contexts(void)
{
	struct gru_thread_state *gts;
	struct gru_state *gru;
	int gid, ctxnum;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	foreach_gid(gid) {
		gru = GID_TO_GRU(gid);
		spin_lock(&gru->gs_lock);
		for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
			gts = gru->gs_gts[ctxnum];
			if (gts && mutex_trylock(&gts->ts_ctxlock)) {
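				/*
				 * gs_lock is a spinlock and must be dropped
				 * across the unload; the trylock'd ts_ctxlock
				 * keeps the gts stable meanwhile.
				 */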
				spin_unlock(&gru->gs_lock);
				gru_unload_context(gts, 1);
				gru_unlock_gts(gts);
				spin_lock(&gru->gs_lock);
			}
		}
		spin_unlock(&gru->gs_lock);
	}
	return 0;
}

/*
 * User request to unload a context. Content is saved for possible reload.
 */
int gru_user_unload_context(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_unload_context_req req;

	STAT(user_unload_context);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);

	if (!req.gseg)
		return gru_unload_all_contexts();

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	if (gts->ts_gru)
		gru_unload_context(gts, 1);
	gru_unlock_gts(gts);

	return 0;
}

/*
 * User request to flush a range of virtual addresses from the GRU TLB
 * (Mainly for testing).
 */
int gru_user_flush_tlb(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_flush_tlb_req req;

	STAT(user_flush_tlb);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
		req.vaddr, req.len);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.len);
	gru_unlock_gts(gts);

	return 0;
}

/*
 * Register the current task as the user of the GSEG slice.
 * Needed for TLB fault interrupt targeting.
 */
int gru_set_task_slice(long address)
{
	struct gru_thread_state *gts;

	STAT(set_task_slice);
	gru_dbg(grudev, "address 0x%lx\n", address);
	gts = gru_alloc_locked_gts(address);
	if (!gts)
		return -EINVAL;

	gts->ts_tgid_owner = current->tgid;
	gru_unlock_gts(gts);

	return 0;
}