/*
 * SN Platform GRU Driver
 *
 * FAULT HANDLER FOR GRU DETECTED TLB MISSES
 *
 * This file contains code that handles TLB misses within the GRU.
 * These misses are reported either via interrupts or user polling of
 * the user CB.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <asm/pgtable.h>
#include "gru.h"
#include "grutables.h"
#include "grulib.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/*
 * Test if a physical address is a valid GRU GSEG address
 */
static inline int is_gru_paddr(unsigned long paddr)
{
	return paddr >= gru_start_paddr && paddr < gru_end_paddr;
}

/*
 * Find the vma of a GRU segment. Caller must hold mmap_sem.
 */
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, vaddr);
	if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
		return vma;
	return NULL;
}

/*
 * Find and lock the gts that contains the specified user vaddr.
 *
 * Returns:
 *	- *gts with the mmap_sem locked for read and the GTS locked.
 *	- NULL if vaddr is invalid or is not a valid GSEG vaddr.
 */

static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = NULL;

	down_read(&mm->mmap_sem);
	vma = gru_find_vma(vaddr);
	if (vma)
		gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (gts)
		mutex_lock(&gts->ts_ctxlock);
	else
		up_read(&mm->mmap_sem);
	return gts;
}

static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = NULL;

	down_write(&mm->mmap_sem);
	vma = gru_find_vma(vaddr);
	if (vma)
		gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
	if (gts) {
		mutex_lock(&gts->ts_ctxlock);
		downgrade_write(&mm->mmap_sem);
	} else {
		up_write(&mm->mmap_sem);
	}

	return gts;
}

/*
 * Unlock a GTS that was previously locked with gru_find_lock_gts().
 */
static void gru_unlock_gts(struct gru_thread_state *gts)
{
	mutex_unlock(&gts->ts_ctxlock);
	up_read(&current->mm->mmap_sem);
}
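
/*
 * Illustrative only: the expected pairing of the helpers above, as used
 * by the ioctl-style entry points later in this file. A minimal sketch,
 * not a real caller:
 *
 *	struct gru_thread_state *gts;
 *
 *	gts = gru_find_lock_gts(vaddr);	   // mmap_sem read-locked, GTS locked
 *	if (!gts)
 *		return -EINVAL;
 *	// ... operate on the context ...
 *	gru_unlock_gts(gts);		   // releases both locks
 *
 * Note that gru_alloc_locked_gts() leaves the caller in the same lock
 * state (mmap_sem held for read), so gru_unlock_gts() pairs with it too.
 */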

/*
 * Set a CB.istatus to active using a user virtual address. This must be done
 * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
 * If the line is evicted, the status may be lost. The in-cache update
 * is necessary to prevent the user from seeing a stale cb.istatus that will
 * change as soon as the TFH restart is complete. Races may cause an
 * occasional failure to clear the cb.istatus, but that is ok.
 */
static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
{
	if (cbk) {
		cbk->istatus = CBS_ACTIVE;
	}
}

/*
 * Convert an interrupt IRQ to a pointer to the GRU GTS that caused the
 * interrupt. Interrupts are always sent to a cpu on the blade that contains the
 * GRU (except for headless blades which are not currently supported). A blade
 * has N grus; a block of N consecutive IRQs is assigned to the GRUs. The IRQ
 * number uniquely identifies the GRU chiplet on the local blade that caused the
 * interrupt. Always called in interrupt context.
 */
static inline struct gru_state *irq_to_gru(int irq)
{
	return &gru_base[uv_numa_blade_id()]->bs_grus[irq - IRQ_GRU];
}
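
/*
 * For example (a sketch, assuming the per-blade IRQ block layout described
 * above): with IRQ_GRU as the base of the block, an interrupt arriving on
 * irq == IRQ_GRU + 1 decodes to chiplet 1 of the local blade, i.e.
 * gru_base[uv_numa_blade_id()]->bs_grus[1].
 */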

/*
 * Read & clear a TFM
 *
 * The GRU has an array of fault maps. A map is private to a cpu.
 * Only one cpu will be accessing a cpu's fault map.
 *
 * This function scans the cpu-private fault map & clears all bits that
 * are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
 * the GRU, atomic operations must be used to clear bits.
 */
static void get_clear_fault_map(struct gru_state *gru,
				struct gru_tlb_fault_map *imap,
				struct gru_tlb_fault_map *dmap)
{
	unsigned long i, k;
	struct gru_tlb_fault_map *tfm;

	tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
	prefetchw(tfm);		/* Helps on hardware, required for emulator */
	for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
		k = tfm->fault_bits[i];
		if (k)
			k = xchg(&tfm->fault_bits[i], 0UL);
		imap->fault_bits[i] = k;
		k = tfm->done_bits[i];
		if (k)
			k = xchg(&tfm->done_bits[i], 0UL);
		dmap->fault_bits[i] = k;
	}

	/*
	 * Not functionally required but helps performance. (Required
	 * on emulator)
	 */
	gru_flush_cache(tfm);
}

/*
 * Atomic (interrupt context) & non-atomic (user context) functions to
 * convert a vaddr into a physical address. The size of the page
 * is returned in pageshift.
 *	returns:
 *		  0 - successful
 *		< 0 - error code
 *		  1 - (atomic only) try again in non-atomic context
 */
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
				 unsigned long vaddr, int write,
				 unsigned long *paddr, int *pageshift)
{
	struct page *page;

	/* ZZZ Need to handle HUGE pages */
	if (is_vm_hugetlb_page(vma))
		return -EFAULT;
	*pageshift = PAGE_SHIFT;
	if (get_user_pages
	    (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
		return -EFAULT;
	*paddr = page_to_phys(page);
	put_page(page);
	return 0;
}
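
/*
 * Note (added commentary): get_user_pages() above faults the page in and
 * takes a reference; since only the physical address is needed here, the
 * reference is dropped immediately. The GRU TLB is kept consistent by the
 * driver's mmu-notifier-driven flushes (see the ms_range_active checks
 * below), not by holding the page reference.
 */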

/*
 * atomic_pte_lookup
 *
 * Convert a user virtual address to a physical address
 * Only supports Intel large pages (2MB only) on x86_64.
 *	ZZZ - hugepage support is incomplete
 *
 * NOTE: mmap_sem is already held on entry to this function. This
 * guarantees existence of the page tables.
 */
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
	int write, unsigned long *paddr, int *pageshift)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pud_t *pudp;
	pte_t pte;

	pgdp = pgd_offset(vma->vm_mm, vaddr);
	if (unlikely(pgd_none(*pgdp)))
		goto err;

	pudp = pud_offset(pgdp, vaddr);
	if (unlikely(pud_none(*pudp)))
		goto err;

	pmdp = pmd_offset(pudp, vaddr);
	if (unlikely(pmd_none(*pmdp)))
		goto err;
#ifdef CONFIG_X86_64
	if (unlikely(pmd_large(*pmdp)))
		pte = *(pte_t *) pmdp;
	else
#endif
		pte = *pte_offset_kernel(pmdp, vaddr);

	if (unlikely(!pte_present(pte) ||
		     (write && (!pte_write(pte) || !pte_dirty(pte)))))
		return 1;

	*paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	return 0;

err:
	local_irq_enable();
	return 1;
}
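
/*
 * Worked example (added commentary): for a 2MB hugetlb mapping on x86_64,
 * the pmd_large() branch above treats the pmd entry itself as the pte, so
 * the lookup succeeds with *pageshift == HPAGE_SHIFT (21). gru_vtop()
 * below then masks the physical address to a 2MB boundary before asking
 * the GRU to drop in a single 2MB TLB entry.
 */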

static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
		    int write, int atomic, unsigned long *gpa, int *pageshift)
{
	struct mm_struct *mm = gts->ts_mm;
	struct vm_area_struct *vma;
	unsigned long paddr;
	int ret, ps;

	vma = find_vma(mm, vaddr);
	if (!vma)
		goto inval;

	/*
	 * Atomic lookup is faster & usually works even if called in non-atomic
	 * context.
	 */
	rmb();	/* Must check ms_range_active before loading PTEs */
	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
	if (ret) {
		if (atomic)
			goto upm;
		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
			goto inval;
	}
	if (is_gru_paddr(paddr))
		goto inval;
	paddr = paddr & ~((1UL << ps) - 1);
	*gpa = uv_soc_phys_ram_to_gpa(paddr);
	*pageshift = ps;
	return 0;

inval:
	return -1;
upm:
	return -2;
}
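
/*
 * Example of the masking above (added commentary): with ps == 21 (a 2MB
 * page), "paddr & ~((1UL << 21) - 1)" clears the low 21 bits, e.g.
 * 0x12345678 -> 0x12200000, so the gpa handed to the TFH is aligned to
 * the size of the page being dropped in.
 */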


/*
 * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
 * Input:
 *	cbk	Address of the CB (kernel address space). NULL if not
 *		running in user context.
 * Return:
 *	  0 = dropin, exception, or switch to UPM successful
 *	  1 = range invalidate active
 *	< 0 = error code
 *
 */
static int gru_try_dropin(struct gru_thread_state *gts,
			  struct gru_tlb_fault_handle *tfh,
			  struct gru_instruction_bits *cbk)
{
	int pageshift = 0, asid, write, ret, atomic = !cbk;
	unsigned long gpa = 0, vaddr = 0;

	/*
	 * NOTE: The GRU contains magic hardware that eliminates races between
	 * TLB invalidates and TLB dropins. If an invalidate occurs
	 * in the window between reading the TFH and the subsequent TLB dropin,
	 * the dropin is ignored. This eliminates the need for additional locks.
	 */

	/*
	 * Error if TFH state is IDLE or FMM mode & the user is issuing a UPM
	 * call. Might be a hardware race OR a stupid user. Ignore FMM because
	 * FMM is a transient state.
	 */
	if (tfh->status != TFHSTATUS_EXCEPTION) {
		gru_flush_cache(tfh);
		if (tfh->status != TFHSTATUS_EXCEPTION)
			goto failnoexception;
		STAT(tfh_stale_on_fault);
	}
	if (tfh->state == TFHSTATE_IDLE)
		goto failidle;
	if (tfh->state == TFHSTATE_MISS_FMM && cbk)
		goto failfmm;

	write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
	vaddr = tfh->missvaddr;
	asid = tfh->missasid;
	if (asid == 0)
		goto failnoasid;

	rmb();	/* TFH must be cache resident before reading ms_range_active */

	/*
	 * TFH is cache resident - at least briefly. Fail the dropin
	 * if a range invalidate is active.
	 */
	if (atomic_read(&gts->ts_gms->ms_range_active))
		goto failactive;

	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
	if (ret == -1)
		goto failinval;
	if (ret == -2)
		goto failupm;

	if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
		gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
		if (atomic || !gru_update_cch(gts, 0)) {
			gts->ts_force_cch_reload = 1;
			goto failupm;
		}
	}
	gru_cb_set_istatus_active(cbk);
	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
			  GRU_PAGESIZE(pageshift));
	STAT(tlb_dropin);
	gru_dbg(grudev,
		"%s: tfh 0x%p, vaddr 0x%lx, asid 0x%x, ps %d, gpa 0x%lx\n",
		atomic ? "atomic" : "non-atomic", tfh, vaddr, asid,
		pageshift, gpa);
	return 0;

failnoasid:
	/* No asid (delayed unload). */
	STAT(tlb_dropin_fail_no_asid);
	gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	return -EAGAIN;

failupm:
	/* Atomic failure switch CBR to UPM */
	tfh_user_polling_mode(tfh);
	STAT(tlb_dropin_fail_upm);
	gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return 1;

failfmm:
	/* FMM state on UPM call */
	gru_flush_cache(tfh);
	STAT(tlb_dropin_fail_fmm);
	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failnoexception:
	/* TFH status did not show exception pending */
	gru_flush_cache(tfh);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_no_exception);
	gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
		tfh, tfh->status, tfh->state);
	return 0;

failidle:
	/* TFH state was idle - no miss pending */
	gru_flush_cache(tfh);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_idle);
	gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failinval:
	/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
	tfh_exception(tfh);
	STAT(tlb_dropin_fail_invalid);
	gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return -EFAULT;

failactive:
	/* Range invalidate active. Switch to UPM iff atomic */
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	STAT(tlb_dropin_fail_range_active);
	gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
		tfh, vaddr);
	return 1;
}
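
/*
 * Summary of the failure paths above (added commentary, derived from the
 * code): kernel-context failures (atomic, cbk == NULL) generally switch
 * the CB to user polling mode so the fault is retried from user context,
 * while user-context failures flush the stale TFH instead. Only failinval
 * marks a true exception on the TFH (-EFAULT); failnoasid returns -EAGAIN;
 * failupm and failactive return 1 so callers know to retry.
 */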

/*
 * Process an external interrupt from the GRU. This interrupt is
 * caused by a TLB miss.
 * Note that this is the interrupt handler that is registered with the
 * Linux interrupt subsystem.
 */
irqreturn_t gru_intr(int irq, void *dev_id)
{
	struct gru_state *gru;
	struct gru_tlb_fault_map imap, dmap;
	struct gru_thread_state *gts;
	struct gru_tlb_fault_handle *tfh = NULL;
	int cbrnum, ctxnum;

	STAT(intr);

	gru = irq_to_gru(irq);
	if (!gru) {
		dev_err(grudev, "GRU: invalid interrupt: cpu %d, irq %d\n",
			raw_smp_processor_id(), irq);
		return IRQ_NONE;
	}
	get_clear_fault_map(gru, &imap, &dmap);

	for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
		complete(gru->gs_blade->bs_async_wq);
		gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
			gru->gs_gid, cbrnum, gru->gs_blade->bs_async_wq->done);
	}

	for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
		tfh = get_tfh_by_index(gru, cbrnum);
		prefetchw(tfh);	/* Helps on hardware, required for emulator */

		/*
		 * When hardware sets a bit in the faultmap, it implicitly
		 * locks the GRU context so that it cannot be unloaded.
		 * The gts cannot change until a TFH start/writestart command
		 * is issued.
		 */
		ctxnum = tfh->ctxnum;
		gts = gru->gs_gts[ctxnum];

		/*
		 * This is running in interrupt context. Trylock the mmap_sem.
		 * If it fails, retry the fault in user context.
		 */
		if (!gts->ts_force_cch_reload &&
		    down_read_trylock(&gts->ts_mm->mmap_sem)) {
			gts->ustats.fmm_tlbdropin++;
			gru_try_dropin(gts, tfh, NULL);
			up_read(&gts->ts_mm->mmap_sem);
		} else {
			tfh_user_polling_mode(tfh);
			STAT(intr_mm_lock_failed);
		}
	}
	return IRQ_HANDLED;
}


static int gru_user_dropin(struct gru_thread_state *gts,
			   struct gru_tlb_fault_handle *tfh,
			   void *cb)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	int ret;

	gts->ustats.upm_tlbdropin++;
	while (1) {
		wait_event(gms->ms_wait_queue,
			   atomic_read(&gms->ms_range_active) == 0);
		prefetchw(tfh);	/* Helps on hardware, required for emulator */
		ret = gru_try_dropin(gts, tfh, cb);
		if (ret <= 0)
			return ret;
		STAT(call_os_wait_queue);
	}
}

/*
 * This interface is called as a result of a user detecting a "call OS" bit
 * in a user CB. It normally means that a TLB fault has occurred.
 *	cb - user virtual address of the CB
 */
int gru_handle_user_call_os(unsigned long cb)
{
	struct gru_tlb_fault_handle *tfh;
	struct gru_thread_state *gts;
	void *cbk;
	int ucbnum, cbrnum, ret = -EINVAL;

	STAT(call_os);
	gru_dbg(grudev, "address 0x%lx\n", cb);

	/* sanity check the cb pointer */
	ucbnum = get_cb_number((void *)cb);
	if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
		return -EINVAL;

	gts = gru_find_lock_gts(cb);
	if (!gts)
		return -EINVAL;

	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
		goto exit;

	/*
	 * If force_unload is set, the UPM TLB fault is phony. The task
	 * has migrated to another node and the GSEG must be moved. Just
	 * unload the context. The task will page fault and assign a new
	 * context.
	 */
	if (gts->ts_tgid_owner == current->tgid && gts->ts_blade >= 0 &&
	    gts->ts_blade != uv_numa_blade_id()) {
		STAT(call_os_offnode_reference);
		gts->ts_force_unload = 1;
	}

	/*
	 * CCH may contain stale data if ts_force_cch_reload is set.
	 */
	if (gts->ts_gru && gts->ts_force_cch_reload) {
		gts->ts_force_cch_reload = 0;
		gru_update_cch(gts, 0);
	}

	ret = -EAGAIN;
	cbrnum = thread_cbr_number(gts, ucbnum);
	if (gts->ts_force_unload) {
		gru_unload_context(gts, 1);
	} else if (gts->ts_gru) {
		tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
		cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
				gts->ts_ctxnum, ucbnum);
		ret = gru_user_dropin(gts, tfh, cbk);
	}
exit:
	gru_unlock_gts(gts);
	return ret;
}

/*
 * Fetch the exception detail information for a CB that terminated with
 * an exception.
 */
int gru_get_exception_detail(unsigned long arg)
{
	struct control_block_extended_exc_detail excdet;
	struct gru_control_block_extended *cbe;
	struct gru_thread_state *gts;
	int ucbnum, cbrnum, ret;

	STAT(user_exception);
	if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
		return -EFAULT;

	gru_dbg(grudev, "address 0x%lx\n", excdet.cb);
	gts = gru_find_lock_gts(excdet.cb);
	if (!gts)
		return -EINVAL;

	ucbnum = get_cb_number((void *)excdet.cb);
	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
		ret = -EINVAL;
	} else if (gts->ts_gru) {
		cbrnum = thread_cbr_number(gts, ucbnum);
		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
		gru_flush_cache(cbe);	/* CBE not coherent */
		excdet.opc = cbe->opccpy;
		excdet.exopc = cbe->exopccpy;
		excdet.ecause = cbe->ecause;
		excdet.exceptdet0 = cbe->idef1upd;
		excdet.exceptdet1 = cbe->idef3upd;
		excdet.cbrstate = cbe->cbrstate;
		excdet.cbrexecstatus = cbe->cbrexecstatus;
		gru_flush_cache(cbe);
		ret = 0;
	} else {
		ret = -EAGAIN;
	}
	gru_unlock_gts(gts);

	gru_dbg(grudev,
		"cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, "
		"ecause 0x%x, exdet0 0x%lx, exdet1 0x%x\n",
		excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate,
		excdet.cbrexecstatus, excdet.ecause, excdet.exceptdet0,
		excdet.exceptdet1);
	if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
		ret = -EFAULT;
	return ret;
}

/*
 * User request to unload a context. Content is saved for possible reload.
 */
static int gru_unload_all_contexts(void)
{
	struct gru_thread_state *gts;
	struct gru_state *gru;
	int gid, ctxnum;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	foreach_gid(gid) {
		gru = GID_TO_GRU(gid);
		spin_lock(&gru->gs_lock);
		for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
			gts = gru->gs_gts[ctxnum];
			if (gts && mutex_trylock(&gts->ts_ctxlock)) {
				spin_unlock(&gru->gs_lock);
				gru_unload_context(gts, 1);
				mutex_unlock(&gts->ts_ctxlock);
				spin_lock(&gru->gs_lock);
			}
		}
		spin_unlock(&gru->gs_lock);
	}
	return 0;
}
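
/*
 * Locking note (added commentary): gs_lock is a spinlock, so it is dropped
 * above while gru_unload_context() may sleep, then reacquired to continue
 * the scan. mutex_trylock() rather than mutex_lock() is used because
 * blocking on the context mutex while holding the spinlock would be
 * invalid; busy contexts are simply skipped.
 */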

int gru_user_unload_context(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_unload_context_req req;

	STAT(user_unload_context);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);

	if (!req.gseg)
		return gru_unload_all_contexts();

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	if (gts->ts_gru)
		gru_unload_context(gts, 1);
	gru_unlock_gts(gts);

	return 0;
}

/*
 * User request to flush a range of virtual addresses from the GRU TLB
 * (Mainly for testing).
 */
int gru_user_flush_tlb(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_flush_tlb_req req;
	struct gru_mm_struct *gms;

	STAT(user_flush_tlb);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
		req.vaddr, req.len);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	gms = gts->ts_gms;
	gru_unlock_gts(gts);
	gru_flush_tlb_range(gms, req.vaddr, req.len);

	return 0;
}

/*
 * Fetch GSEG statistics
 */
long gru_get_gseg_statistics(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_get_gseg_statistics_req req;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/*
	 * The library creates arrays of contexts for threaded programs.
	 * If no gts exists in the array, the context has never been used & all
	 * statistics are implicitly 0.
	 */
	gts = gru_find_lock_gts(req.gseg);
	if (gts) {
		memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
		gru_unlock_gts(gts);
	} else {
		memset(&req.stats, 0, sizeof(gts->ustats));
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;

	return 0;
}

/*
 * Set options for a GSEG context. This includes registering the current
 * task as the user of the GSEG slice, which is needed for TLB fault
 * interrupt targeting.
 */
int gru_set_context_option(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_set_context_option_req req;
	int ret = 0;

	STAT(set_context_option);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;
	gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);

	gts = gru_alloc_locked_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	switch (req.op) {
	case sco_blade_chiplet:
		/*
		 * Select blade/chiplet for GRU context. -1 means "no
		 * preference"; only index gru_base[] for real blade ids.
		 */
		if (req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
		    (req.val1 >= 0 && !gru_base[req.val1]) ||
		    req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB) {
			ret = -EINVAL;
		} else {
			gts->ts_user_blade_id = req.val1;
			gts->ts_user_chiplet_id = req.val0;
		}
		break;
	case sco_gseg_owner:
		/* Register the current task as the GSEG owner */
		gts->ts_tgid_owner = current->tgid;
		break;
	case sco_cch_req_slice:
		/* Set the CCH slice option */
		gts->ts_cch_req_slice = req.val1 & 3;
		break;
	default:
		ret = -EINVAL;
	}
	gru_unlock_gts(gts);

	return ret;
}