/*
 * SN Platform GRU Driver
 *
 * FAULT HANDLER FOR GRU DETECTED TLB MISSES
 *
 * This file contains code that handles TLB misses within the GRU.
 * These misses are reported either via interrupts or user polling of
 * the user CB.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <asm/pgtable.h>
#include "gru.h"
#include "grutables.h"
#include "grulib.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/*
 * Test if a physical address is a valid GRU GSEG address
 */
static inline int is_gru_paddr(unsigned long paddr)
{
	return paddr >= gru_start_paddr && paddr < gru_end_paddr;
}

/*
 * Find the vma of a GRU segment. Caller must hold mmap_sem.
 */
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, vaddr);
	if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
		return vma;
	return NULL;
}

/*
 * Find and lock the gts that contains the specified user vaddr.
 *
 * Returns:
 *	- *gts with the mmap_sem locked for read and the GTS locked.
 *	- NULL if vaddr invalid OR is not a valid GSEG vaddr.
 */
static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = NULL;

	down_read(&mm->mmap_sem);
	vma = gru_find_vma(vaddr);
	if (vma)
		gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (gts)
		mutex_lock(&gts->ts_ctxlock);
	else
		up_read(&mm->mmap_sem);
	return gts;
}

static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = ERR_PTR(-EINVAL);

	down_write(&mm->mmap_sem);
	vma = gru_find_vma(vaddr);
	if (!vma)
		goto err;

	gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
	if (IS_ERR(gts))
		goto err;
	mutex_lock(&gts->ts_ctxlock);
	downgrade_write(&mm->mmap_sem);
	return gts;

err:
	up_write(&mm->mmap_sem);
	return gts;
}
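
/*
 * Note: on success, downgrade_write() above leaves the mmap_sem held
 * for read, so a gts returned by gru_alloc_locked_gts() is in the same
 * lock state as one returned by gru_find_lock_gts() and is released
 * the same way, via gru_unlock_gts().
 */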

/*
 * Unlock a GTS that was previously locked with gru_find_lock_gts().
 */
static void gru_unlock_gts(struct gru_thread_state *gts)
{
	mutex_unlock(&gts->ts_ctxlock);
	up_read(&current->mm->mmap_sem);
}
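
/*
 * Illustrative usage sketch (not driver code) of the find/lock/unlock
 * pattern used by the ioctl handlers below; "vaddr" stands for any
 * user GSEG address:
 *
 *	gts = gru_find_lock_gts(vaddr);
 *	if (!gts)
 *		return -EINVAL;
 *	... operate on the context; mmap_sem is held for read ...
 *	gru_unlock_gts(gts);
 */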

/*
 * Set a CB.istatus to active using a user virtual address. This must be done
 * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
 * If the line is evicted, the status may be lost. The in-cache update
 * is necessary to prevent the user from seeing a stale cb.istatus that will
 * change as soon as the TFH restart is complete. Races may cause an
 * occasional failure to clear the cb.istatus, but that is ok.
 */
static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
{
	if (cbk) {
		cbk->istatus = CBS_ACTIVE;
	}
}

/*
 * Read & clear a TFM
 *
 * The GRU has an array of fault maps. A map is private to a cpu.
 * Only one cpu will be accessing a cpu's fault map.
 *
 * This function scans the cpu-private fault map & clears all bits that
 * are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
 * the GRU, atomic operations must be used to clear bits.
 */
static void get_clear_fault_map(struct gru_state *gru,
				struct gru_tlb_fault_map *imap,
				struct gru_tlb_fault_map *dmap)
{
	unsigned long i, k;
	struct gru_tlb_fault_map *tfm;

	tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
	prefetchw(tfm);		/* Helps on hardware, required for emulator */
	for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
		k = tfm->fault_bits[i];
		if (k)
			k = xchg(&tfm->fault_bits[i], 0UL);
		imap->fault_bits[i] = k;
		k = tfm->done_bits[i];
		if (k)
			k = xchg(&tfm->done_bits[i], 0UL);
		dmap->fault_bits[i] = k;
	}

	/*
	 * Not functionally required but helps performance. (Required
	 * on emulator)
	 */
	gru_flush_cache(tfm);
}
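
/*
 * Illustrative note (not driver code): a plain read-then-store clear
 * could lose faults that the GRU raises between the two operations:
 *
 *	k = tfm->fault_bits[i];		// GRU may set a new bit here...
 *	tfm->fault_bits[i] = 0;		// ...which this store would discard
 *
 * xchg() performs the read and the clear as a single atomic operation,
 * so every fault bit is harvested exactly once. The plain read before
 * the xchg() is only an optimization to skip words that are zero.
 */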

/*
 * Atomic (interrupt context) & non-atomic (user context) functions to
 * convert a vaddr into a physical address. The size of the page
 * is returned in pageshift.
 *	returns:
 *		  0 - successful
 *		< 0 - error code
 *		  1 - (atomic only) try again in non-atomic context
 */
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
				 unsigned long vaddr, int write,
				 unsigned long *paddr, int *pageshift)
{
	struct page *page;

	/* ZZZ Need to handle HUGE pages */
	if (is_vm_hugetlb_page(vma))
		return -EFAULT;
	*pageshift = PAGE_SHIFT;
	if (get_user_pages
	    (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
		return -EFAULT;
	*paddr = page_to_phys(page);
	put_page(page);
	return 0;
}
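
/*
 * Note: the page reference taken by get_user_pages() is dropped right
 * away because only the physical address is needed here. Consistency
 * appears to rely on the driver's TLB-invalidate machinery (the
 * ms_range_active checks and GRU TLB flushes on unmap), not on keeping
 * the page pinned.
 */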

/*
 * atomic_pte_lookup
 *
 * Convert a user virtual address to a physical address
 * Only supports Intel large pages (2MB only) on x86_64.
 *	ZZZ - hugepage support is incomplete
 *
 * NOTE: mmap_sem is already held on entry to this function. This
 * guarantees existence of the page tables.
 */
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
	int write, unsigned long *paddr, int *pageshift)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pud_t *pudp;
	pte_t pte;

	pgdp = pgd_offset(vma->vm_mm, vaddr);
	if (unlikely(pgd_none(*pgdp)))
		goto err;

	pudp = pud_offset(pgdp, vaddr);
	if (unlikely(pud_none(*pudp)))
		goto err;

	pmdp = pmd_offset(pudp, vaddr);
	if (unlikely(pmd_none(*pmdp)))
		goto err;
#ifdef CONFIG_X86_64
	if (unlikely(pmd_large(*pmdp)))
		pte = *(pte_t *) pmdp;
	else
#endif
		pte = *pte_offset_kernel(pmdp, vaddr);

	if (unlikely(!pte_present(pte) ||
		     (write && (!pte_write(pte) || !pte_dirty(pte)))))
		return 1;

	*paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	return 0;

err:
	local_irq_enable();
	return 1;
}
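
/*
 * Note on the write check above: a TLB dropin lets the GRU write
 * through the translation without a processor page fault, so for write
 * faults the pte must already be writable AND dirty. A clean pte is
 * instead resolved through the slow path (return 1), where
 * get_user_pages() will mark it dirty first.
 */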

static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
		    int write, int atomic, unsigned long *gpa, int *pageshift)
{
	struct mm_struct *mm = gts->ts_mm;
	struct vm_area_struct *vma;
	unsigned long paddr;
	int ret, ps;

	vma = find_vma(mm, vaddr);
	if (!vma)
		goto inval;

	/*
	 * Atomic lookup is faster & usually works even if called in non-atomic
	 * context.
	 */
	rmb();	/* Must check ms_range_active before loading PTEs */
	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
	if (ret) {
		if (atomic)
			goto upm;
		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
			goto inval;
	}
	if (is_gru_paddr(paddr))
		goto inval;
	paddr = paddr & ~((1UL << ps) - 1);
	*gpa = uv_soc_phys_ram_to_gpa(paddr);
	*pageshift = ps;
	return 0;

inval:
	return -1;
upm:
	return -2;
}
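
/*
 * gru_vtop() return values as consumed by the dropin code:
 *	 0	*gpa and *pageshift are valid
 *	-1	vaddr is invalid; the caller raises an exception on the CB
 *	-2	(atomic only) the lookup must be retried in non-atomic
 *		context; the caller switches the CB to user polling mode
 */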


/*
 * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
 * CBE cacheline so that the line will be written back to the home agent.
 * Otherwise the line may be silently dropped. This has no impact
 * except on performance.
 */
static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
{
	if (unlikely(cbe)) {
		cbe->cbrexecstatus = 0;		/* make CL dirty */
		gru_flush_cache(cbe);
	}
}

/*
 * Preload the TLB with entries that may be required. Currently, preloading
 * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
 * the end of the bcopy transfer, whichever is smaller.
 */
static void gru_preload_tlb(struct gru_state *gru,
			struct gru_thread_state *gts, int atomic,
			unsigned long fault_vaddr, int asid, int write,
			unsigned char tlb_preload_count,
			struct gru_tlb_fault_handle *tfh,
			struct gru_control_block_extended *cbe)
{
	unsigned long vaddr = 0, gpa;
	int ret, pageshift;

	if (cbe->opccpy != OP_BCOPY)
		return;

	if (fault_vaddr == cbe->cbe_baddr0)
		vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
	else if (fault_vaddr == cbe->cbe_baddr1)
		vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;

	fault_vaddr &= PAGE_MASK;
	vaddr &= PAGE_MASK;
	vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);

	while (vaddr > fault_vaddr) {
		ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
		if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
					  GRU_PAGESIZE(pageshift)))
			return;
		gru_dbg(grudev,
			"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
			atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
			vaddr, asid, write, pageshift, gpa);
		vaddr -= PAGE_SIZE;
		STAT(tlb_preload_page);
	}
}
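
/*
 * Worked example (illustrative, assuming 4k pages): a BCOPY faults at
 * fault_vaddr with tlb_preload_count = 4 and a transfer that extends 16
 * pages beyond the fault. The min() clamp above limits preloading to
 * fault_vaddr + 4 pages, and the loop then drops in entries for
 * fault_vaddr + 4, + 3, + 2 and + 1 pages. The faulting page itself is
 * excluded (vaddr > fault_vaddr); it is handled by the normal dropin in
 * gru_try_dropin().
 */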

/*
 * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
 * Input:
 *	cb	Address of user CBR. Null if not running in user context
 * Return:
 *	  0 = dropin, exception, or switch to UPM successful
 *	  1 = range invalidate active
 *	< 0 = error code
 */
static int gru_try_dropin(struct gru_thread_state *gts,
			  struct gru_tlb_fault_handle *tfh,
			  struct gru_instruction_bits *cbk)
{
	struct gru_control_block_extended *cbe = NULL;
	unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
	int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
	unsigned long gpa = 0, vaddr = 0;

	/*
	 * NOTE: The GRU contains magic hardware that eliminates races between
	 * TLB invalidates and TLB dropins. If an invalidate occurs
	 * in the window between reading the TFH and the subsequent TLB dropin,
	 * the dropin is ignored. This eliminates the need for additional locks.
	 */

	/*
	 * Prefetch the CBE if doing TLB preloading
	 */
	if (unlikely(tlb_preload_count)) {
		cbe = gru_tfh_to_cbe(tfh);
		prefetchw(cbe);
	}

	/*
	 * Error if TFH state is IDLE or FMM mode & the user is issuing a UPM
	 * call. Might be a hardware race OR a stupid user. Ignore FMM because
	 * FMM is a transient state.
	 */
	if (tfh->status != TFHSTATUS_EXCEPTION) {
		gru_flush_cache(tfh);
		sync_core();
		if (tfh->status != TFHSTATUS_EXCEPTION)
			goto failnoexception;
		STAT(tfh_stale_on_fault);
	}
	if (tfh->state == TFHSTATE_IDLE)
		goto failidle;
	if (tfh->state == TFHSTATE_MISS_FMM && cbk)
		goto failfmm;

	write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
	vaddr = tfh->missvaddr;
	asid = tfh->missasid;
	indexway = tfh->indexway;
	if (asid == 0)
		goto failnoasid;

	rmb();	/* TFH must be cache resident before reading ms_range_active */

	/*
	 * TFH is cache resident - at least briefly. Fail the dropin
	 * if a range invalidate is active.
	 */
	if (atomic_read(&gts->ts_gms->ms_range_active))
		goto failactive;

	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
	if (ret == -1)
		goto failinval;
	if (ret == -2)
		goto failupm;

	if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
		gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
		if (atomic || !gru_update_cch(gts)) {
			gts->ts_force_cch_reload = 1;
			goto failupm;
		}
	}

	if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
		gru_preload_tlb(gts->ts_gru, gts, atomic, vaddr, asid, write,
				tlb_preload_count, tfh, cbe);
		gru_flush_cache_cbe(cbe);
	}

	gru_cb_set_istatus_active(cbk);
	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
			  GRU_PAGESIZE(pageshift));
	gru_dbg(grudev,
		"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
		" rw %d, ps %d, gpa 0x%lx\n",
		atomic ? "atomic" : "non-atomic", gts->ts_gru->gs_gid, gts, tfh,
		vaddr, asid, indexway, write, pageshift, gpa);
	STAT(tlb_dropin);
	return 0;

failnoasid:
	/* No asid (delayed unload). */
	STAT(tlb_dropin_fail_no_asid);
	gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	return -EAGAIN;

failupm:
	/* Atomic failure switch CBR to UPM */
	tfh_user_polling_mode(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_upm);
	gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return 1;

failfmm:
	/* FMM state on UPM call */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_fmm);
	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failnoexception:
	/* TFH status did not show exception pending */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_no_exception);
	gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
		tfh, tfh->status, tfh->state);
	return 0;

failidle:
	/* TFH state was idle - no miss pending */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_idle);
	gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failinval:
	/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
	tfh_exception(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_invalid);
	gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return -EFAULT;

failactive:
	/* Range invalidate active. Switch to UPM iff atomic */
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_range_active);
	gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
		tfh, vaddr);
	return 1;
}
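
/*
 * Summary of the failure paths above (derived from the code): failupm
 * and failactive return 1 so that gru_user_dropin() waits and retries;
 * failnoasid returns -EAGAIN to the caller. Paths reached from
 * interrupt context (!cbk) switch the CB to user polling mode so the
 * miss is retried from user context. The terminal paths (failidle,
 * failfmm, failnoexception) flush the stale handles and return 0, and
 * failinval puts the CB into the EXCEPTION state and returns -EFAULT.
 */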

/*
 * Process an external interrupt from the GRU. This interrupt is
 * caused by a TLB miss.
 * Note that this is the interrupt handler that is registered with the
 * linux interrupt subsystem.
 */
static irqreturn_t gru_intr(int chiplet, int blade)
{
	struct gru_state *gru;
	struct gru_tlb_fault_map imap, dmap;
	struct gru_thread_state *gts;
	struct gru_tlb_fault_handle *tfh = NULL;
	int cbrnum, ctxnum;

	STAT(intr);

	gru = &gru_base[blade]->bs_grus[chiplet];
	if (!gru) {
		dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
			raw_smp_processor_id(), chiplet);
		return IRQ_NONE;
	}
	get_clear_fault_map(gru, &imap, &dmap);
	gru_dbg(grudev,
		"cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
		smp_processor_id(), chiplet, gru->gs_gid,
		imap.fault_bits[0], imap.fault_bits[1],
		dmap.fault_bits[0], dmap.fault_bits[1]);

	for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
		STAT(intr_cbr);
		complete(gru->gs_blade->bs_async_wq);
		gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
			gru->gs_gid, cbrnum, gru->gs_blade->bs_async_wq->done);
	}

	for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
		STAT(intr_tfh);
		tfh = get_tfh_by_index(gru, cbrnum);
		prefetchw(tfh);	/* Helps on hdw, required for emulator */

		/*
		 * When hardware sets a bit in the faultmap, it implicitly
		 * locks the GRU context so that it cannot be unloaded.
		 * The gts cannot change until a TFH start/writestart command
		 * is issued.
		 */
		ctxnum = tfh->ctxnum;
		gts = gru->gs_gts[ctxnum];

		/*
		 * This is running in interrupt context. Trylock the mmap_sem.
		 * If it fails, retry the fault in user context.
		 */
		if (!gts->ts_force_cch_reload &&
		    down_read_trylock(&gts->ts_mm->mmap_sem)) {
			gts->ustats.fmm_tlbdropin++;
			gru_try_dropin(gts, tfh, NULL);
			up_read(&gts->ts_mm->mmap_sem);
		} else {
			tfh_user_polling_mode(tfh);
			STAT(intr_mm_lock_failed);
		}
	}
	return IRQ_HANDLED;
}

irqreturn_t gru0_intr(int irq, void *dev_id)
{
	return gru_intr(0, uv_numa_blade_id());
}

irqreturn_t gru1_intr(int irq, void *dev_id)
{
	return gru_intr(1, uv_numa_blade_id());
}

irqreturn_t gru_intr_mblade(int irq, void *dev_id)
{
	int blade;

	for_each_possible_blade(blade) {
		if (uv_blade_nr_possible_cpus(blade))
			continue;
		gru_intr(0, blade);
		gru_intr(1, blade);
	}
	return IRQ_HANDLED;
}
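
/*
 * Note: gru0_intr() and gru1_intr() service chiplet 0/1 of the
 * interrupted cpu's own blade. gru_intr_mblade() instead sweeps both
 * chiplets of every blade that has no possible cpus (blades with cpus
 * are skipped by the uv_blade_nr_possible_cpus() test above), since
 * faults from a cpuless blade cannot be handled by a cpu local to that
 * blade.
 */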


static int gru_user_dropin(struct gru_thread_state *gts,
			   struct gru_tlb_fault_handle *tfh,
			   void *cb)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	int ret;

	gts->ustats.upm_tlbdropin++;
	while (1) {
		wait_event(gms->ms_wait_queue,
			   atomic_read(&gms->ms_range_active) == 0);
		prefetchw(tfh);	/* Helps on hdw, required for emulator */
		ret = gru_try_dropin(gts, tfh, cb);
		if (ret <= 0)
			return ret;
		STAT(call_os_wait_queue);
	}
}
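
/*
 * Note: gru_try_dropin() returns 1 while a range invalidate is active
 * or after the CB has been switched to user polling mode (failupm), so
 * the loop above sleeps until ms_range_active drops to zero and then
 * retries. Zero and negative return values are final and are passed
 * back to the caller.
 */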

/*
 * This interface is called as a result of a user detecting a "call OS" bit
 * in a user CB. Normally this means that a TLB fault has occurred.
 *	cb - user virtual address of the CB
 */
int gru_handle_user_call_os(unsigned long cb)
{
	struct gru_tlb_fault_handle *tfh;
	struct gru_thread_state *gts;
	void *cbk;
	int ucbnum, cbrnum, ret = -EINVAL;

	STAT(call_os);

	/* sanity check the cb pointer */
	ucbnum = get_cb_number((void *)cb);
	if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
		return -EINVAL;

	gts = gru_find_lock_gts(cb);
	if (!gts)
		return -EINVAL;
	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb,
		gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);

	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
		goto exit;

	gru_check_context_placement(gts);

	/*
	 * CCH may contain stale data if ts_force_cch_reload is set.
	 */
	if (gts->ts_gru && gts->ts_force_cch_reload) {
		gts->ts_force_cch_reload = 0;
		gru_update_cch(gts);
	}

	ret = -EAGAIN;
	cbrnum = thread_cbr_number(gts, ucbnum);
	if (gts->ts_gru) {
		tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
		cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
				gts->ts_ctxnum, ucbnum);
		ret = gru_user_dropin(gts, tfh, cbk);
	}
exit:
	gru_unlock_gts(gts);
	return ret;
}

/*
 * Fetch the exception detail information for a CB that terminated with
 * an exception.
 */
int gru_get_exception_detail(unsigned long arg)
{
	struct control_block_extended_exc_detail excdet;
	struct gru_control_block_extended *cbe;
	struct gru_thread_state *gts;
	int ucbnum, cbrnum, ret;

	STAT(user_exception);
	if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
		return -EFAULT;

	gts = gru_find_lock_gts(excdet.cb);
	if (!gts)
		return -EINVAL;

	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb,
		gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
	ucbnum = get_cb_number((void *)excdet.cb);
	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
		ret = -EINVAL;
	} else if (gts->ts_gru) {
		cbrnum = thread_cbr_number(gts, ucbnum);
		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
		gru_flush_cache(cbe);	/* CBE not coherent */
		sync_core();		/* make sure we have current data */
		excdet.opc = cbe->opccpy;
		excdet.exopc = cbe->exopccpy;
		excdet.ecause = cbe->ecause;
		excdet.exceptdet0 = cbe->idef1upd;
		excdet.exceptdet1 = cbe->idef3upd;
		excdet.cbrstate = cbe->cbrstate;
		excdet.cbrexecstatus = cbe->cbrexecstatus;
		gru_flush_cache_cbe(cbe);
		ret = 0;
	} else {
		ret = -EAGAIN;
	}
	gru_unlock_gts(gts);

	gru_dbg(grudev,
		"cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, "
		"ecause 0x%x, exdet0 0x%lx, exdet1 0x%x\n",
		excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate,
		excdet.cbrexecstatus, excdet.ecause, excdet.exceptdet0,
		excdet.exceptdet1);
	if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
		ret = -EFAULT;
	return ret;
}

/*
 * User request to unload a context. Content is saved for possible reload.
 */
static int gru_unload_all_contexts(void)
{
	struct gru_thread_state *gts;
	struct gru_state *gru;
	int gid, ctxnum;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	foreach_gid(gid) {
		gru = GID_TO_GRU(gid);
		spin_lock(&gru->gs_lock);
		for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
			gts = gru->gs_gts[ctxnum];
			if (gts && mutex_trylock(&gts->ts_ctxlock)) {
				spin_unlock(&gru->gs_lock);
				gru_unload_context(gts, 1);
				mutex_unlock(&gts->ts_ctxlock);
				spin_lock(&gru->gs_lock);
			}
		}
		spin_unlock(&gru->gs_lock);
	}
	return 0;
}

int gru_user_unload_context(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_unload_context_req req;

	STAT(user_unload_context);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);

	if (!req.gseg)
		return gru_unload_all_contexts();

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	if (gts->ts_gru)
		gru_unload_context(gts, 1);
	gru_unlock_gts(gts);

	return 0;
}

/*
 * User request to flush a range of virtual addresses from the GRU TLB
 * (Mainly for testing).
 */
int gru_user_flush_tlb(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_flush_tlb_req req;
	struct gru_mm_struct *gms;

	STAT(user_flush_tlb);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
		req.vaddr, req.len);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	gms = gts->ts_gms;
	gru_unlock_gts(gts);
	gru_flush_tlb_range(gms, req.vaddr, req.len);

	return 0;
}

/*
 * Fetch GSEG statistics
 */
long gru_get_gseg_statistics(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_get_gseg_statistics_req req;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/*
	 * The library creates arrays of contexts for threaded programs.
	 * If no gts exists in the array, the context has never been used & all
	 * statistics are implicitly 0.
	 */
	gts = gru_find_lock_gts(req.gseg);
	if (gts) {
		memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
		gru_unlock_gts(gts);
	} else {
		memset(&req.stats, 0, sizeof(gts->ustats));
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;

	return 0;
}

/*
 * Set options for a GSEG context. One option registers the current task
 * as the user of the GSEG slice, which is needed for TLB fault interrupt
 * targeting.
 */
int gru_set_context_option(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_set_context_option_req req;
	int ret = 0;

	STAT(set_context_option);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;
	gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n",
		req.op, req.gseg, req.val1);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts) {
		gts = gru_alloc_locked_gts(req.gseg);
		if (IS_ERR(gts))
			return PTR_ERR(gts);
	}

	switch (req.op) {
	case sco_blade_chiplet:
		/*
		 * Select blade/chiplet for GRU context. A value of -1 means
		 * "no restriction"; only non-negative blade ids may index
		 * gru_base[], so guard the array access accordingly.
		 */
		if (req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
		    (req.val1 >= 0 && !gru_base[req.val1]) ||
		    req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB) {
			ret = -EINVAL;
		} else {
			gts->ts_user_blade_id = req.val1;
			gts->ts_user_chiplet_id = req.val0;
			gru_check_context_placement(gts);
		}
		break;
	case sco_gseg_owner:
		/* Register the current task as the GSEG owner */
		gts->ts_tgid_owner = current->tgid;
		break;
	case sco_cch_req_slice:
		/* Set the CCH slice option */
		gts->ts_cch_req_slice = req.val1 & 3;
		break;
	default:
		ret = -EINVAL;
	}
	gru_unlock_gts(gts);

	return ret;
}