/*
 * SN Platform GRU Driver
 *
 * DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"

unsigned long gru_options __read_mostly;

static struct device_driver gru_driver = {
        .name = "gru"
};

static struct device gru_device = {
        .init_name = "",
        .driver = &gru_driver,
};

struct device *grudev = &gru_device;

/*
 * Select a gru fault map to be used by the current cpu. Note that
 * multiple cpus may be using the same map.
 * ZZZ should "shift" be used?? Depends on HT cpu numbering
 * ZZZ should be inline but did not work on emulator
 */
int gru_cpu_fault_map_id(void)
{
        return uv_blade_processor_id() % GRU_NUM_TFM;
}
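
/*
 * Example (illustrative values only): the map id is a simple modulo, so
 * cpus whose blade processor ids differ by a multiple of GRU_NUM_TFM
 * share a fault map. If GRU_NUM_TFM were 64, blade processor ids 2 and
 * 66 would both use fault map 2; sharing a map is expected here.
 */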

/*--------- ASID Management -------------------------------------------
 *
 * Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
 * Once MAX is reached, flush the TLB & start over. However,
 * some asids may still be in use (percentage-wise, not many).
 * Search active contexts & determine the value of the first
 * asid in use ("x"s below). Set "limit" to this value.
 * This defines a block of assignable asids.
 *
 * When "limit" is reached, search forward from limit+1 and determine the
 * next block of assignable asids.
 *
 * Repeat until MAX_ASID is reached, then start over again.
 *
 * Each time MAX_ASID is reached, increment the asid generation. Since
 * the search for in-use asids only checks contexts with GRUs currently
 * assigned, asids in some contexts will be missed. Prior to loading
 * a context, the asid generation of the GTS asid is rechecked. If it
 * doesn't match the current generation, a new asid will be assigned.
 *
 *  0---------------x------------x---------------------x----|
 *    ^-next        ^-limit                                 ^-MAX_ASID
 *
 * All asid manipulation & context loading/unloading is protected by the
 * gs_lock.
 */
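
/*
 * Worked example (illustrative values): suppose MIN_ASID is 1 and active
 * contexts hold asids 0x10 and 0x30. The first scan sets "limit" to
 * 0x10, so asids 1 .. 0xf are handed out sequentially. When 0x10 is
 * reached, the next scan starts above it and sets the limit to 0x30,
 * and so on until MAX_ASID forces a TLB flush, a wrap back to MIN_ASID
 * and a new asid generation.
 */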

/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{
        gru_dbg(grudev, "gid %d\n", gru->gs_gid);
        STAT(asid_wrap);
        gru->gs_asid_gen++;
        return MIN_ASID;
}

/* Find the next chunk of unused asids */
static int gru_reset_asid_limit(struct gru_state *gru, int asid)
{
        int i, gid, inuse_asid, limit;

        gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
        STAT(asid_next);
        limit = MAX_ASID;
        if (asid >= limit)
                asid = gru_wrap_asid(gru);
        gru_flush_all_tlb(gru);
        gid = gru->gs_gid;
again:
        for (i = 0; i < GRU_NUM_CCH; i++) {
                if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
                        continue;
                inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
                gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
                        gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
                        inuse_asid, i);
                if (inuse_asid == asid) {
                        asid += ASID_INC;
                        if (asid >= limit) {
                                /*
                                 * empty range: reset the range limit and
                                 * start over
                                 */
                                limit = MAX_ASID;
                                if (asid >= MAX_ASID)
                                        asid = gru_wrap_asid(gru);
                                goto again;
                        }
                }

                if ((inuse_asid > asid) && (inuse_asid < limit))
                        limit = inuse_asid;
        }
        gru->gs_asid_limit = limit;
        gru->gs_asid = asid;
        gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
                asid, limit);
        return asid;
}

/* Assign a new ASID to a thread context. */
static int gru_assign_asid(struct gru_state *gru)
{
        int asid;

        gru->gs_asid += ASID_INC;
        asid = gru->gs_asid;
        if (asid >= gru->gs_asid_limit)
                asid = gru_reset_asid_limit(gru, asid);

        gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
        return asid;
}

/*
 * Clear n bits in a word. Return a word indicating the bits that were cleared.
 * Optionally, build an array of chars that contain the bit numbers allocated.
 */
static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
                                       char *idx)
{
        unsigned long bits = 0;
        int i;

        while (n--) {
                i = find_first_bit(p, mmax);
                if (i == mmax)
                        BUG();
                __clear_bit(i, p);
                __set_bit(i, &bits);
                if (idx)
                        *idx++ = i;
        }
        return bits;
}
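
/*
 * Example (illustrative): with *p == 0xb (binary 1011) and n == 2,
 * reserve_resources() clears bits 0 and 1, leaving *p == 0x8 and
 * returning 0x3; if idx is non-NULL it receives the bit numbers {0, 1}.
 */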

unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
                                       char *cbmap)
{
        return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
                                 cbmap);
}

unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
                                       char *dsmap)
{
        return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
                                 dsmap);
}

static void reserve_gru_resources(struct gru_state *gru,
                                  struct gru_thread_state *gts)
{
        gru->gs_active_contexts++;
        gts->ts_cbr_map =
            gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
                                     gts->ts_cbr_idx);
        gts->ts_dsr_map =
            gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
}

static void free_gru_resources(struct gru_state *gru,
                               struct gru_thread_state *gts)
{
        gru->gs_active_contexts--;
        gru->gs_cbr_map |= gts->ts_cbr_map;
        gru->gs_dsr_map |= gts->ts_dsr_map;
}

/*
 * Check if a GRU has sufficient free resources to satisfy an allocation
 * request. Note: GRU locks may or may not be held when this is called. If
 * not held, recheck after acquiring the appropriate locks.
 *
 * Returns 1 if sufficient resources, 0 if not
 */
static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
                               int dsr_au_count, int max_active_contexts)
{
        return hweight64(gru->gs_cbr_map) >= cbr_au_count
                && hweight64(gru->gs_dsr_map) >= dsr_au_count
                && gru->gs_active_contexts < max_active_contexts;
}
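
/*
 * Example (illustrative): if gs_cbr_map == 0xf0, hweight64() reports 4
 * free CBR allocation units, so a request with cbr_au_count <= 4 passes
 * the first test. Callers that scan without gs_lock held (e.g.
 * gru_assign_gru_context()) must recheck under the lock before reserving.
 */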

/*
 * TLB management requires tracking all GRU chiplets that have loaded a GSEG
 * context.
 */
static int gru_load_mm_tracker(struct gru_state *gru,
                               struct gru_thread_state *gts)
{
        struct gru_mm_struct *gms = gts->ts_gms;
        struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
        unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
        int asid;

        spin_lock(&gms->ms_asid_lock);
        asid = asids->mt_asid;

        spin_lock(&gru->gs_asid_lock);
        if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
                          gru->gs_asid_gen)) {
                asid = gru_assign_asid(gru);
                asids->mt_asid = asid;
                asids->mt_asid_gen = gru->gs_asid_gen;
                STAT(asid_new);
        } else {
                STAT(asid_reuse);
        }
        spin_unlock(&gru->gs_asid_lock);

        BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
        asids->mt_ctxbitmap |= ctxbitmap;
        if (!test_bit(gru->gs_gid, gms->ms_asidmap))
                __set_bit(gru->gs_gid, gms->ms_asidmap);
        spin_unlock(&gms->ms_asid_lock);

        gru_dbg(grudev,
                "gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
                gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
                gms->ms_asidmap[0]);
        return asid;
}

static void gru_unload_mm_tracker(struct gru_state *gru,
                                  struct gru_thread_state *gts)
{
        struct gru_mm_struct *gms = gts->ts_gms;
        struct gru_mm_tracker *asids;
        unsigned short ctxbitmap;

        asids = &gms->ms_asids[gru->gs_gid];
        ctxbitmap = (1 << gts->ts_ctxnum);
        spin_lock(&gms->ms_asid_lock);
        spin_lock(&gru->gs_asid_lock);
        BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
        asids->mt_ctxbitmap ^= ctxbitmap;
        gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
                gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
        spin_unlock(&gru->gs_asid_lock);
        spin_unlock(&gms->ms_asid_lock);
}

/*
 * Decrement the reference count on a GTS structure. Free the structure
 * if the reference count goes to zero.
 */
void gts_drop(struct gru_thread_state *gts)
{
        if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
                gru_drop_mmu_notifier(gts->ts_gms);
                kfree(gts);
                STAT(gts_free);
        }
}
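
/*
 * Reference counting: a gts is created with ts_refcnt == 1 in
 * gru_alloc_gts() and an extra reference is taken while it is loaded
 * into a gru (gru_assign_gru_context()). gru_free_gru_context() and the
 * losing side of the allocation race in gru_alloc_thread_state() each
 * release their reference through gts_drop().
 */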

/*
 * Locate the GTS structure for the current thread.
 */
static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
                                                            *vdata, int tsid)
{
        struct gru_thread_state *gts;

        list_for_each_entry(gts, &vdata->vd_head, ts_next)
            if (gts->ts_tsid == tsid)
                return gts;
        return NULL;
}

/*
 * Allocate a thread state structure.
 */
struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
                int cbr_au_count, int dsr_au_count, int options, int tsid)
{
        struct gru_thread_state *gts;
        int bytes;

        bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
        bytes += sizeof(struct gru_thread_state);
        gts = kmalloc(bytes, GFP_KERNEL);
        if (!gts)
                return NULL;

        STAT(gts_alloc);
        memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
        atomic_set(&gts->ts_refcnt, 1);
        mutex_init(&gts->ts_ctxlock);
        gts->ts_cbr_au_count = cbr_au_count;
        gts->ts_dsr_au_count = dsr_au_count;
        gts->ts_user_options = options;
        gts->ts_tsid = tsid;
        gts->ts_ctxnum = NULLCTX;
        gts->ts_tlb_int_select = -1;
        gts->ts_cch_req_slice = -1;
        gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
        if (vma) {
                gts->ts_mm = current->mm;
                gts->ts_vma = vma;
                gts->ts_gms = gru_register_mmu_notifier();
                if (!gts->ts_gms)
                        goto err;
        }

        gru_dbg(grudev, "alloc gts %p\n", gts);
        return gts;

err:
        gts_drop(gts);
        return NULL;
}
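
/*
 * Layout note (inferred from gru_load_context_data() below): the
 * kmalloc'd area holds the gru_thread_state header followed by the
 * context save area: a CB/CBE handle pair for each allocated CBR, then
 * the data segment (DSR) contents.
 */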

/*
 * Allocate a vma private data structure.
 */
struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
{
        struct gru_vma_data *vdata = NULL;

        vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
        if (!vdata)
                return NULL;

        INIT_LIST_HEAD(&vdata->vd_head);
        spin_lock_init(&vdata->vd_lock);
        gru_dbg(grudev, "alloc vdata %p\n", vdata);
        return vdata;
}

/*
 * Find the thread state structure for the current thread.
 */
struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
                                        int tsid)
{
        struct gru_vma_data *vdata = vma->vm_private_data;
        struct gru_thread_state *gts;

        spin_lock(&vdata->vd_lock);
        gts = gru_find_current_gts_nolock(vdata, tsid);
        spin_unlock(&vdata->vd_lock);
        gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
        return gts;
}

/*
 * Allocate a new thread state for a GSEG. Note that concurrent threads
 * may race to create a gts for the same tsid; the loser drops its
 * allocation and uses the winner's.
 */
struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
                                        int tsid)
{
        struct gru_vma_data *vdata = vma->vm_private_data;
        struct gru_thread_state *gts, *ngts;

        gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, vdata->vd_dsr_au_count,
                            vdata->vd_user_options, tsid);
        if (!gts)
                return NULL;

        spin_lock(&vdata->vd_lock);
        ngts = gru_find_current_gts_nolock(vdata, tsid);
        if (ngts) {
                gts_drop(gts);
                gts = ngts;
                STAT(gts_double_allocate);
        } else {
                list_add(&gts->ts_next, &vdata->vd_head);
        }
        spin_unlock(&vdata->vd_lock);
        gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
        return gts;
}

/*
 * Free the GRU context assigned to the thread state.
 */
static void gru_free_gru_context(struct gru_thread_state *gts)
{
        struct gru_state *gru;

        gru = gts->ts_gru;
        gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);

        spin_lock(&gru->gs_lock);
        gru->gs_gts[gts->ts_ctxnum] = NULL;
        free_gru_resources(gru, gts);
        BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
        __clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
        gts->ts_ctxnum = NULLCTX;
        gts->ts_gru = NULL;
        gts->ts_blade = -1;
        spin_unlock(&gru->gs_lock);

        gts_drop(gts);
        STAT(free_context);
}

/*
 * Prefetching cachelines helps hardware performance.
 * (Strictly a performance enhancement. Not functionally required.)
 */
static void prefetch_data(void *p, int num, int stride)
{
        while (num-- > 0) {
                prefetchw(p);
                p += stride;
        }
}

static inline long gru_copy_handle(void *d, void *s)
{
        memcpy(d, s, GRU_HANDLE_BYTES);
        return GRU_HANDLE_BYTES;
}

static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
                                 unsigned long cbrmap, unsigned long length)
{
        int i, scr;

        prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
                      GRU_CACHE_LINE_BYTES);

        for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
                prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
                prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
                              GRU_CACHE_LINE_BYTES);
                cb += GRU_HANDLE_STRIDE;
        }
}

static void gru_load_context_data(void *save, void *grubase, int ctxnum,
                                  unsigned long cbrmap, unsigned long dsrmap,
                                  int data_valid)
{
        void *gseg, *cb, *cbe;
        unsigned long length;
        int i, scr;

        gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
        cb = gseg + GRU_CB_BASE;
        cbe = grubase + GRU_CBE_BASE;
        length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
        gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

        for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
                if (data_valid) {
                        save += gru_copy_handle(cb, save);
                        save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE,
                                                save);
                } else {
                        memset(cb, 0, GRU_CACHE_LINE_BYTES);
                        memset(cbe + i * GRU_HANDLE_STRIDE, 0,
                               GRU_CACHE_LINE_BYTES);
                }
                cb += GRU_HANDLE_STRIDE;
        }

        if (data_valid)
                memcpy(gseg + GRU_DS_BASE, save, length);
        else
                memset(gseg + GRU_DS_BASE, 0, length);
}

static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
                                    unsigned long cbrmap, unsigned long dsrmap)
{
        void *gseg, *cb, *cbe;
        unsigned long length;
        int i, scr;

        gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
        cb = gseg + GRU_CB_BASE;
        cbe = grubase + GRU_CBE_BASE;
        length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
        gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

        for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
                save += gru_copy_handle(save, cb);
                save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
                cb += GRU_HANDLE_STRIDE;
        }
        memcpy(save, gseg + GRU_DS_BASE, length);
}

void gru_unload_context(struct gru_thread_state *gts, int savestate)
{
        struct gru_state *gru = gts->ts_gru;
        struct gru_context_configuration_handle *cch;
        int ctxnum = gts->ts_ctxnum;

        if (!is_kernel_context(gts))
                zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
        cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

        gru_dbg(grudev, "gts %p\n", gts);
        lock_cch_handle(cch);
        if (cch_interrupt_sync(cch))
                BUG();

        if (!is_kernel_context(gts))
                gru_unload_mm_tracker(gru, gts);
        if (savestate) {
                gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
                                        ctxnum, gts->ts_cbr_map,
                                        gts->ts_dsr_map);
                gts->ts_data_valid = 1;
        }

        if (cch_deallocate(cch))
                BUG();
        gts->ts_force_unload = 0;       /* ts_force_unload locked by CCH lock */
        unlock_cch_handle(cch);

        gru_free_gru_context(gts);
}

/*
 * Load a GRU context by copying it from the thread data structure in memory
 * to the GRU.
 */
void gru_load_context(struct gru_thread_state *gts)
{
        struct gru_state *gru = gts->ts_gru;
        struct gru_context_configuration_handle *cch;
        int i, err, asid, ctxnum = gts->ts_ctxnum;

        gru_dbg(grudev, "gts %p\n", gts);
        cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

        lock_cch_handle(cch);
        cch->tfm_fault_bit_enable =
            (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
             || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
        cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
        if (cch->tlb_int_enable) {
                gts->ts_tlb_int_select = gru_cpu_fault_map_id();
                cch->tlb_int_select = gts->ts_tlb_int_select;
        }
        if (gts->ts_cch_req_slice >= 0) {
                cch->req_slice_set_enable = 1;
                cch->req_slice = gts->ts_cch_req_slice;
        } else {
                cch->req_slice_set_enable = 0;
        }
        cch->tfm_done_bit_enable = 0;
        cch->dsr_allocation_map = gts->ts_dsr_map;
        cch->cbr_allocation_map = gts->ts_cbr_map;

        if (is_kernel_context(gts)) {
                cch->unmap_enable = 1;
                cch->tfm_done_bit_enable = 1;
                cch->cb_int_enable = 1;
        } else {
                cch->unmap_enable = 0;
                cch->tfm_done_bit_enable = 0;
                cch->cb_int_enable = 0;
                asid = gru_load_mm_tracker(gru, gts);
                for (i = 0; i < 8; i++) {
                        cch->asid[i] = asid + i;
                        cch->sizeavail[i] = gts->ts_sizeavail;
                }
        }

        err = cch_allocate(cch);
        if (err) {
                gru_dbg(grudev,
                        "err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
                        err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
                BUG();
        }

        gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
                        gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid);

        if (cch_start(cch))
                BUG();
        unlock_cch_handle(cch);
}

/*
 * Update fields in an active CCH:
 *      - retarget interrupts on local blade
 *      - update sizeavail mask
 *      - force a delayed context unload by clearing the CCH asids. This
 *        forces TLB misses for new GRU instructions. The context is unloaded
 *        when the next TLB miss occurs.
 */
int gru_update_cch(struct gru_thread_state *gts, int force_unload)
{
        struct gru_context_configuration_handle *cch;
        struct gru_state *gru = gts->ts_gru;
        int i, ctxnum = gts->ts_ctxnum, ret = 0;

        cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

        lock_cch_handle(cch);
        if (cch->state == CCHSTATE_ACTIVE) {
                if (gru->gs_gts[gts->ts_ctxnum] != gts)
                        goto exit;
                if (cch_interrupt(cch))
                        BUG();
                if (!force_unload) {
                        for (i = 0; i < 8; i++)
                                cch->sizeavail[i] = gts->ts_sizeavail;
                        gts->ts_tlb_int_select = gru_cpu_fault_map_id();
                        cch->tlb_int_select = gru_cpu_fault_map_id();
                        cch->tfm_fault_bit_enable =
                            (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
                             || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
                } else {
                        for (i = 0; i < 8; i++)
                                cch->asid[i] = 0;
                        cch->tfm_fault_bit_enable = 0;
                        cch->tlb_int_enable = 0;
                        gts->ts_force_unload = 1;
                }
                if (cch_start(cch))
                        BUG();
                ret = 1;
        }
exit:
        unlock_cch_handle(cch);
        return ret;
}
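
/*
 * Usage note: gru_retarget_intr() below calls gru_update_cch(gts, 0) to
 * move the TLB interrupt target. The force_unload path clears the CCH
 * asids so that new GRU instructions TLB-miss; the actual unload then
 * happens on the next TLB miss, handled elsewhere in the driver.
 */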

/*
 * Update CCH tlb interrupt select. Required when all of the following
 * are true:
 *      - task's GRU context is loaded into a GRU
 *      - task is using interrupt notification for TLB faults
 *      - task has migrated to a different cpu on the same blade where
 *        it was previously running.
 */
static int gru_retarget_intr(struct gru_thread_state *gts)
{
        if (gts->ts_tlb_int_select < 0
            || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
                return 0;

        gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
                gru_cpu_fault_map_id());
        return gru_update_cch(gts, 0);
}


/*
 * Insufficient GRU resources available on the local blade. Steal a context
 * from a process. This is a hack until a _real_ resource scheduler is
 * written....
 */
#define next_ctxnum(n)  ((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(b, g)  (((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \
                                 ((g)+1) : &(b)->bs_grus[0])
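
/*
 * Example (illustrative): the steal scan starts just past the blade's
 * LRU position (bs_lru_gru, bs_lru_ctxnum). next_ctxnum() wraps the
 * context number and next_gru() then advances to the next chiplet on
 * the blade, so each loaded context is considered at most once per
 * steal attempt before the scan gives up.
 */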

static int is_gts_stealable(struct gru_thread_state *gts,
                struct gru_blade_state *bs)
{
        if (is_kernel_context(gts))
                return down_write_trylock(&bs->bs_kgts_sema);
        else
                return mutex_trylock(&gts->ts_ctxlock);
}

static void gts_stolen(struct gru_thread_state *gts,
                struct gru_blade_state *bs)
{
        if (is_kernel_context(gts)) {
                up_write(&bs->bs_kgts_sema);
                STAT(steal_kernel_context);
        } else {
                mutex_unlock(&gts->ts_ctxlock);
                STAT(steal_user_context);
        }
}

void gru_steal_context(struct gru_thread_state *gts, int blade_id)
{
        struct gru_blade_state *blade;
        struct gru_state *gru, *gru0;
        struct gru_thread_state *ngts = NULL;
        int ctxnum, ctxnum0, flag = 0, cbr, dsr;

        cbr = gts->ts_cbr_au_count;
        dsr = gts->ts_dsr_au_count;

        blade = gru_base[blade_id];
        spin_lock(&blade->bs_lock);

        ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
        gru = blade->bs_lru_gru;
        if (ctxnum == 0)
                gru = next_gru(blade, gru);
        ctxnum0 = ctxnum;
        gru0 = gru;
        while (1) {
                if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
                        break;
                spin_lock(&gru->gs_lock);
                for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
                        if (flag && gru == gru0 && ctxnum == ctxnum0)
                                break;
                        ngts = gru->gs_gts[ctxnum];
                        /*
                         * We are grabbing locks out of order, so trylock is
                         * needed. GTSs are usually not locked, so the odds of
                         * success are high. If trylock fails, try to steal a
                         * different GSEG.
                         */
                        if (ngts && is_gts_stealable(ngts, blade))
                                break;
                        ngts = NULL;
                        flag = 1;
                }
                spin_unlock(&gru->gs_lock);
                if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
                        break;
                ctxnum = 0;
                gru = next_gru(blade, gru);
        }
        blade->bs_lru_gru = gru;
        blade->bs_lru_ctxnum = ctxnum;
        spin_unlock(&blade->bs_lock);

        if (ngts) {
                gts->ustats.context_stolen++;
                ngts->ts_steal_jiffies = jiffies;
                gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1);
                gts_stolen(ngts, blade);
        } else {
                STAT(steal_context_failed);
        }
        gru_dbg(grudev,
                "stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
                " avail cb %ld, ds %ld\n",
                gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
                hweight64(gru->gs_dsr_map));
}

/*
 * Scan the GRUs on the local blade & assign a GRU context.
 */
struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
                                                int blade)
{
        struct gru_state *gru, *grux;
        int i, max_active_contexts;


again:
        gru = NULL;
        max_active_contexts = GRU_NUM_CCH;
        for_each_gru_on_blade(grux, blade, i) {
                if (check_gru_resources(grux, gts->ts_cbr_au_count,
                                        gts->ts_dsr_au_count,
                                        max_active_contexts)) {
                        gru = grux;
                        max_active_contexts = grux->gs_active_contexts;
                        if (max_active_contexts == 0)
                                break;
                }
        }

        if (gru) {
                spin_lock(&gru->gs_lock);
                if (!check_gru_resources(gru, gts->ts_cbr_au_count,
                                         gts->ts_dsr_au_count, GRU_NUM_CCH)) {
                        spin_unlock(&gru->gs_lock);
                        goto again;
                }
                reserve_gru_resources(gru, gts);
                gts->ts_gru = gru;
                gts->ts_blade = gru->gs_blade_id;
                gts->ts_ctxnum =
                    find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
                BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH);
                atomic_inc(&gts->ts_refcnt);
                gru->gs_gts[gts->ts_ctxnum] = gts;
                __set_bit(gts->ts_ctxnum, &gru->gs_context_map);
                spin_unlock(&gru->gs_lock);

                STAT(assign_context);
                gru_dbg(grudev,
                        "gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
                        gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
                        gts->ts_gru->gs_gid, gts->ts_ctxnum,
                        gts->ts_cbr_au_count, gts->ts_dsr_au_count);
        } else {
                gru_dbg(grudev, "failed to allocate a GTS %s\n", "");
                STAT(assign_context_failed);
        }

        return gru;
}

/*
 * gru_fault
 *
 * Map the user's GRU segment
 *
 * Note: GRU segments are always mmaped on GRU_GSEG_PAGESIZE boundaries.
 */
int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct gru_thread_state *gts;
        unsigned long paddr, vaddr;
        int blade_id;

        vaddr = (unsigned long)vmf->virtual_address;
        gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
                vma, vaddr, GSEG_BASE(vaddr));
        STAT(nopfn);

        /* The following check ensures vaddr is a valid address in the VMA */
        gts = gru_find_thread_state(vma, TSID(vaddr, vma));
        if (!gts)
                return VM_FAULT_SIGBUS;

again:
        mutex_lock(&gts->ts_ctxlock);
        preempt_disable();
        blade_id = uv_numa_blade_id();

        if (gts->ts_gru) {
                if (gts->ts_gru->gs_blade_id != blade_id) {
                        STAT(migrated_nopfn_unload);
                        gru_unload_context(gts, 1);
                } else {
                        if (gru_retarget_intr(gts))
                                STAT(migrated_nopfn_retarget);
                }
        }

        if (!gts->ts_gru) {
                STAT(load_user_context);
                if (!gru_assign_gru_context(gts, blade_id)) {
                        preempt_enable();
                        mutex_unlock(&gts->ts_ctxlock);
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(GRU_ASSIGN_DELAY);  /* true hack ZZZ */
                        blade_id = uv_numa_blade_id();
                        if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
                                gru_steal_context(gts, blade_id);
                        goto again;
                }
                gru_load_context(gts);
                paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
                remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
                                paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
                                vma->vm_page_prot);
        }

        preempt_enable();
        mutex_unlock(&gts->ts_ctxlock);

        return VM_FAULT_NOPAGE;
}