/*
 * SN Platform GRU Driver
 *
 * DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/err.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"

unsigned long gru_options __read_mostly;

static struct device_driver gru_driver = {
	.name = "gru"
};

static struct device gru_device = {
	.init_name = "",
	.driver = &gru_driver,
};

struct device *grudev = &gru_device;

/*
 * Select a gru fault map to be used by the current cpu. Note that
 * multiple cpus may be using the same map.
 * ZZZ should "shift" be used?? Depends on HT cpu numbering
 * ZZZ should be inline but did not work on emulator
 */
int gru_cpu_fault_map_id(void)
{
	return uv_blade_processor_id() % GRU_NUM_TFM;
}

/*--------- ASID Management -------------------------------------------
 *
 *  Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
 *  Once MAX is reached, flush the TLB & start over. However,
 *  some asids may still be in use. There won't be many (percentage-wise)
 *  still in use. Search active contexts & determine the value of the first
 *  asid in use ("x"s below). Set "limit" to this value.
 *  This defines a block of assignable asids.
 *
 *  When "limit" is reached, search forward from limit+1 and determine the
 *  next block of assignable asids.
 *
 *  Repeat until MAX_ASID is reached, then start over again.
 *
 *  Each time MAX_ASID is reached, increment the asid generation. Since
 *  the search for in-use asids only checks contexts with GRUs currently
 *  assigned, asids in some contexts will be missed. Prior to loading
 *  a context, the asid generation of the GTS asid is rechecked. If it
 *  doesn't match the current generation, a new asid will be assigned.
 *
 *	0---------------x------------x---------------------x----|
 *	  ^-next	^-limit	   ^-MAX_ASID
 *
 * All asid manipulation is protected by the gs_asid_lock; context
 * loading/unloading is protected by the gs_lock.
 */
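
/*
 * Worked example of the scheme above. Illustrative only: the numeric
 * values are hypothetical and not taken from the gru headers or hardware.
 *
 *	MIN_ASID = 0x10, MAX_ASID = 0xff0, ASID_INC = 8
 *	asids still in use by loaded contexts: 0x38, 0x60, 0x200
 *
 *	- after a wrap, next = MIN_ASID (0x10); the scan finds 0x38 as the
 *	  lowest in-use asid above it, so limit = 0x38 and 0x10..0x30 are
 *	  assignable.
 *	- when next reaches 0x38 it collides with the in-use asid, is
 *	  bumped to 0x40, and a rescan sets the new limit to 0x60; this
 *	  repeats up through 0x200.
 *	- when next reaches MAX_ASID, gs_asid_gen is incremented and the
 *	  cycle restarts at MIN_ASID; stale generations are caught at
 *	  context-load time as described above.
 */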

/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{
	gru_dbg(grudev, "gid %d\n", gru->gs_gid);
	STAT(asid_wrap);
	gru->gs_asid_gen++;
	return MIN_ASID;
}

/* Find the next chunk of unused asids */
static int gru_reset_asid_limit(struct gru_state *gru, int asid)
{
	int i, gid, inuse_asid, limit;

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	STAT(asid_next);
	limit = MAX_ASID;
	if (asid >= limit)
		asid = gru_wrap_asid(gru);
	gru_flush_all_tlb(gru);
	gid = gru->gs_gid;
again:
	for (i = 0; i < GRU_NUM_CCH; i++) {
		if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
			continue;
		inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
		gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
			gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
			inuse_asid, i);
		if (inuse_asid == asid) {
			asid += ASID_INC;
			if (asid >= limit) {
				/*
				 * empty range: reset the range limit and
				 * start over
				 */
				limit = MAX_ASID;
				if (asid >= MAX_ASID)
					asid = gru_wrap_asid(gru);
				goto again;
			}
		}

		if ((inuse_asid > asid) && (inuse_asid < limit))
			limit = inuse_asid;
	}
	gru->gs_asid_limit = limit;
	gru->gs_asid = asid;
	gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
		asid, limit);
	return asid;
}

/* Assign a new ASID to a thread context. */
static int gru_assign_asid(struct gru_state *gru)
{
	int asid;

	gru->gs_asid += ASID_INC;
	asid = gru->gs_asid;
	if (asid >= gru->gs_asid_limit)
		asid = gru_reset_asid_limit(gru, asid);

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	return asid;
}

/*
 * Clear n bits in a word. Return a word indicating the bits that were cleared.
 * Optionally, build an array of chars that contain the bit numbers allocated.
 */
static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
				       char *idx)
{
	unsigned long bits = 0;
	int i;

	while (n--) {
		i = find_first_bit(p, mmax);
		if (i == mmax)
			BUG();
		__clear_bit(i, p);
		__set_bit(i, &bits);
		if (idx)
			*idx++ = i;
	}
	return bits;
}
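
/*
 * Illustrative trace of reserve_resources(). The bitmap values are
 * hypothetical and shown only to document the calling convention:
 *
 *	*p = 0b10110110 (AUs 1, 2, 4, 5, 7 free), n = 2, mmax = 8
 *
 *	pass 1: find_first_bit() -> 1; bit 1 cleared in *p, set in bits
 *	pass 2: find_first_bit() -> 2; bit 2 cleared in *p, set in bits
 *
 *	result: *p = 0b10110000, return value = 0b00000110, and, if idx
 *	was supplied, idx[] = {1, 2}
 */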

unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
				       char *cbmap)
{
	return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
				 cbmap);
}

unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
				       char *dsmap)
{
	return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
				 dsmap);
}

static void reserve_gru_resources(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	gru->gs_active_contexts++;
	gts->ts_cbr_map =
	    gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
				     gts->ts_cbr_idx);
	gts->ts_dsr_map =
	    gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
}

static void free_gru_resources(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	gru->gs_active_contexts--;
	gru->gs_cbr_map |= gts->ts_cbr_map;
	gru->gs_dsr_map |= gts->ts_dsr_map;
}

/*
 * Check if a GRU has sufficient free resources to satisfy an allocation
 * request. Note: GRU locks may or may not be held when this is called. If
 * not held, recheck after acquiring the appropriate locks.
 *
 * Returns 1 if sufficient resources, 0 if not
 */
static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
			       int dsr_au_count, int max_active_contexts)
{
	return hweight64(gru->gs_cbr_map) >= cbr_au_count
		&& hweight64(gru->gs_dsr_map) >= dsr_au_count
		&& gru->gs_active_contexts < max_active_contexts;
}
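
/*
 * Example of the check above (hypothetical values): with
 * gs_cbr_map == 0xf0 (4 free CBR AUs), gs_dsr_map == 0x3 (2 free DSR AUs)
 * and 10 active contexts, a request for cbr_au_count = 2, dsr_au_count = 2,
 * max_active_contexts = 16 succeeds; the same request fails if
 * max_active_contexts is 10 or less.
 */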

/*
 * TLB management requires tracking all GRU chiplets that have loaded a GSEG
 * context.
 */
static int gru_load_mm_tracker(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
	unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
	int asid;

	spin_lock(&gms->ms_asid_lock);
	asid = asids->mt_asid;

	spin_lock(&gru->gs_asid_lock);
	if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
			  gru->gs_asid_gen)) {
		asid = gru_assign_asid(gru);
		asids->mt_asid = asid;
		asids->mt_asid_gen = gru->gs_asid_gen;
		STAT(asid_new);
	} else {
		STAT(asid_reuse);
	}
	spin_unlock(&gru->gs_asid_lock);

	BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
	asids->mt_ctxbitmap |= ctxbitmap;
	if (!test_bit(gru->gs_gid, gms->ms_asidmap))
		__set_bit(gru->gs_gid, gms->ms_asidmap);
	spin_unlock(&gms->ms_asid_lock);

	gru_dbg(grudev,
		"gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
		gms->ms_asidmap[0]);
	return asid;
}
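
/*
 * Illustrative tracker state (hypothetical): if contexts 0 and 3 of a
 * GRU are loaded for the same mm, the tracker for that gid holds
 * mt_ctxbitmap == 0b1001 and the gid's bit is set in gms->ms_asidmap.
 * TLB-flush code can then restrict itself to GRUs whose bits are set
 * in ms_asidmap instead of visiting every chiplet.
 */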

static void gru_unload_mm_tracker(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids;
	unsigned short ctxbitmap;

	asids = &gms->ms_asids[gru->gs_gid];
	ctxbitmap = (1 << gts->ts_ctxnum);
	spin_lock(&gms->ms_asid_lock);
	spin_lock(&gru->gs_asid_lock);
	BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
	asids->mt_ctxbitmap ^= ctxbitmap;
	gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
	spin_unlock(&gru->gs_asid_lock);
	spin_unlock(&gms->ms_asid_lock);
}

/*
 * Decrement the reference count on a GTS structure. Free the structure
 * if the reference count goes to zero.
 */
void gts_drop(struct gru_thread_state *gts)
{
	if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
		if (gts->ts_gms)
			gru_drop_mmu_notifier(gts->ts_gms);
		kfree(gts);
		STAT(gts_free);
	}
}

/*
 * Locate the GTS structure for the current thread.
 */
static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
			    *vdata, int tsid)
{
	struct gru_thread_state *gts;

	list_for_each_entry(gts, &vdata->vd_head, ts_next)
		if (gts->ts_tsid == tsid)
			return gts;
	return NULL;
}

/*
 * Allocate a thread state structure.
 */
struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
		int cbr_au_count, int dsr_au_count, int options, int tsid)
{
	struct gru_thread_state *gts;
	struct gru_mm_struct *gms;
	int bytes;

	bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
	bytes += sizeof(struct gru_thread_state);
	gts = kmalloc(bytes, GFP_KERNEL);
	if (!gts)
		return ERR_PTR(-ENOMEM);

	STAT(gts_alloc);
	memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
	atomic_set(&gts->ts_refcnt, 1);
	mutex_init(&gts->ts_ctxlock);
	gts->ts_cbr_au_count = cbr_au_count;
	gts->ts_dsr_au_count = dsr_au_count;
	gts->ts_user_options = options;
	gts->ts_user_blade_id = -1;
	gts->ts_user_chiplet_id = -1;
	gts->ts_tsid = tsid;
	gts->ts_ctxnum = NULLCTX;
	gts->ts_tlb_int_select = -1;
	gts->ts_cch_req_slice = -1;
	gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
	if (vma) {
		gts->ts_mm = current->mm;
		gts->ts_vma = vma;
		gms = gru_register_mmu_notifier();
		if (IS_ERR(gms))
			goto err;
		gts->ts_gms = gms;
	}

	gru_dbg(grudev, "alloc gts %p\n", gts);
	return gts;

err:
	gts_drop(gts);
	return ERR_CAST(gms);
}

/*
 * Allocate a vma private data structure.
 */
struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
{
	struct gru_vma_data *vdata = NULL;

	vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
	if (!vdata)
		return NULL;

	INIT_LIST_HEAD(&vdata->vd_head);
	spin_lock_init(&vdata->vd_lock);
	gru_dbg(grudev, "alloc vdata %p\n", vdata);
	return vdata;
}

/*
 * Find the thread state structure for the current thread.
 */
struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts;

	spin_lock(&vdata->vd_lock);
	gts = gru_find_current_gts_nolock(vdata, tsid);
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Allocate a new thread state for a GSEG. Note that races may allow
 * another thread to create a gts first.
 */
struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts, *ngts;

	gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, vdata->vd_dsr_au_count,
			    vdata->vd_user_options, tsid);
	if (IS_ERR(gts))
		return gts;

	spin_lock(&vdata->vd_lock);
	ngts = gru_find_current_gts_nolock(vdata, tsid);
	if (ngts) {
		gts_drop(gts);
		gts = ngts;
		STAT(gts_double_allocate);
	} else {
		list_add(&gts->ts_next, &vdata->vd_head);
	}
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Free the GRU context assigned to the thread state.
 */
static void gru_free_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru;

	gru = gts->ts_gru;
	gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);

	spin_lock(&gru->gs_lock);
	gru->gs_gts[gts->ts_ctxnum] = NULL;
	free_gru_resources(gru, gts);
	BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
	__clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
	gts->ts_ctxnum = NULLCTX;
	gts->ts_gru = NULL;
	gts->ts_blade = -1;
	spin_unlock(&gru->gs_lock);

	gts_drop(gts);
	STAT(free_context);
}

/*
 * Prefetching cachelines helps hardware performance.
 * (Strictly a performance enhancement. Not functionally required.)
 */
static void prefetch_data(void *p, int num, int stride)
{
	while (num-- > 0) {
		prefetchw(p);
		p += stride;
	}
}

static inline long gru_copy_handle(void *d, void *s)
{
	memcpy(d, s, GRU_HANDLE_BYTES);
	return GRU_HANDLE_BYTES;
}

static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
				unsigned long cbrmap, unsigned long length)
{
	int i, scr;

	prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
		      GRU_CACHE_LINE_BYTES);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
		prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
			      GRU_CACHE_LINE_BYTES);
		cb += GRU_HANDLE_STRIDE;
	}
}

static void gru_load_context_data(void *save, void *grubase, int ctxnum,
				  unsigned long cbrmap, unsigned long dsrmap,
				  int data_valid)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		if (data_valid) {
			save += gru_copy_handle(cb, save);
			save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE,
						save);
		} else {
			memset(cb, 0, GRU_CACHE_LINE_BYTES);
			memset(cbe + i * GRU_HANDLE_STRIDE, 0,
			       GRU_CACHE_LINE_BYTES);
		}
		/* Flush CBE to hide race in context restart */
		mb();
		gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
		cb += GRU_HANDLE_STRIDE;
	}

	if (data_valid)
		memcpy(gseg + GRU_DS_BASE, save, length);
	else
		memset(gseg + GRU_DS_BASE, 0, length);
}

static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
				    unsigned long cbrmap, unsigned long dsrmap)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;

	/* CBEs may not be coherent. Flush them from cache */
	for_each_cbr_in_allocation_map(i, &cbrmap, scr)
		gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
	mb();		/* Let the CL flush complete */

	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		save += gru_copy_handle(save, cb);
		save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
		cb += GRU_HANDLE_STRIDE;
	}
	memcpy(save, gseg + GRU_DS_BASE, length);
}

void gru_unload_context(struct gru_thread_state *gts, int savestate)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int ctxnum = gts->ts_ctxnum;

	if (!is_kernel_context(gts))
		zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	gru_dbg(grudev, "gts %p\n", gts);
	lock_cch_handle(cch);
	if (cch_interrupt_sync(cch))
		BUG();

	if (!is_kernel_context(gts))
		gru_unload_mm_tracker(gru, gts);
	if (savestate) {
		gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
					ctxnum, gts->ts_cbr_map,
					gts->ts_dsr_map);
		gts->ts_data_valid = 1;
	}

	if (cch_deallocate(cch))
		BUG();
	unlock_cch_handle(cch);

	gru_free_gru_context(gts);
}

/*
 * Load a GRU context by copying it from the thread data structure in memory
 * to the GRU.
 */
void gru_load_context(struct gru_thread_state *gts)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int i, err, asid, ctxnum = gts->ts_ctxnum;

	gru_dbg(grudev, "gts %p\n", gts);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	cch->tfm_fault_bit_enable =
	    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
	     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	if (cch->tlb_int_enable) {
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gts->ts_tlb_int_select;
	}
	if (gts->ts_cch_req_slice >= 0) {
		cch->req_slice_set_enable = 1;
		cch->req_slice = gts->ts_cch_req_slice;
	} else {
		cch->req_slice_set_enable = 0;
	}
	cch->tfm_done_bit_enable = 0;
	cch->dsr_allocation_map = gts->ts_dsr_map;
	cch->cbr_allocation_map = gts->ts_cbr_map;

	if (is_kernel_context(gts)) {
		cch->unmap_enable = 1;
		cch->tfm_done_bit_enable = 1;
		cch->cb_int_enable = 1;
	} else {
		cch->unmap_enable = 0;
		cch->tfm_done_bit_enable = 0;
		cch->cb_int_enable = 0;
		asid = gru_load_mm_tracker(gru, gts);
		for (i = 0; i < 8; i++) {
			cch->asid[i] = asid + i;
			cch->sizeavail[i] = gts->ts_sizeavail;
		}
	}

	err = cch_allocate(cch);
	if (err) {
		gru_dbg(grudev,
			"err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
			err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
		BUG();
	}

	gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
			gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid);

	if (cch_start(cch))
		BUG();
	unlock_cch_handle(cch);
}

/*
 * Update fields in an active CCH:
 *	- retarget interrupts on local blade
 *	- update sizeavail mask
 */
int gru_update_cch(struct gru_thread_state *gts)
{
	struct gru_context_configuration_handle *cch;
	struct gru_state *gru = gts->ts_gru;
	int i, ctxnum = gts->ts_ctxnum, ret = 0;

	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	if (cch->state == CCHSTATE_ACTIVE) {
		if (gru->gs_gts[gts->ts_ctxnum] != gts)
			goto exit;
		if (cch_interrupt(cch))
			BUG();
		for (i = 0; i < 8; i++)
			cch->sizeavail[i] = gts->ts_sizeavail;
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gru_cpu_fault_map_id();
		cch->tfm_fault_bit_enable =
		  (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
		   || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
		if (cch_start(cch))
			BUG();
		ret = 1;
	}
exit:
	unlock_cch_handle(cch);
	return ret;
}

/*
 * Update CCH tlb interrupt select. Required when all the following is true:
 *	- task's GRU context is loaded into a GRU
 *	- task is using interrupt notification for TLB faults
 *	- task has migrated to a different cpu on the same blade where
 *	  it was previously running.
 */
static int gru_retarget_intr(struct gru_thread_state *gts)
{
	if (gts->ts_tlb_int_select < 0
	    || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
		return 0;

	gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
		gru_cpu_fault_map_id());
	return gru_update_cch(gts);
}

/*
 * Unload the gru context if it is not assigned to the correct blade or
 * chiplet. Misassignment can occur if the process migrates to a different
 * blade or if the user changes the selected blade/chiplet.
 */
void gru_check_context_placement(struct gru_thread_state *gts)
{
	struct gru_state *gru;
	int blade_id, chiplet_id;

	/*
	 * If the current task is the context owner, verify that the
	 * context is correctly placed. This test is skipped for non-owner
	 * references. Pthread apps use non-owner references to the CBRs.
	 */
	gru = gts->ts_gru;
	if (!gru || gts->ts_tgid_owner != current->tgid)
		return;

	blade_id = gts->ts_user_blade_id;
	if (blade_id < 0)
		blade_id = uv_numa_blade_id();

	chiplet_id = gts->ts_user_chiplet_id;
	if (gru->gs_blade_id != blade_id ||
	    (chiplet_id >= 0 && chiplet_id != gru->gs_chiplet_id)) {
		STAT(check_context_unload);
		gru_unload_context(gts, 1);
	} else if (gru_retarget_intr(gts)) {
		STAT(check_context_retarget_intr);
	}
}


/*
 * Insufficient GRU resources available on the local blade. Steal a context
 * from a process. This is a hack until a _real_ resource scheduler is
 * written....
 */
#define next_ctxnum(n)	((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(b, g)	(((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \
				 ((g)+1) : &(b)->bs_grus[0])
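
/*
 * Illustrative scan order for the steal loop below (hypothetical blade
 * with 2 chiplets and GRU_NUM_CCH == 16): with the saved LRU position
 * at (gru0, ctxnum 5), the loop examines (gru0, 5..15), wraps via
 * next_gru() to (gru1, 0..15), then finishes with (gru0, 0..4). Note
 * that next_ctxnum() never advances the saved LRU starting point to
 * the last context number.
 */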

static int is_gts_stealable(struct gru_thread_state *gts,
		struct gru_blade_state *bs)
{
	if (is_kernel_context(gts))
		return down_write_trylock(&bs->bs_kgts_sema);
	else
		return mutex_trylock(&gts->ts_ctxlock);
}

static void gts_stolen(struct gru_thread_state *gts,
		struct gru_blade_state *bs)
{
	if (is_kernel_context(gts)) {
		up_write(&bs->bs_kgts_sema);
		STAT(steal_kernel_context);
	} else {
		mutex_unlock(&gts->ts_ctxlock);
		STAT(steal_user_context);
	}
}

void gru_steal_context(struct gru_thread_state *gts)
{
	struct gru_blade_state *blade;
	struct gru_state *gru, *gru0;
	struct gru_thread_state *ngts = NULL;
	int ctxnum, ctxnum0, flag = 0, cbr, dsr;
	int blade_id = gts->ts_user_blade_id;
	int chiplet_id = gts->ts_user_chiplet_id;

	if (blade_id < 0)
		blade_id = uv_numa_blade_id();
	cbr = gts->ts_cbr_au_count;
	dsr = gts->ts_dsr_au_count;

	blade = gru_base[blade_id];
	spin_lock(&blade->bs_lock);

	ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
	gru = blade->bs_lru_gru;
	if (ctxnum == 0)
		gru = next_gru(blade, gru);
	blade->bs_lru_gru = gru;
	blade->bs_lru_ctxnum = ctxnum;
	ctxnum0 = ctxnum;
	gru0 = gru;
	while (1) {
		if (chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id) {
			if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
				break;
			spin_lock(&gru->gs_lock);
			for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
				if (flag && gru == gru0 && ctxnum == ctxnum0)
					break;
				ngts = gru->gs_gts[ctxnum];
				/*
				 * We are grabbing locks out of order, so trylock is
				 * needed. GTSs are usually not locked, so the odds of
				 * success are high. If trylock fails, try to steal a
				 * different GSEG.
				 */
				if (ngts && is_gts_stealable(ngts, blade))
					break;
				ngts = NULL;
			}
			spin_unlock(&gru->gs_lock);
			if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
				break;
		}
		if (flag && gru == gru0)
			break;
		flag = 1;
		ctxnum = 0;
		gru = next_gru(blade, gru);
	}
	spin_unlock(&blade->bs_lock);

	if (ngts) {
		gts->ustats.context_stolen++;
		ngts->ts_steal_jiffies = jiffies;
		gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1);
		gts_stolen(ngts, blade);
	} else {
		STAT(steal_context_failed);
	}
	gru_dbg(grudev,
		"stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
		" avail cb %ld, ds %ld\n",
		gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
		hweight64(gru->gs_dsr_map));
}

/*
 * Assign a gru context.
 */
static int gru_assign_context_number(struct gru_state *gru)
{
	int ctxnum;

	ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
	__set_bit(ctxnum, &gru->gs_context_map);
	return ctxnum;
}

/*
 * Scan the GRUs on the local blade & assign a GRU context.
 */
struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru, *grux;
	int i, max_active_contexts;
	int blade_id = gts->ts_user_blade_id;
	int chiplet_id = gts->ts_user_chiplet_id;

	if (blade_id < 0)
		blade_id = uv_numa_blade_id();
again:
	gru = NULL;
	max_active_contexts = GRU_NUM_CCH;
	for_each_gru_on_blade(grux, blade_id, i) {
		if (chiplet_id >= 0 && chiplet_id != grux->gs_chiplet_id)
			continue;
		if (check_gru_resources(grux, gts->ts_cbr_au_count,
					gts->ts_dsr_au_count,
					max_active_contexts)) {
			gru = grux;
			max_active_contexts = grux->gs_active_contexts;
			if (max_active_contexts == 0)
				break;
		}
	}

	if (gru) {
		spin_lock(&gru->gs_lock);
		if (!check_gru_resources(gru, gts->ts_cbr_au_count,
					 gts->ts_dsr_au_count, GRU_NUM_CCH)) {
			spin_unlock(&gru->gs_lock);
			goto again;
		}
		reserve_gru_resources(gru, gts);
		gts->ts_gru = gru;
		gts->ts_blade = gru->gs_blade_id;
		gts->ts_ctxnum = gru_assign_context_number(gru);
		atomic_inc(&gts->ts_refcnt);
		gru->gs_gts[gts->ts_ctxnum] = gts;
		spin_unlock(&gru->gs_lock);

		STAT(assign_context);
		gru_dbg(grudev,
			"gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
			gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
			gts->ts_gru->gs_gid, gts->ts_ctxnum,
			gts->ts_cbr_au_count, gts->ts_dsr_au_count);
	} else {
		gru_dbg(grudev, "failed to allocate a GTS\n");
		STAT(assign_context_failed);
	}

	return gru;
}

/*
 * gru_fault
 *
 * Map the user's GRU segment
 *
 * Note: gru segments are always mmapped on GRU_GSEG_PAGESIZE boundaries.
 */
int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct gru_thread_state *gts;
	unsigned long paddr, vaddr;

	vaddr = (unsigned long)vmf->virtual_address;
	gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
		vma, vaddr, GSEG_BASE(vaddr));
	STAT(nopfn);

	/* The following check ensures vaddr is a valid address in the VMA */
	gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (!gts)
		return VM_FAULT_SIGBUS;

again:
	mutex_lock(&gts->ts_ctxlock);
	preempt_disable();

	gru_check_context_placement(gts);

	if (!gts->ts_gru) {
		STAT(load_user_context);
		if (!gru_assign_gru_context(gts)) {
			preempt_enable();
			mutex_unlock(&gts->ts_ctxlock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(GRU_ASSIGN_DELAY);  /* true hack ZZZ */
			if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
				gru_steal_context(gts);
			goto again;
		}
		gru_load_context(gts);
		paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
		remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
				paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
				vma->vm_page_prot);
	}

	preempt_enable();
	mutex_unlock(&gts->ts_ctxlock);

	return VM_FAULT_NOPAGE;
}