/*
 * SN Platform GRU Driver
 *
 * KERNEL SERVICES THAT USE THE GRU
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
#include "grukservices.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/*
 * Kernel GRU Usage
 *
 * The following is an interim algorithm for management of kernel GRU
 * resources. This will likely be replaced when we better understand the
 * kernel/user requirements.
 *
 * Blade percpu resources reserved for kernel use. These resources are
 * reserved whenever the kernel context for the blade is loaded. Note
 * that the kernel context is not guaranteed to be always available. It is
 * loaded on demand & can be stolen by a user if the user demand exceeds the
 * kernel demand. The kernel can always reload the kernel context, but
 * a SLEEP may be required!!!
 *
 * Async Overview:
 *
 *      Each blade has one "kernel context" that owns GRU kernel resources
 *      located on the blade. Kernel drivers use GRU resources in this context
 *      for sending messages, zeroing memory, etc.
 *
 *      The kernel context is dynamically loaded on demand. If it is not in
 *      use by the kernel, the kernel context can be unloaded & given to a user.
 *      The kernel context will be reloaded when needed. This may require that
 *      a context be stolen from a user.
 *              NOTE: frequent unloading/reloading of the kernel context is
 *              expensive. We are depending on batch schedulers, cpusets, sane
 *              drivers or some other mechanism to prevent the need for frequent
 *              stealing/reloading.
 *
 *      The kernel context consists of two parts:
 *              - 1 CB & a few DSRs that are reserved for each cpu on the blade.
 *                Each cpu has its own private resources & does not share them
 *                with other cpus. These resources are used serially, ie,
 *                locked, used & unlocked on each call to a function in
 *                grukservices.
 *                      (Now that we have dynamic loading of kernel contexts, I
 *                      may rethink this & allow sharing between cpus....)
 *
 *              - Additional resources can be reserved long term & used directly
 *                by UV drivers located in the kernel. Drivers using these GRU
 *                resources can use asynchronous GRU instructions that send
 *                interrupts on completion.
 *                      - these resources must be explicitly locked/unlocked
 *                      - locked resources prevent (obviously) the kernel
 *                        context from being unloaded.
 *                      - drivers using these resources directly issue their own
 *                        GRU instruction and must wait/check completion.
 *
 *                When these resources are reserved, the caller can optionally
 *                associate a wait_queue with the resources and use asynchronous
 *                GRU instructions. When an async GRU instruction completes, the
 *                driver will do a wakeup on the event.
 */
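
/*
 * Illustrative sketch only (not part of the driver; see also quicktest2()
 * at the bottom of this file): a UV kernel driver might use the async
 * resources roughly as follows. "my_cmp" is a hypothetical completion
 * owned by that driver.
 *
 *      static DECLARE_COMPLETION(my_cmp);
 *      unsigned long han;
 *      void *cb;
 *
 *      han = gru_reserve_async_resources(blade_id, ncbrs, 0, &my_cmp);
 *      if (!han)
 *              return -EBUSY;
 *      gru_lock_async_resource(han, &cb, NULL);
 *      ... issue async GRU instructions on cb with IMA_INTERRUPT ...
 *      gru_wait_async_cbr(han);
 *      gru_unlock_async_resource(han);
 *      gru_release_async_resources(han);
 */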

#define ASYNC_HAN_TO_BID(h)     ((h) - 1)
#define ASYNC_BID_TO_HAN(b)     ((b) + 1)
#define ASYNC_HAN_TO_BS(h)      gru_base[ASYNC_HAN_TO_BID(h)]
#define KCB_TO_GID(cb)          ((cb - gru_start_vaddr) /               \
                                        (GRU_SIZE * GRU_CHIPLETS_PER_BLADE))
#define KCB_TO_BS(cb)           gru_base[KCB_TO_GID(cb)]

#define GRU_NUM_KERNEL_CBR      1
#define GRU_NUM_KERNEL_DSR_BYTES 256
#define GRU_NUM_KERNEL_DSR_CL   (GRU_NUM_KERNEL_DSR_BYTES /             \
                                        GRU_CACHE_LINE_BYTES)

/* GRU instruction attributes for all instructions */
#define IMA                     IMA_CB_DELAY

/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */
#define __gru_cacheline_aligned__                               \
        __attribute__((__aligned__(GRU_CACHE_LINE_BYTES)))

#define MAGIC   0x1234567887654321UL

/* Default retry count for GRU errors on kernel instructions */
#define EXCEPTION_RETRY_LIMIT   3

/* Status of message queue sections */
#define MQS_EMPTY               0
#define MQS_FULL                1
#define MQS_NOOP                2

/*----------------- RESOURCE MANAGEMENT -------------------------------------*/
/* optimized for x86_64 */
struct message_queue {
        union gru_mesqhead      head __gru_cacheline_aligned__; /* CL 0 */
        int                     qlines;                         /* DW 1 */
        long                    hstatus[2];
        void                    *next __gru_cacheline_aligned__;/* CL 1 */
        void                    *limit;
        void                    *start;
        void                    *start2;
        char                    data ____cacheline_aligned;     /* CL 2 */
};

/* First word in every message - used by mesq interface */
struct message_header {
        char    present;
        char    present2;
        char    lines;
        char    fill;
};

#define HSTATUS(mq, h)  ((mq) + offsetof(struct message_queue, hstatus[h]))
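
/*
 * Rough layout sketch (illustrative only), derived from
 * gru_create_message_queue() below. The queue body is split into two
 * halves; hstatus[0]/hstatus[1] are the per-half status/lock words used
 * by send_message_queue_full() when the sender flips the queue head from
 * one half to the other:
 *
 *      CL 0 - 1        struct message_queue header (head, hstatus, ...)
 *      CL 2 ...        first half of message lines (initial head)
 *      start2 ...      second half of message lines
 */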

/*
 * Reload the blade's kernel context into a GRU chiplet. Called holding
 * the bs_kgts_sema for READ. Will steal user contexts if necessary.
 */
static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
{
        struct gru_state *gru;
        struct gru_thread_state *kgts;
        void *vaddr;
        int ctxnum, ncpus;

        up_read(&bs->bs_kgts_sema);
        down_write(&bs->bs_kgts_sema);

        if (!bs->bs_kgts)
                bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0);
        kgts = bs->bs_kgts;

        if (!kgts->ts_gru) {
                STAT(load_kernel_context);
                ncpus = uv_blade_nr_possible_cpus(blade_id);
                kgts->ts_cbr_au_count = GRU_CB_COUNT_TO_AU(
                        GRU_NUM_KERNEL_CBR * ncpus + bs->bs_async_cbrs);
                kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
                        GRU_NUM_KERNEL_DSR_BYTES * ncpus +
                                bs->bs_async_dsr_bytes);
                while (!gru_assign_gru_context(kgts, blade_id)) {
                        msleep(1);
                        gru_steal_context(kgts, blade_id);
                }
                gru_load_context(kgts);
                gru = bs->bs_kgts->ts_gru;
                vaddr = gru->gs_gru_base_vaddr;
                ctxnum = kgts->ts_ctxnum;
                bs->kernel_cb = get_gseg_base_address_cb(vaddr, ctxnum, 0);
                bs->kernel_dsr = get_gseg_base_address_ds(vaddr, ctxnum, 0);
        }
        downgrade_write(&bs->bs_kgts_sema);
}

/*
 * Free all kernel contexts that are not currently in use.
 *      Returns 0 if all freed, else number of in-use contexts.
 */
static int gru_free_kernel_contexts(void)
{
        struct gru_blade_state *bs;
        struct gru_thread_state *kgts;
        int bid, ret = 0;

        for (bid = 0; bid < GRU_MAX_BLADES; bid++) {
                bs = gru_base[bid];
                if (!bs)
                        continue;
                if (down_write_trylock(&bs->bs_kgts_sema)) {
                        kgts = bs->bs_kgts;
                        if (kgts && kgts->ts_gru)
                                gru_unload_context(kgts, 0);
                        kfree(kgts);
                        bs->bs_kgts = NULL;
                        up_write(&bs->bs_kgts_sema);
                } else {
                        ret++;
                }
        }
        return ret;
}

/*
 * Lock & load the kernel context for the specified blade.
 */
static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
{
        struct gru_blade_state *bs;

        STAT(lock_kernel_context);
        bs = gru_base[blade_id];

        down_read(&bs->bs_kgts_sema);
        if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
                gru_load_kernel_context(bs, blade_id);
        return bs;
}

/*
 * Unlock the kernel context for the specified blade. Context is not
 * unloaded but may be stolen before next use.
 */
static void gru_unlock_kernel_context(int blade_id)
{
        struct gru_blade_state *bs;

        bs = gru_base[blade_id];
        up_read(&bs->bs_kgts_sema);
        STAT(unlock_kernel_context);
}

/*
 * Reserve & get pointers to the DSR/CBRs reserved for the current cpu.
 *      - returns with preemption disabled
 */
static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
{
        struct gru_blade_state *bs;
        int lcpu;

        BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
        preempt_disable();
        bs = gru_lock_kernel_context(uv_numa_blade_id());
        lcpu = uv_blade_processor_id();
        *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
        *dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
        return 0;
}

/*
 * Free the current cpu's reserved DSR/CBR resources.
 */
static void gru_free_cpu_resources(void *cb, void *dsr)
{
        gru_unlock_kernel_context(uv_numa_blade_id());
        preempt_enable();
}
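
/*
 * Illustrative sketch only: every kservice below (message send, bcopy,
 * quicktests) uses the per-cpu resources with the same serial pattern,
 * where "bytes" is the DSR space the operation needs
 * (<= GRU_NUM_KERNEL_DSR_BYTES):
 *
 *      void *cb, *dsr;
 *      int ret;
 *
 *      if (gru_get_cpu_resources(bytes, &cb, &dsr))
 *              return MQE_BUG_NO_RESOURCES;
 *      ... build operands in dsr, issue one instruction on cb ...
 *      ret = gru_wait(cb);
 *      gru_free_cpu_resources(cb, dsr);
 */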

/*
 * Reserve GRU resources to be used asynchronously.
 *   Note: currently supports only 1 reservation per blade.
 *
 *      input:
 *              blade_id  - blade on which resources should be reserved
 *              cbrs      - number of CBRs
 *              dsr_bytes - number of DSR bytes needed
 *      output:
 *              handle to identify resource
 *              (0 = async resources already reserved)
 */
unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes,
                        struct completion *cmp)
{
        struct gru_blade_state *bs;
        struct gru_thread_state *kgts;
        int ret = 0;

        bs = gru_base[blade_id];

        down_write(&bs->bs_kgts_sema);

        /* Verify no resources already reserved */
        if (bs->bs_async_dsr_bytes + bs->bs_async_cbrs)
                goto done;
        bs->bs_async_dsr_bytes = dsr_bytes;
        bs->bs_async_cbrs = cbrs;
        bs->bs_async_wq = cmp;
        kgts = bs->bs_kgts;

        /* Resources changed. Unload context if already loaded */
        if (kgts && kgts->ts_gru)
                gru_unload_context(kgts, 0);
        ret = ASYNC_BID_TO_HAN(blade_id);

done:
        up_write(&bs->bs_kgts_sema);
        return ret;
}

/*
 * Release async resources previously reserved.
 *
 *      input:
 *              han - handle to identify resources
 */
void gru_release_async_resources(unsigned long han)
{
        struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);

        down_write(&bs->bs_kgts_sema);
        bs->bs_async_dsr_bytes = 0;
        bs->bs_async_cbrs = 0;
        bs->bs_async_wq = NULL;
        up_write(&bs->bs_kgts_sema);
}

/*
 * Wait for async GRU instructions to complete.
 *
 *      input:
 *              han - handle to identify resources
 */
void gru_wait_async_cbr(unsigned long han)
{
        struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);

        wait_for_completion(bs->bs_async_wq);
        mb();
}

/*
 * Lock previously reserved async GRU resources
 *
 *      input:
 *              han - handle to identify resources
 *      output:
 *              cb  - pointer to first CBR
 *              dsr - pointer to first DSR
 */
void gru_lock_async_resource(unsigned long han, void **cb, void **dsr)
{
        struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
        int blade_id = ASYNC_HAN_TO_BID(han);
        int ncpus;

        gru_lock_kernel_context(blade_id);
        ncpus = uv_blade_nr_possible_cpus(blade_id);
        if (cb)
                *cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE;
        if (dsr)
                *dsr = bs->kernel_dsr + ncpus * GRU_NUM_KERNEL_DSR_BYTES;
}

/*
 * Unlock previously reserved async GRU resources
 *
 *      input:
 *              han - handle to identify resources
 */
void gru_unlock_async_resource(unsigned long han)
{
        int blade_id = ASYNC_HAN_TO_BID(han);

        gru_unlock_kernel_context(blade_id);
}

/*----------------------------------------------------------------------*/
int gru_get_cb_exception_detail(void *cb,
                struct control_block_extended_exc_detail *excdet)
{
        struct gru_control_block_extended *cbe;
        struct gru_blade_state *bs;
        int cbrnum;

        bs = KCB_TO_BS(cb);
        cbrnum = thread_cbr_number(bs->bs_kgts, get_cb_number(cb));
        cbe = get_cbe(GRUBASE(cb), cbrnum);
        gru_flush_cache(cbe);   /* CBE not coherent */
        excdet->opc = cbe->opccpy;
        excdet->exopc = cbe->exopccpy;
        excdet->ecause = cbe->ecause;
        excdet->exceptdet0 = cbe->idef1upd;
        excdet->exceptdet1 = cbe->idef3upd;
        gru_flush_cache(cbe);
        return 0;
}

char *gru_get_cb_exception_detail_str(int ret, void *cb,
                                      char *buf, int size)
{
        struct gru_control_block_status *gen = (void *)cb;
        struct control_block_extended_exc_detail excdet;

        if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
                gru_get_cb_exception_detail(cb, &excdet);
                snprintf(buf, size,
                        "GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x, "
                        "excdet0 0x%lx, excdet1 0x%x",
                        gen, excdet.opc, excdet.exopc, excdet.ecause,
                        excdet.exceptdet0, excdet.exceptdet1);
        } else {
                snprintf(buf, size, "No exception");
        }
        return buf;
}

static int gru_wait_idle_or_exception(struct gru_control_block_status *gen)
{
        while (gen->istatus >= CBS_ACTIVE) {
                cpu_relax();
                barrier();
        }
        return gen->istatus;
}

static int gru_retry_exception(void *cb)
{
        struct gru_control_block_status *gen = (void *)cb;
        struct control_block_extended_exc_detail excdet;
        int retry = EXCEPTION_RETRY_LIMIT;

        while (1) {
                if (gru_wait_idle_or_exception(gen) == CBS_IDLE)
                        return CBS_IDLE;
                if (gru_get_cb_message_queue_substatus(cb))
                        return CBS_EXCEPTION;
                gru_get_cb_exception_detail(cb, &excdet);
                if ((excdet.ecause & ~EXCEPTION_RETRY_BITS) ||
                                (excdet.cbrexecstatus & CBR_EXS_ABORT_OCC))
                        break;
                if (retry-- == 0)
                        break;
                gen->icmd = 1;
                gru_flush_cache(gen);
        }
        return CBS_EXCEPTION;
}

int gru_check_status_proc(void *cb)
{
        struct gru_control_block_status *gen = (void *)cb;
        int ret;

        ret = gen->istatus;
        if (ret != CBS_EXCEPTION)
                return ret;
        return gru_retry_exception(cb);
}

int gru_wait_proc(void *cb)
{
        struct gru_control_block_status *gen = (void *)cb;
        int ret;

        ret = gru_wait_idle_or_exception(gen);
        if (ret == CBS_EXCEPTION)
                ret = gru_retry_exception(cb);

        return ret;
}

void gru_abort(int ret, void *cb, char *str)
{
        char buf[GRU_EXC_STR_SIZE];

        panic("GRU FATAL ERROR: %s - %s\n", str,
              gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf)));
}

void gru_wait_abort_proc(void *cb)
{
        int ret;

        ret = gru_wait_proc(cb);
        if (ret)
                gru_abort(ret, cb, "gru_wait_abort");
}

/*------------------------------ MESSAGE QUEUES -----------------------------*/

/* Internal status codes. These are NOT returned to the user. */
#define MQIE_AGAIN              -1      /* try again */

/*
 * Save/restore the "present" flag that is in the second line of 2-line
 * messages
 */
static inline int get_present2(void *p)
{
        struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
        return mhdr->present;
}

static inline void restore_present2(void *p, int val)
{
        struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
        mhdr->present = val;
}

/*
 * Create a message queue.
 *      qlines - message queue size in cache lines. Includes 2-line header.
 */
int gru_create_message_queue(struct gru_message_queue_desc *mqd,
                void *p, unsigned int bytes, int nasid, int vector, int apicid)
{
        struct message_queue *mq = p;
        unsigned int qlines;

        qlines = bytes / GRU_CACHE_LINE_BYTES - 2;
        memset(mq, 0, bytes);
        mq->start = &mq->data;
        mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
        mq->next = &mq->data;
        mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
        mq->qlines = qlines;
        mq->hstatus[0] = 0;
        mq->hstatus[1] = 1;
        mq->head = gru_mesq_head(2, qlines / 2 + 1);
        mqd->mq = mq;
        mqd->mq_gpa = uv_gpa(mq);
        mqd->qlines = qlines;
        mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid);
        mqd->interrupt_vector = vector;
        mqd->interrupt_apicid = apicid;
        return 0;
}
EXPORT_SYMBOL_GPL(gru_create_message_queue);
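
/*
 * Illustrative sketch only, modeled on quicktest1() below: the queue
 * memory must be cacheline aligned and must not cross a page boundary.
 * Explicit interrupt delivery is optional (a zero vector disables the
 * IPI; see send_message_queue_interrupt()). ALIGNUP is defined near the
 * quicktests at the bottom of this file.
 *
 *      struct gru_message_queue_desc mqd;
 *      void *p, *mq;
 *      char mes[GRU_CACHE_LINE_BYTES];
 *      int ret;
 *
 *      p = kmalloc(4096, GFP_KERNEL);
 *      mq = ALIGNUP(p, 1024);
 *      gru_create_message_queue(&mqd, mq, 8 * GRU_CACHE_LINE_BYTES, 0, 0, 0);
 *      do {
 *              ret = gru_send_message_gpa(&mqd, mes, sizeof(mes));
 *      } while (ret == MQE_CONGESTION);
 */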

/*
 * Send a NOOP message to a message queue
 *      Returns:
 *               0 - if queue is full after the send. This is the normal case
 *                   but various races can change this.
 *              -1 - if mesq sent successfully but queue not full
 *              >0 - unexpected error. MQE_xxx returned
 */
static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
                                void *mesg)
{
        const struct message_header noop_header = {
                                        .present = MQS_NOOP, .lines = 1};
        unsigned long m;
        int substatus, ret;
        struct message_header save_mhdr, *mhdr = mesg;

        STAT(mesq_noop);
        save_mhdr = *mhdr;
        *mhdr = noop_header;
        gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), 1, IMA);
        ret = gru_wait(cb);

        if (ret) {
                substatus = gru_get_cb_message_queue_substatus(cb);
                switch (substatus) {
                case CBSS_NO_ERROR:
                        STAT(mesq_noop_unexpected_error);
                        ret = MQE_UNEXPECTED_CB_ERR;
                        break;
                case CBSS_LB_OVERFLOWED:
                        STAT(mesq_noop_lb_overflow);
                        ret = MQE_CONGESTION;
                        break;
                case CBSS_QLIMIT_REACHED:
                        STAT(mesq_noop_qlimit_reached);
                        ret = 0;
                        break;
                case CBSS_AMO_NACKED:
                        STAT(mesq_noop_amo_nacked);
                        ret = MQE_CONGESTION;
                        break;
                case CBSS_PUT_NACKED:
                        STAT(mesq_noop_put_nacked);
                        m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
                        gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
                                                IMA);
                        if (gru_wait(cb) == CBS_IDLE)
                                ret = MQIE_AGAIN;
                        else
                                ret = MQE_UNEXPECTED_CB_ERR;
                        break;
                case CBSS_PAGE_OVERFLOW:
                default:
                        BUG();
                }
        }
        *mhdr = save_mhdr;
        return ret;
}

/*
 * Handle a gru_mesq full.
 */
static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd,
                                void *mesg, int lines)
{
        union gru_mesqhead mqh;
        unsigned int limit, head;
        unsigned long avalue;
        int half, qlines;

        /* Determine if switching to first/second half of q */
        avalue = gru_get_amo_value(cb);
        head = gru_get_amo_value_head(cb);
        limit = gru_get_amo_value_limit(cb);

        qlines = mqd->qlines;
        half = (limit != qlines);

        if (half)
                mqh = gru_mesq_head(qlines / 2 + 1, qlines);
        else
                mqh = gru_mesq_head(2, qlines / 2 + 1);

        /* Try to get lock for switching head pointer */
        gru_gamir(cb, EOP_IR_CLR, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, IMA);
        if (gru_wait(cb) != CBS_IDLE)
                goto cberr;
        if (!gru_get_amo_value(cb)) {
                STAT(mesq_qf_locked);
                return MQE_QUEUE_FULL;
        }

        /* Got the lock. Send optional NOP if queue not full. */
        if (head != limit) {
                if (send_noop_message(cb, mqd, mesg)) {
                        gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half),
                                        XTYPE_DW, IMA);
                        if (gru_wait(cb) != CBS_IDLE)
                                goto cberr;
                        STAT(mesq_qf_noop_not_full);
                        return MQIE_AGAIN;
                }
                avalue++;
        }

        /* Then flip queuehead to other half of queue. */
        gru_gamer(cb, EOP_ERR_CSWAP, mqd->mq_gpa, XTYPE_DW, mqh.val, avalue,
                                                        IMA);
        if (gru_wait(cb) != CBS_IDLE)
                goto cberr;

        /* If swapping the queue head failed, clear the hstatus lock */
        if (gru_get_amo_value(cb) != avalue) {
                STAT(mesq_qf_switch_head_failed);
                gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), XTYPE_DW,
                                                        IMA);
                if (gru_wait(cb) != CBS_IDLE)
                        goto cberr;
        }
        return MQIE_AGAIN;
cberr:
        STAT(mesq_qf_unexpected_error);
        return MQE_UNEXPECTED_CB_ERR;
}

/*
 * Send a cross-partition interrupt to the SSI that contains the target
 * message queue. Normally, the interrupt is automatically delivered by
 * hardware but some error conditions require explicit delivery.
 */
static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
{
        if (mqd->interrupt_vector)
                uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid,
                                mqd->interrupt_vector);
}

/*
 * Handle a PUT failure. Note: if message was a 2-line message, one of the
 * lines might have been successfully written. Before sending the
 * message, "present" must be cleared in BOTH lines to prevent the receiver
 * from prematurely seeing the full message.
 */
static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
                                void *mesg, int lines)
{
        unsigned long m;

        m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
        if (lines == 2) {
                gru_vset(cb, m, 0, XTYPE_CL, lines, 1, IMA);
                if (gru_wait(cb) != CBS_IDLE)
                        return MQE_UNEXPECTED_CB_ERR;
        }
        gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
        if (gru_wait(cb) != CBS_IDLE)
                return MQE_UNEXPECTED_CB_ERR;
        send_message_queue_interrupt(mqd);
        return MQE_OK;
}

/*
 * Handle a gru_mesq failure. Some of these failures are software recoverable
 * or retryable.
 */
static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
                                void *mesg, int lines)
{
        int substatus, ret = 0;

        substatus = gru_get_cb_message_queue_substatus(cb);
        switch (substatus) {
        case CBSS_NO_ERROR:
                STAT(mesq_send_unexpected_error);
                ret = MQE_UNEXPECTED_CB_ERR;
                break;
        case CBSS_LB_OVERFLOWED:
                STAT(mesq_send_lb_overflow);
                ret = MQE_CONGESTION;
                break;
        case CBSS_QLIMIT_REACHED:
                STAT(mesq_send_qlimit_reached);
                ret = send_message_queue_full(cb, mqd, mesg, lines);
                break;
        case CBSS_AMO_NACKED:
                STAT(mesq_send_amo_nacked);
                ret = MQE_CONGESTION;
                break;
        case CBSS_PUT_NACKED:
                STAT(mesq_send_put_nacked);
                ret = send_message_put_nacked(cb, mqd, mesg, lines);
                break;
        default:
                BUG();
        }
        return ret;
}

/*
 * Send a message to a message queue
 *      mqd     message queue descriptor
 *      mesg    message. Must be a vaddr within a GSEG
 *      bytes   message size (<= 2 CL)
 */
int gru_send_message_gpa(struct gru_message_queue_desc *mqd, void *mesg,
                                unsigned int bytes)
{
        struct message_header *mhdr;
        void *cb;
        void *dsr;
        int istatus, clines, ret;

        STAT(mesq_send);
        BUG_ON(bytes < sizeof(int) || bytes > 2 * GRU_CACHE_LINE_BYTES);

        clines = DIV_ROUND_UP(bytes, GRU_CACHE_LINE_BYTES);
        if (gru_get_cpu_resources(bytes, &cb, &dsr))
                return MQE_BUG_NO_RESOURCES;
        memcpy(dsr, mesg, bytes);
        mhdr = dsr;
        mhdr->present = MQS_FULL;
        mhdr->lines = clines;
        if (clines == 2) {
                mhdr->present2 = get_present2(mhdr);
                restore_present2(mhdr, MQS_FULL);
        }

        do {
                ret = MQE_OK;
                gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), clines, IMA);
                istatus = gru_wait(cb);
                if (istatus != CBS_IDLE)
                        ret = send_message_failure(cb, mqd, dsr, clines);
        } while (ret == MQIE_AGAIN);
        gru_free_cpu_resources(cb, dsr);

        if (ret)
                STAT(mesq_send_failed);
        return ret;
}
EXPORT_SYMBOL_GPL(gru_send_message_gpa);

/*
 * Advance the receive pointer for the queue to the next message.
 */
void gru_free_message(struct gru_message_queue_desc *mqd, void *mesg)
{
        struct message_queue *mq = mqd->mq;
        struct message_header *mhdr = mq->next;
        void *next, *pnext;
        int half = -1;
        int lines = mhdr->lines;

        if (lines == 2)
                restore_present2(mhdr, MQS_EMPTY);
        mhdr->present = MQS_EMPTY;

        pnext = mq->next;
        next = pnext + GRU_CACHE_LINE_BYTES * lines;
        if (next == mq->limit) {
                next = mq->start;
                half = 1;
        } else if (pnext < mq->start2 && next >= mq->start2) {
                half = 0;
        }

        if (half >= 0)
                mq->hstatus[half] = 1;
        mq->next = next;
}
EXPORT_SYMBOL_GPL(gru_free_message);

/*
 * Get next message from message queue. Return NULL if no message
 * present. User must call gru_free_message() to move to next message.
 *      mqd     message queue descriptor
 */
void *gru_get_next_message(struct gru_message_queue_desc *mqd)
{
        struct message_queue *mq = mqd->mq;
        struct message_header *mhdr = mq->next;
        int present = mhdr->present;

        /* skip NOOP messages */
        STAT(mesq_receive);
        while (present == MQS_NOOP) {
                gru_free_message(mqd, mhdr);
                mhdr = mq->next;
                present = mhdr->present;
        }

        /* Wait for both halves of 2 line messages */
        if (present == MQS_FULL && mhdr->lines == 2 &&
                                get_present2(mhdr) == MQS_EMPTY)
                present = MQS_EMPTY;

        if (!present) {
                STAT(mesq_receive_none);
                return NULL;
        }

        if (mhdr->lines == 2)
                restore_present2(mhdr, mhdr->present2);

        return mhdr;
}
EXPORT_SYMBOL_GPL(gru_get_next_message);
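
/*
 * Illustrative sketch only: a receiver typically drains the queue like
 * this, e.g. from the message-queue interrupt handler:
 *
 *      void *m;
 *
 *      while ((m = gru_get_next_message(&mqd)) != NULL) {
 *              ... consume the message at m ...
 *              gru_free_message(&mqd, m);
 *      }
 */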

/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/

/*
 * Copy a block of data using the GRU resources
 */
int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
                                unsigned int bytes)
{
        void *cb;
        void *dsr;
        int ret;

        STAT(copy_gpa);
        if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
                return MQE_BUG_NO_RESOURCES;
        gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
                  XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_CL, IMA);
        ret = gru_wait(cb);
        gru_free_cpu_resources(cb, dsr);
        return ret;
}
EXPORT_SYMBOL_GPL(gru_copy_gpa);
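
/*
 * Illustrative sketch only: both addresses are GRU global physical
 * addresses, e.g. obtained via uv_gpa(). Returns CBS_IDLE (0) on success:
 *
 *      ret = gru_copy_gpa(uv_gpa(dst), uv_gpa(src), PAGE_SIZE);
 *      if (ret)
 *              ... handle GRU failure ...
 */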

/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
/* Temp - will delete after we gain confidence in the GRU */

static int quicktest0(unsigned long arg)
{
        unsigned long word0;
        unsigned long word1;
        void *cb;
        void *dsr;
        unsigned long *p;
        int ret = -EIO;

        if (gru_get_cpu_resources(GRU_CACHE_LINE_BYTES, &cb, &dsr))
                return MQE_BUG_NO_RESOURCES;
        p = dsr;
        word0 = MAGIC;
        word1 = 0;

        gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
        if (gru_wait(cb) != CBS_IDLE) {
                printk(KERN_DEBUG "GRU quicktest0: CBR failure 1\n");
                goto done;
        }

        if (*p != MAGIC) {
                printk(KERN_DEBUG "GRU: quicktest0 bad magic 0x%lx\n", *p);
                goto done;
        }
        gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
        if (gru_wait(cb) != CBS_IDLE) {
                printk(KERN_DEBUG "GRU quicktest0: CBR failure 2\n");
                goto done;
        }

        if (word0 != word1 || word1 != MAGIC) {
                printk(KERN_DEBUG
                       "GRU quicktest0 err: found 0x%lx, expected 0x%lx\n",
                       word1, MAGIC);
                goto done;
        }
        ret = 0;

done:
        gru_free_cpu_resources(cb, dsr);
        return ret;
}

#define ALIGNUP(p, q)   ((void *)(((unsigned long)(p) + (q) - 1) & ~((q) - 1)))

static int quicktest1(unsigned long arg)
{
        struct gru_message_queue_desc mqd;
        void *p, *mq;
        unsigned long *dw;
        int i, ret = -EIO;
        char mes[GRU_CACHE_LINE_BYTES], *m;

        /* Need 1K cacheline aligned that does not cross page boundary */
        p = kmalloc(4096, GFP_KERNEL);
        if (p == NULL)
                return -ENOMEM;
        mq = ALIGNUP(p, 1024);
        memset(mes, 0xee, sizeof(mes));
        dw = mq;

        gru_create_message_queue(&mqd, mq, 8 * GRU_CACHE_LINE_BYTES, 0, 0, 0);
        for (i = 0; i < 6; i++) {
                mes[8] = i;
                do {
                        ret = gru_send_message_gpa(&mqd, mes, sizeof(mes));
                } while (ret == MQE_CONGESTION);
                if (ret)
                        break;
        }
        if (ret != MQE_QUEUE_FULL || i != 4)
                goto done;

        for (i = 0; i < 6; i++) {
                m = gru_get_next_message(&mqd);
                if (!m || m[8] != i)
                        break;
                gru_free_message(&mqd, m);
        }
        ret = (i == 4) ? 0 : -EIO;

done:
        kfree(p);
        return ret;
}

static int quicktest2(unsigned long arg)
{
        static DECLARE_COMPLETION(cmp);
        unsigned long han;
        int blade_id = 0;
        int numcb = 4;
        int ret = 0;
        unsigned long *buf;
        void *cb0, *cb;
        int i, k, istatus, bytes;

        bytes = numcb * 4 * 8;
        buf = kmalloc(bytes, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = -EBUSY;
        han = gru_reserve_async_resources(blade_id, numcb, 0, &cmp);
        if (!han)
                goto done;

        gru_lock_async_resource(han, &cb0, NULL);
        memset(buf, 0xee, bytes);
        for (i = 0; i < numcb; i++)
                gru_vset(cb0 + i * GRU_HANDLE_STRIDE, uv_gpa(&buf[i * 4]), 0,
                                XTYPE_DW, 4, 1, IMA_INTERRUPT);

        ret = 0;
        for (k = 0; k < numcb; k++) {
                gru_wait_async_cbr(han);
                for (i = 0; i < numcb; i++) {
                        cb = cb0 + i * GRU_HANDLE_STRIDE;
                        istatus = gru_check_status(cb);
                        if (istatus == CBS_ACTIVE)
                                continue;
                        if (istatus == CBS_EXCEPTION)
                                ret = -EFAULT;
                        else if (buf[4 * i] || buf[4 * i + 1] ||
                                        buf[4 * i + 2] || buf[4 * i + 3])
                                ret = -EIO;
                }
        }
        BUG_ON(cmp.done);

        gru_unlock_async_resource(han);
        gru_release_async_resources(han);
done:
        kfree(buf);
        return ret;
}

/*
 * Debugging only. User hook for various kernel tests
 * of driver & gru.
 */
int gru_ktest(unsigned long arg)
{
        int ret = -EINVAL;

        switch (arg & 0xff) {
        case 0:
                ret = quicktest0(arg);
                break;
        case 1:
                ret = quicktest1(arg);
                break;
        case 2:
                ret = quicktest2(arg);
                break;
        case 99:
                ret = gru_free_kernel_contexts();
                break;
        }
        return ret;
}

int gru_kservices_init(void)
{
        return 0;
}

void gru_kservices_exit(void)
{
        if (gru_free_kernel_contexts())
                BUG();
}