1/*
2 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/*
34 * These are the routines used by layered drivers, currently just the
35 * layered ethernet driver and verbs layer.
36 */
37
38#include <linux/io.h>
39#include <linux/pci.h>
40#include <asm/byteorder.h>
41
42#include "ipath_kernel.h"
43#include "ips_common.h"
44#include "ipath_layer.h"
45
46/* Acquire before ipath_devs_lock. */
47static DEFINE_MUTEX(ipath_layer_mutex);
48
49static int ipath_verbs_registered;
50
51u16 ipath_layer_rcv_opcode;
52
53static int (*layer_intr)(void *, u32);
54static int (*layer_rcv)(void *, void *, struct sk_buff *);
55static int (*layer_rcv_lid)(void *, void *);
56static int (*verbs_piobufavail)(void *);
57static void (*verbs_rcv)(void *, void *, void *, u32);
58
59static void *(*layer_add_one)(int, struct ipath_devdata *);
60static void (*layer_remove_one)(void *);
61static void *(*verbs_add_one)(int, struct ipath_devdata *);
62static void (*verbs_remove_one)(void *);
63static void (*verbs_timer_cb)(void *);
64
65int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
66{
67 int ret = -ENODEV;
68
69 if (dd->ipath_layer.l_arg && layer_intr)
70 ret = layer_intr(dd->ipath_layer.l_arg, arg);
71
72 return ret;
73}
74
75int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
76{
77 int ret;
78
79 mutex_lock(&ipath_layer_mutex);
80
81 ret = __ipath_layer_intr(dd, arg);
82
83 mutex_unlock(&ipath_layer_mutex);
84
85 return ret;
86}
87
88int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
89 struct sk_buff *skb)
90{
91 int ret = -ENODEV;
92
93 if (dd->ipath_layer.l_arg && layer_rcv)
94 ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);
95
96 return ret;
97}
98
99int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
100{
101 int ret = -ENODEV;
102
103 if (dd->ipath_layer.l_arg && layer_rcv_lid)
104 ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);
105
106 return ret;
107}
108
109int __ipath_verbs_piobufavail(struct ipath_devdata *dd)
110{
111 int ret = -ENODEV;
112
113 if (dd->verbs_layer.l_arg && verbs_piobufavail)
114 ret = verbs_piobufavail(dd->verbs_layer.l_arg);
115
116 return ret;
117}
118
119int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf,
120 u32 tlen)
121{
122 int ret = -ENODEV;
123
124 if (dd->verbs_layer.l_arg && verbs_rcv) {
125 verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen);
126 ret = 0;
127 }
128
129 return ret;
130}
131
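/**
 * ipath_layer_set_linkstate - request a new IB link state
 * @dd: the infinipath device
 * @newstate: the IPATH_IB_* state to move the link to
 *
 * The LINKDOWN variants return without waiting; INIT, ARM and ACTIVE
 * wait for the link to reach the requested state (ARM also accepts a
 * transition straight to ACTIVE, since the port can go active when a
 * non-VL15 packet is received).
 */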
132int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
133{
134 u32 lstate;
135 int ret;
136
137 switch (newstate) {
138 case IPATH_IB_LINKDOWN:
139 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
140 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
141 /* don't wait */
142 ret = 0;
143 goto bail;
144
145 case IPATH_IB_LINKDOWN_SLEEP:
146 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
147 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
148 /* don't wait */
149 ret = 0;
150 goto bail;
151
152 case IPATH_IB_LINKDOWN_DISABLE:
153 ipath_set_ib_lstate(dd,
154 INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
155 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
156 /* don't wait */
157 ret = 0;
158 goto bail;
159
160 case IPATH_IB_LINKINIT:
161 if (dd->ipath_flags & IPATH_LINKINIT) {
162 ret = 0;
163 goto bail;
164 }
165 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
166 INFINIPATH_IBCC_LINKCMD_SHIFT);
167 lstate = IPATH_LINKINIT;
168 break;
169
170 case IPATH_IB_LINKARM:
171 if (dd->ipath_flags & IPATH_LINKARMED) {
172 ret = 0;
173 goto bail;
174 }
175 if (!(dd->ipath_flags &
176 (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
177 ret = -EINVAL;
178 goto bail;
179 }
180 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
181 INFINIPATH_IBCC_LINKCMD_SHIFT);
182 /*
183 * Since the port can transition to ACTIVE by receiving
184 * a non VL 15 packet, wait for either state.
185 */
186 lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
187 break;
188
189 case IPATH_IB_LINKACTIVE:
190 if (dd->ipath_flags & IPATH_LINKACTIVE) {
191 ret = 0;
192 goto bail;
193 }
194 if (!(dd->ipath_flags & IPATH_LINKARMED)) {
195 ret = -EINVAL;
196 goto bail;
197 }
198 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
199 INFINIPATH_IBCC_LINKCMD_SHIFT);
200 lstate = IPATH_LINKACTIVE;
201 break;
202
203 default:
204 ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
205 ret = -EINVAL;
206 goto bail;
207 }
208 ret = ipath_wait_linkstate(dd, lstate, 2000);
209
210bail:
211 return ret;
212}
213
214EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate);
215
216/**
217 * ipath_layer_set_mtu - set the MTU
218 * @dd: the infinipath device
219 * @arg: the new MTU
220 *
221 * we can handle "any" incoming size, the issue here is whether we
222 * need to restrict our outgoing size. For now, we don't do any
223 * sanity checking on this, and we don't deal with what happens to
224 * programs that are already running when the size changes.
225 * NOTE: changing the MTU will usually cause the IBC to go back to
226 * link initialize (IPATH_IBSTATE_INIT) state...
227 */
228int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
229{
230 u32 piosize;
231 int changed = 0;
232 int ret;
233
234 /*
235 * mtu is IB data payload max. It's the largest power of 2 less
236 * than piosize (or even larger, since it only really controls the
237 * largest we can receive; we can send the max of the mtu and
238 * piosize). We check that it's one of the valid IB sizes.
239 */
240 if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
241 arg != 4096) {
242 ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
243 ret = -EINVAL;
244 goto bail;
245 }
246 if (dd->ipath_ibmtu == arg) {
247 ret = 0; /* same as current */
248 goto bail;
249 }
250
251 piosize = dd->ipath_ibmaxlen;
252 dd->ipath_ibmtu = arg;
253
254 if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
255 /* Only if it's not the initial value (or reset to it) */
256 if (piosize != dd->ipath_init_ibmaxlen) {
257 dd->ipath_ibmaxlen = piosize;
258 changed = 1;
259 }
260 } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
261 piosize = arg + IPATH_PIO_MAXIBHDR;
262 ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
263 "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
264 arg);
265 dd->ipath_ibmaxlen = piosize;
266 changed = 1;
267 }
268
269 if (changed) {
270 /*
271 * set the IBC maxpktlength to the size of our pio
272 * buffers in words
273 */
274 u64 ibc = dd->ipath_ibcctrl;
275 ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
276 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
277
278 piosize = piosize - 2 * sizeof(u32); /* ignore pbc */
279 dd->ipath_ibmaxlen = piosize;
280 piosize /= sizeof(u32); /* in words */
281 /*
282 * for ICRC, which we only send in diag test pkt mode, and
283 * we don't need to worry about that for mtu
284 */
285 piosize += 1;
286
287 ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
288 dd->ipath_ibcctrl = ibc;
289 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
290 dd->ipath_ibcctrl);
291 dd->ipath_f_tidtemplate(dd);
292 }
293
294 ret = 0;
295
296bail:
297 return ret;
298}
299
300EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);
301
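/**
 * ipath_set_sps_lid - set the unit's LID and LMC
 * @dd: the infinipath device
 * @arg: the new LID
 * @lmc: the new LMC
 *
 * Records the new LID in ipath_stats, stores the LID and LMC in the
 * device data, and notifies the layered driver (if attached) via its
 * interrupt callback with IPATH_LAYER_INT_LID.
 */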
302int ipath_set_sps_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
303{
304 ipath_stats.sps_lid[dd->ipath_unit] = arg;
305 dd->ipath_lid = arg;
306 dd->ipath_lmc = lmc;
307
308 mutex_lock(&ipath_layer_mutex);
309
310 if (dd->ipath_layer.l_arg && layer_intr)
311 layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);
312
313 mutex_unlock(&ipath_layer_mutex);
314
315 return 0;
316}
317
318EXPORT_SYMBOL_GPL(ipath_set_sps_lid);
319
320int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
321{
322 /* XXX - need to inform anyone who cares this just happened. */
323 dd->ipath_guid = guid;
324 return 0;
325}
326
327EXPORT_SYMBOL_GPL(ipath_layer_set_guid);
328
329__be64 ipath_layer_get_guid(struct ipath_devdata *dd)
330{
331 return dd->ipath_guid;
332}
333
334EXPORT_SYMBOL_GPL(ipath_layer_get_guid);
335
336u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
337{
338 return dd->ipath_nguid;
339}
340
341EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);
342
343int ipath_layer_query_device(struct ipath_devdata *dd, u32 * vendor,
344 u32 * boardrev, u32 * majrev, u32 * minrev)
345{
346 *vendor = dd->ipath_vendorid;
347 *boardrev = dd->ipath_boardrev;
348 *majrev = dd->ipath_majrev;
349 *minrev = dd->ipath_minrev;
350
351 return 0;
352}
353
354EXPORT_SYMBOL_GPL(ipath_layer_query_device);
355
356u32 ipath_layer_get_flags(struct ipath_devdata *dd)
357{
358 return dd->ipath_flags;
359}
360
361EXPORT_SYMBOL_GPL(ipath_layer_get_flags);
362
363struct device *ipath_layer_get_device(struct ipath_devdata *dd)
364{
365 return &dd->pcidev->dev;
366}
367
368EXPORT_SYMBOL_GPL(ipath_layer_get_device);
369
370u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
371{
372 return dd->ipath_deviceid;
373}
374
375EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);
376
377u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
378{
379 return dd->ipath_lastibcstat;
380}
381
382EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat);
383
384u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
385{
386 return dd->ipath_ibmtu;
387}
388
389EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu);
390
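/*
 * Give the registered layered (ethernet) and verbs drivers a chance to
 * attach to a newly added device; ipath_layer_del() below undoes this.
 */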
391void ipath_layer_add(struct ipath_devdata *dd)
392{
393 mutex_lock(&ipath_layer_mutex);
394
395 if (layer_add_one)
396 dd->ipath_layer.l_arg =
397 layer_add_one(dd->ipath_unit, dd);
398
399 if (verbs_add_one)
400 dd->verbs_layer.l_arg =
401 verbs_add_one(dd->ipath_unit, dd);
402
403 mutex_unlock(&ipath_layer_mutex);
404}
405
406void ipath_layer_del(struct ipath_devdata *dd)
407{
408 mutex_lock(&ipath_layer_mutex);
409
410 if (dd->ipath_layer.l_arg && layer_remove_one) {
411 layer_remove_one(dd->ipath_layer.l_arg);
412 dd->ipath_layer.l_arg = NULL;
413 }
414
415 if (dd->verbs_layer.l_arg && verbs_remove_one) {
416 verbs_remove_one(dd->verbs_layer.l_arg);
417 dd->verbs_layer.l_arg = NULL;
418 }
419
420 mutex_unlock(&ipath_layer_mutex);
421}
422
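/**
 * ipath_layer_register - register a layered driver with all devices
 * @l_add: callback invoked for each initialized device; returns the
 *	opaque per-device argument passed to the other callbacks
 * @l_remove: callback invoked when a device is going away
 * @l_intr: callback for interrupt/status events (link up, LID set, etc.)
 * @l_rcv: callback for received packets (header and skb)
 * @l_rcv_opcode: opcode the layered driver handles; saved in the
 *	exported ipath_layer_rcv_opcode
 * @l_rcv_lid: secondary receive callback (header only)
 *
 * The callbacks are recorded and @l_add is called immediately for every
 * device that is already initialized.
 */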
423int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
424 void (*l_remove)(void *),
425 int (*l_intr)(void *, u32),
426 int (*l_rcv)(void *, void *, struct sk_buff *),
427 u16 l_rcv_opcode,
428 int (*l_rcv_lid)(void *, void *))
429{
430 struct ipath_devdata *dd, *tmp;
431 unsigned long flags;
432
433 mutex_lock(&ipath_layer_mutex);
434
435 layer_add_one = l_add;
436 layer_remove_one = l_remove;
437 layer_intr = l_intr;
438 layer_rcv = l_rcv;
439 layer_rcv_lid = l_rcv_lid;
440 ipath_layer_rcv_opcode = l_rcv_opcode;
441
442 spin_lock_irqsave(&ipath_devs_lock, flags);
443
444 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
445 if (!(dd->ipath_flags & IPATH_INITTED))
446 continue;
447
448 if (dd->ipath_layer.l_arg)
449 continue;
450
451 if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
452 *dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;
453
454 spin_unlock_irqrestore(&ipath_devs_lock, flags);
455 dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
456 spin_lock_irqsave(&ipath_devs_lock, flags);
457 }
458
459 spin_unlock_irqrestore(&ipath_devs_lock, flags);
460 mutex_unlock(&ipath_layer_mutex);
461
462 return 0;
463}
464
465EXPORT_SYMBOL_GPL(ipath_layer_register);
466
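/**
 * ipath_layer_unregister - detach the layered driver from all devices
 *
 * Calls the remove callback for every device that has a layered driver
 * attached, then clears all of the layered-driver callback pointers.
 */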
467void ipath_layer_unregister(void)
468{
469 struct ipath_devdata *dd, *tmp;
470 unsigned long flags;
471
472 mutex_lock(&ipath_layer_mutex);
473 spin_lock_irqsave(&ipath_devs_lock, flags);
474
475 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
476 if (dd->ipath_layer.l_arg && layer_remove_one) {
477 spin_unlock_irqrestore(&ipath_devs_lock, flags);
478 layer_remove_one(dd->ipath_layer.l_arg);
479 spin_lock_irqsave(&ipath_devs_lock, flags);
480 dd->ipath_layer.l_arg = NULL;
481 }
482 }
483
484 spin_unlock_irqrestore(&ipath_devs_lock, flags);
485
486 layer_add_one = NULL;
487 layer_remove_one = NULL;
488 layer_intr = NULL;
489 layer_rcv = NULL;
490 layer_rcv_lid = NULL;
491
492 mutex_unlock(&ipath_layer_mutex);
493}
494
495EXPORT_SYMBOL_GPL(ipath_layer_unregister);
496
497static void __ipath_verbs_timer(unsigned long arg)
498{
499 struct ipath_devdata *dd = (struct ipath_devdata *) arg;
500
501 /*
502 * If port 0 receive packet interrupts are not available, or
503 * can be missed, poll the receive queue
504 */
505 if (dd->ipath_flags & IPATH_POLL_RX_INTR)
506 ipath_kreceive(dd);
507
508 /* Handle verbs layer timeouts. */
509 if (dd->verbs_layer.l_arg && verbs_timer_cb)
510 verbs_timer_cb(dd->verbs_layer.l_arg);
511
512 mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);
513}
514
515/**
516 * ipath_verbs_register - verbs layer registration
517 * @l_add: callback to attach to an initialized device
518 * @l_remove: callback to detach from a device
519 * @l_piobufavail: callback for when PIO buffers become available
520 * @l_rcv: callback for receiving a packet
 * @l_timer_cb: timer callback
521 */
522int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
523 void (*l_remove)(void *arg),
524 int (*l_piobufavail) (void *arg),
525 void (*l_rcv) (void *arg, void *rhdr,
526 void *data, u32 tlen),
527 void (*l_timer_cb) (void *arg))
528{
529 struct ipath_devdata *dd, *tmp;
530 unsigned long flags;
531
532 mutex_lock(&ipath_layer_mutex);
533
534 verbs_add_one = l_add;
535 verbs_remove_one = l_remove;
536 verbs_piobufavail = l_piobufavail;
537 verbs_rcv = l_rcv;
538 verbs_timer_cb = l_timer_cb;
539
540 spin_lock_irqsave(&ipath_devs_lock, flags);
541
542 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
543 if (!(dd->ipath_flags & IPATH_INITTED))
544 continue;
545
546 if (dd->verbs_layer.l_arg)
547 continue;
548
549 spin_unlock_irqrestore(&ipath_devs_lock, flags);
550 dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd);
551 spin_lock_irqsave(&ipath_devs_lock, flags);
552 }
553
554 spin_unlock_irqrestore(&ipath_devs_lock, flags);
555 mutex_unlock(&ipath_layer_mutex);
556
557 ipath_verbs_registered = 1;
558
559 return 0;
560}
561
562EXPORT_SYMBOL_GPL(ipath_verbs_register);
563
564void ipath_verbs_unregister(void)
565{
566 struct ipath_devdata *dd, *tmp;
567 unsigned long flags;
568
569 mutex_lock(&ipath_layer_mutex);
570 spin_lock_irqsave(&ipath_devs_lock, flags);
571
572 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
573 *dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
574
575 if (dd->verbs_layer.l_arg && verbs_remove_one) {
576 spin_unlock_irqrestore(&ipath_devs_lock, flags);
577 verbs_remove_one(dd->verbs_layer.l_arg);
578 spin_lock_irqsave(&ipath_devs_lock, flags);
579 dd->verbs_layer.l_arg = NULL;
580 }
581 }
582
583 spin_unlock_irqrestore(&ipath_devs_lock, flags);
584
585 verbs_add_one = NULL;
586 verbs_remove_one = NULL;
587 verbs_piobufavail = NULL;
588 verbs_rcv = NULL;
589 verbs_timer_cb = NULL;
590
591	ipath_verbs_registered = 0;
592
593	mutex_unlock(&ipath_layer_mutex);
594}
595
596EXPORT_SYMBOL_GPL(ipath_verbs_unregister);
597
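/**
 * ipath_layer_open - the layered driver is ready to use a device
 * @dd: the infinipath device
 * @pktmax: the largest packet the layer may send is returned here
 *
 * Sets the receive header size and replays any link-up, LID and
 * broadcast LID events that happened before the layered driver attached.
 */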
598int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
599{
600 int ret;
601 u32 intval = 0;
602
603 mutex_lock(&ipath_layer_mutex);
604
605 if (!dd->ipath_layer.l_arg) {
606 ret = -EINVAL;
607 goto bail;
608 }
609
610 ret = ipath_setrcvhdrsize(dd, NUM_OF_EXTRA_WORDS_IN_HEADER_QUEUE);
611
612 if (ret < 0)
613 goto bail;
614
615 *pktmax = dd->ipath_ibmaxlen;
616
617 if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
618 intval |= IPATH_LAYER_INT_IF_UP;
619 if (ipath_stats.sps_lid[dd->ipath_unit])
620 intval |= IPATH_LAYER_INT_LID;
621 if (ipath_stats.sps_mlid[dd->ipath_unit])
622 intval |= IPATH_LAYER_INT_BCAST;
623 /*
624 * do this on open, in case low level is already up and
625 * just layered driver was reloaded, etc.
626 */
627 if (intval)
628 layer_intr(dd->ipath_layer.l_arg, intval);
629
630 ret = 0;
631bail:
632 mutex_unlock(&ipath_layer_mutex);
633
634 return ret;
635}
636
637EXPORT_SYMBOL_GPL(ipath_layer_open);
638
639u16 ipath_layer_get_lid(struct ipath_devdata *dd)
640{
641 return dd->ipath_lid;
642}
643
644EXPORT_SYMBOL_GPL(ipath_layer_get_lid);
645
646/**
647 * ipath_layer_get_mac - get the MAC address
648 * @dd: the infinipath device
649 * @mac: the MAC is put here
650 *
651 * This is the EUI-64 OUI octets (top 3), then
652 * skip the next 2 (which should both be zero or 0xff),
653 * then the low 3 octets of the GUID.  The returned MAC is in
654 * network order; mac must point to at least 6 bytes of buffer.
655 * We assume that by the time the LID is set, the GUID is as valid
656 * as it's ever going to be, rather than adding yet another status bit.
657 */
658
659int ipath_layer_get_mac(struct ipath_devdata *dd, u8 * mac)
660{
661 u8 *guid;
662
663 guid = (u8 *) &dd->ipath_guid;
664
665 mac[0] = guid[0];
666 mac[1] = guid[1];
667 mac[2] = guid[2];
668 mac[3] = guid[5];
669 mac[4] = guid[6];
670 mac[5] = guid[7];
671 if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
672 ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
673 "%x %x\n", guid[3], guid[4]);
674 return 0;
675}
676
677EXPORT_SYMBOL_GPL(ipath_layer_get_mac);
678
679u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
680{
681 return dd->ipath_mlid;
682}
683
684EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
685
686u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
687{
688 return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
689}
690
691EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey);
692
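/*
 * Consume 'length' bytes from the current SGE, advancing to the next SGE
 * in the list (or the next memory region segment) when one is exhausted.
 */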
693static void update_sge(struct ipath_sge_state *ss, u32 length)
694{
695 struct ipath_sge *sge = &ss->sge;
696
697 sge->vaddr += length;
698 sge->length -= length;
699 sge->sge_length -= length;
700 if (sge->sge_length == 0) {
701 if (--ss->num_sge)
702 *sge = *ss->sg_list++;
703 } else if (sge->length == 0 && sge->mr != NULL) {
704 if (++sge->n >= IPATH_SEGSZ) {
705 if (++sge->m >= sge->mr->mapsz)
706 return;
707 sge->n = 0;
708 }
709 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
710 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
711 }
712}
713
714#ifdef __LITTLE_ENDIAN
715static inline u32 get_upper_bits(u32 data, u32 shift)
716{
717 return data >> shift;
718}
719
720static inline u32 set_upper_bits(u32 data, u32 shift)
721{
722 return data << shift;
723}
724
725static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
726{
727 data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
728 data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
729 return data;
730}
731#else
732static inline u32 get_upper_bits(u32 data, u32 shift)
733{
734 return data << shift;
735}
736
737static inline u32 set_upper_bits(u32 data, u32 shift)
738{
739 return data >> shift;
740}
741
742static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
743{
744 data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
745 data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
746 return data;
747}
748#endif
749
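/*
 * Copy 'length' bytes described by the SGE state into a PIO buffer,
 * coping with unaligned source addresses by assembling whole 32-bit
 * words.  The final word is held back and written after a
 * write-combining flush so that it serves as the trigger word.
 */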
750static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
751 u32 length)
752{
753 u32 extra = 0;
754 u32 data = 0;
755 u32 last;
756
757 while (1) {
758 u32 len = ss->sge.length;
759 u32 off;
760
761 BUG_ON(len == 0);
762 if (len > length)
763 len = length;
764 if (len > ss->sge.sge_length)
765 len = ss->sge.sge_length;
766 /* If the source address is not aligned, try to align it. */
767 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
768 if (off) {
769 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
770 ~(sizeof(u32) - 1));
771 u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
772 u32 y;
773
774 y = sizeof(u32) - off;
775 if (len > y)
776 len = y;
777 if (len + extra >= sizeof(u32)) {
778 data |= set_upper_bits(v, extra *
779 BITS_PER_BYTE);
780 len = sizeof(u32) - extra;
781 if (len == length) {
782 last = data;
783 break;
784 }
785 __raw_writel(data, piobuf);
786 piobuf++;
787 extra = 0;
788 data = 0;
789 } else {
790 /* Clear unused upper bytes */
791 data |= clear_upper_bytes(v, len, extra);
792 if (len == length) {
793 last = data;
794 break;
795 }
796 extra += len;
797 }
798 } else if (extra) {
799 /* Source address is aligned. */
800 u32 *addr = (u32 *) ss->sge.vaddr;
801 int shift = extra * BITS_PER_BYTE;
802 int ushift = 32 - shift;
803 u32 l = len;
804
805 while (l >= sizeof(u32)) {
806 u32 v = *addr;
807
808 data |= set_upper_bits(v, shift);
809 __raw_writel(data, piobuf);
810 data = get_upper_bits(v, ushift);
811 piobuf++;
812 addr++;
813 l -= sizeof(u32);
814 }
815 /*
816 * We still have 'extra' number of bytes leftover.
817 */
818 if (l) {
819 u32 v = *addr;
820
821 if (l + extra >= sizeof(u32)) {
822 data |= set_upper_bits(v, shift);
823 len -= l + extra - sizeof(u32);
824 if (len == length) {
825 last = data;
826 break;
827 }
828 __raw_writel(data, piobuf);
829 piobuf++;
830 extra = 0;
831 data = 0;
832 } else {
833 /* Clear unused upper bytes */
834 data |= clear_upper_bytes(v, l,
835 extra);
836 if (len == length) {
837 last = data;
838 break;
839 }
840 extra += l;
841 }
842 } else if (len == length) {
843 last = data;
844 break;
845 }
846 } else if (len == length) {
847 u32 w;
848
849 /*
850 * Need to round up for the last dword in the
851 * packet.
852 */
853 w = (len + 3) >> 2;
854 __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
855 piobuf += w - 1;
856 last = ((u32 *) ss->sge.vaddr)[w - 1];
857 break;
858 } else {
859 u32 w = len >> 2;
860
861 __iowrite32_copy(piobuf, ss->sge.vaddr, w);
862 piobuf += w;
863
864 extra = len & (sizeof(u32) - 1);
865 if (extra) {
866 u32 v = ((u32 *) ss->sge.vaddr)[w];
867
868 /* Clear unused upper bytes */
869 data = clear_upper_bytes(v, extra, 0);
870 }
871 }
872 update_sge(ss, len);
873 length -= len;
874 }
875	/* Update address before sending packet. */
876 update_sge(ss, length);
877	/* must flush early everything before trigger word */
878 ipath_flush_wc();
879 __raw_writel(last, piobuf);
880 /* be sure trigger word is written */
881 ipath_flush_wc();
882}
883
884/**
885 * ipath_verbs_send - send a packet from the verbs layer
886 * @dd: the infinipath device
887 * @hdrwords: the number of words in the header
888 * @hdr: the packet header
889 * @len: the length of the packet in bytes
890 * @ss: the SGE to send
891 *
892 * This is like ipath_sma_send_pkt() in that we need to be able to send
893 * packets after the chip is initialized (MADs) but also like
894 * ipath_layer_send_hdr() since it's used by the verbs layer.
895 */
896int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
897 u32 *hdr, u32 len, struct ipath_sge_state *ss)
898{
899 u32 __iomem *piobuf;
900 u32 plen;
901 int ret;
902
903 /* +1 is for the qword padding of pbc */
904 plen = hdrwords + ((len + 3) >> 2) + 1;
905 if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
906 ipath_dbg("packet len 0x%x too long, failing\n", plen);
907 ret = -EINVAL;
908 goto bail;
909 }
910
911 /* Get a PIO buffer to use. */
912 piobuf = ipath_getpiobuf(dd, NULL);
913 if (unlikely(piobuf == NULL)) {
914 ret = -EBUSY;
915 goto bail;
916 }
917
918 /*
919 * Write len to control qword, no flags.
920 * We have to flush after the PBC for correctness on some cpus
921 * or WC buffer can be written out of order.
922 */
923 writeq(plen, piobuf);
924 ipath_flush_wc();
925 piobuf += 2;
926 if (len == 0) {
927 /*
928 * If there is just the header portion, must flush before
929 * writing last word of header for correctness, and after
930 * the last header word (trigger word).
931 */
932 __iowrite32_copy(piobuf, hdr, hdrwords - 1);
933 ipath_flush_wc();
934 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
935 ipath_flush_wc();
936 ret = 0;
937 goto bail;
938 }
939
940 __iowrite32_copy(piobuf, hdr, hdrwords);
941 piobuf += hdrwords;
942
943 /* The common case is aligned and contained in one segment. */
944 if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
945 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
946 u32 w;
947		u32 *addr = (u32 *) ss->sge.vaddr;
948
949		/* Update address before sending packet. */
950 update_sge(ss, len);
951		/* Need to round up for the last dword in the packet. */
952 w = (len + 3) >> 2;
953		__iowrite32_copy(piobuf, addr, w - 1);
954		/* must flush early everything before trigger word */
955 ipath_flush_wc();
956		__raw_writel(addr[w - 1], piobuf + w - 1);
957		/* be sure trigger word is written */
958 ipath_flush_wc();
959		ret = 0;
960 goto bail;
961 }
962 copy_io(piobuf, ss, len);
963 ret = 0;
964
965bail:
966 return ret;
967}
968
969EXPORT_SYMBOL_GPL(ipath_verbs_send);
970
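/**
 * ipath_layer_snapshot_counters - snapshot basic send/receive counters
 * @dd: the infinipath device
 * @swords: send word count is placed here
 * @rwords: receive word count is placed here
 * @spkts: send packet count is placed here
 * @rpkts: receive packet count is placed here
 * @xmit_wait: send stall count is placed here
 */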
971int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
972 u64 *rwords, u64 *spkts, u64 *rpkts,
973 u64 *xmit_wait)
974{
975 int ret;
976
977 if (!(dd->ipath_flags & IPATH_INITTED)) {
978 /* no hardware, freeze, etc. */
979 ipath_dbg("unit %u not usable\n", dd->ipath_unit);
980 ret = -EINVAL;
981 goto bail;
982 }
983 *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
984 *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
985 *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
986 *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
987 *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
988
989 ret = 0;
990
991bail:
992 return ret;
993}
994
995EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters);
996
997/**
998 * ipath_layer_get_counters - get various chip counters
999 * @dd: the infinipath device
1000 * @cntrs: counters are placed here
1001 *
1002 * Return the counters needed by recv_pma_get_portcounters().
1003 */
1004int ipath_layer_get_counters(struct ipath_devdata *dd,
1005 struct ipath_layer_counters *cntrs)
1006{
1007 int ret;
1008
1009 if (!(dd->ipath_flags & IPATH_INITTED)) {
1010 /* no hardware, freeze, etc. */
1011 ipath_dbg("unit %u not usable\n", dd->ipath_unit);
1012 ret = -EINVAL;
1013 goto bail;
1014 }
1015 cntrs->symbol_error_counter =
1016 ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
1017 cntrs->link_error_recovery_counter =
1018 ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
1019 cntrs->link_downed_counter =
1020 ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
1021 cntrs->port_rcv_errors =
1022 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
1023 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
1024 ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
1025 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errrcvflowctrlcnt) +
1026 ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
1027 ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
1028 ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
1029 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
1030 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
1031 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlinkcnt) +
1032 ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
1033 cntrs->port_rcv_remphys_errors =
1034 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
1035 cntrs->port_xmit_discards =
1036 ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
1037 cntrs->port_xmit_data =
1038 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
1039 cntrs->port_rcv_data =
1040 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
1041 cntrs->port_xmit_packets =
1042 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
1043 cntrs->port_rcv_packets =
1044 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
1045
1046 ret = 0;
1047
1048bail:
1049 return ret;
1050}
1051
1052EXPORT_SYMBOL_GPL(ipath_layer_get_counters);
1053
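/*
 * Request an interrupt when PIO send buffers become available again.
 */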
1054int ipath_layer_want_buffer(struct ipath_devdata *dd)
1055{
1056 set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
1057 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1058 dd->ipath_sendctrl);
1059
1060 return 0;
1061}
1062
1063EXPORT_SYMBOL_GPL(ipath_layer_want_buffer);
1064
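/**
 * ipath_layer_send_hdr - send a header-only packet for the layered driver
 * @dd: the infinipath device
 * @hdr: the ether_header to send
 *
 * The header is copied to a PIO buffer; the last word is written
 * separately, after a write-combining flush, as the trigger word.
 */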
1065int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
1066{
1067 int ret = 0;
1068 u32 __iomem *piobuf;
1069 u32 plen, *uhdr;
1070 size_t count;
1071 __be16 vlsllnh;
1072
1073 if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
1074 ipath_dbg("send while not open\n");
1075 ret = -EINVAL;
1076 } else
1077 if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
1078 dd->ipath_lid == 0) {
1079 /*
1080		 * The LID check is for when the SMA hasn't yet configured the LID.
1081 */
1082 ret = -ENETDOWN;
1083 ipath_cdbg(VERBOSE, "send while not ready, "
1084 "mylid=%u, flags=0x%x\n",
1085 dd->ipath_lid, dd->ipath_flags);
1086 }
1087
1088 vlsllnh = *((__be16 *) hdr);
1089 if (vlsllnh != htons(IPS_LRH_BTH)) {
1090 ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
1091 "not sending\n", be16_to_cpu(vlsllnh),
1092 IPS_LRH_BTH);
1093 ret = -EINVAL;
1094 }
1095 if (ret)
1096 goto done;
1097
1098 /* Get a PIO buffer to use. */
1099 piobuf = ipath_getpiobuf(dd, NULL);
1100 if (piobuf == NULL) {
1101 ret = -EBUSY;
1102 goto done;
1103 }
1104
1105 plen = (sizeof(*hdr) >> 2); /* actual length */
1106 ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);
1107
1108 writeq(plen+1, piobuf); /* len (+1 for pad) to pbc, no flags */
1109 ipath_flush_wc();
1110 piobuf += 2;
1111 uhdr = (u32 *)hdr;
1112 count = plen-1; /* amount we can copy before trigger word */
1113 __iowrite32_copy(piobuf, uhdr, count);
1114 ipath_flush_wc();
1115 __raw_writel(uhdr[count], piobuf + count);
1116 ipath_flush_wc(); /* ensure it's sent, now */
1117
1118 ipath_stats.sps_ether_spkts++; /* ether packet sent */
1119
1120done:
1121 return ret;
1122}
1123
1124EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);
1125
1126int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
1127{
1128 set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
1129
1130 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1131 dd->ipath_sendctrl);
1132 return 0;
1133}
1134
1135EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
1136
1137int ipath_layer_enable_timer(struct ipath_devdata *dd)
1138{
1139 /*
1140 * HT-400 has a design flaw where the chip and kernel idea
1141 * of the tail register don't always agree, and therefore we won't
1142 * get an interrupt on the next packet received.
1143 * If the board supports per packet receive interrupts, use it.
1144 * Otherwise, the timer function periodically checks for packets
1145 * to cover this case.
1146 * Either way, the timer is needed for verbs layer related
1147 * processing.
1148 */
1149 if (dd->ipath_flags & IPATH_GPIO_INTR) {
1150 ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
1151 0x2074076542310ULL);
1152 /* Enable GPIO bit 2 interrupt */
1153 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
1154 (u64) (1 << 2));
1155 }
1156
1157 init_timer(&dd->verbs_layer.l_timer);
1158 dd->verbs_layer.l_timer.function = __ipath_verbs_timer;
1159 dd->verbs_layer.l_timer.data = (unsigned long)dd;
1160 dd->verbs_layer.l_timer.expires = jiffies + 1;
1161 add_timer(&dd->verbs_layer.l_timer);
1162
1163 return 0;
1164}
1165
1166EXPORT_SYMBOL_GPL(ipath_layer_enable_timer);
1167
1168int ipath_layer_disable_timer(struct ipath_devdata *dd)
1169{
1170 /* Disable GPIO bit 2 interrupt */
1171 if (dd->ipath_flags & IPATH_GPIO_INTR)
1172 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
1173
1174 del_timer_sync(&dd->verbs_layer.l_timer);
1175
1176 return 0;
1177}
1178
1179EXPORT_SYMBOL_GPL(ipath_layer_disable_timer);
1180
1181/**
1182 * ipath_layer_set_verbs_flags - set the verbs layer flags
1183 * @dd: the infinipath device
1184 * @flags: the flags to set
1185 */
1186int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
1187{
1188 struct ipath_devdata *ss;
1189 unsigned long lflags;
1190
1191 spin_lock_irqsave(&ipath_devs_lock, lflags);
1192
1193 list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
1194 if (!(ss->ipath_flags & IPATH_INITTED))
1195 continue;
1196 if ((flags & IPATH_VERBS_KERNEL_SMA) &&
1197 !(*ss->ipath_statusp & IPATH_STATUS_SMA))
1198 *ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
1199 else
1200 *ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
1201 }
1202
1203 spin_unlock_irqrestore(&ipath_devs_lock, lflags);
1204
1205 return 0;
1206}
1207
1208EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags);
1209
1210/**
1211 * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
1212 * @dd: the infinipath device
1213 */
1214unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
1215{
1216 return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
1217}
1218
1219EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys);
1220
1221/**
1222 * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
1223 * @dd: the infinipath device
1224 * @index: the PKEY index
1225 */
1226unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
1227{
1228 unsigned ret;
1229
1230 if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
1231 ret = 0;
1232 else
1233 ret = dd->ipath_pd[0]->port_pkeys[index];
1234
1235 return ret;
1236}
1237
1238EXPORT_SYMBOL_GPL(ipath_layer_get_pkey);
1239
1240/**
1241 * ipath_layer_get_pkeys - return the PKEY table for port 0
1242 * @dd: the infinipath device
1243 * @pkeys: the pkey table is placed here
1244 */
1245int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
1246{
1247 struct ipath_portdata *pd = dd->ipath_pd[0];
1248
1249 memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
1250
1251 return 0;
1252}
1253
1254EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys);
1255
1256/**
1257 * rm_pkey - decrement the reference count for the given PKEY
1258 * @dd: the infinipath device
1259 * @key: the PKEY to remove
1260 *
1261 * Return true if this was the last reference and the hardware table entry
1262 * needs to be changed.
1263 */
1264static int rm_pkey(struct ipath_devdata *dd, u16 key)
1265{
1266 int i;
1267 int ret;
1268
1269 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1270 if (dd->ipath_pkeys[i] != key)
1271 continue;
1272 if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
1273 dd->ipath_pkeys[i] = 0;
1274 ret = 1;
1275 goto bail;
1276 }
1277 break;
1278 }
1279
1280 ret = 0;
1281
1282bail:
1283 return ret;
1284}
1285
1286/**
1287 * add_pkey - add the given PKEY to the hardware table
1288 * @dd: the infinipath device
1289 * @key: the PKEY
1290 *
1291 * Return an error code if unable to add the entry, zero if no change,
1292 * or 1 if the hardware PKEY register needs to be updated.
1293 */
1294static int add_pkey(struct ipath_devdata *dd, u16 key)
1295{
1296 int i;
1297 u16 lkey = key & 0x7FFF;
1298 int any = 0;
1299 int ret;
1300
1301 if (lkey == 0x7FFF) {
1302 ret = 0;
1303 goto bail;
1304 }
1305
1306 /* Look for an empty slot or a matching PKEY. */
1307 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1308 if (!dd->ipath_pkeys[i]) {
1309 any++;
1310 continue;
1311 }
1312 /* If it matches exactly, try to increment the ref count */
1313 if (dd->ipath_pkeys[i] == key) {
1314 if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
1315 ret = 0;
1316 goto bail;
1317 }
1318 /* Lost the race. Look for an empty slot below. */
1319 atomic_dec(&dd->ipath_pkeyrefs[i]);
1320 any++;
1321 }
1322 /*
1323 * It makes no sense to have both the limited and unlimited
1324 * PKEY set at the same time since the unlimited one will
1325 * disable the limited one.
1326 */
1327 if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
1328 ret = -EEXIST;
1329 goto bail;
1330 }
1331 }
1332 if (!any) {
1333 ret = -EBUSY;
1334 goto bail;
1335 }
1336 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1337 if (!dd->ipath_pkeys[i] &&
1338 atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
1339 /* for ipathstats, etc. */
1340 ipath_stats.sps_pkeys[i] = lkey;
1341 dd->ipath_pkeys[i] = key;
1342 ret = 1;
1343 goto bail;
1344 }
1345 }
1346 ret = -EBUSY;
1347
1348bail:
1349 return ret;
1350}
1351
1352/**
1353 * ipath_layer_set_pkeys - set the PKEY table for port 0
1354 * @dd: the infinipath device
1355 * @pkeys: the PKEY table
1356 */
1357int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys)
1358{
1359 struct ipath_portdata *pd;
1360 int i;
1361 int changed = 0;
1362
1363 pd = dd->ipath_pd[0];
1364
1365 for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
1366 u16 key = pkeys[i];
1367 u16 okey = pd->port_pkeys[i];
1368
1369 if (key == okey)
1370 continue;
1371 /*
1372 * The value of this PKEY table entry is changing.
1373 * Remove the old entry in the hardware's array of PKEYs.
1374 */
1375 if (okey & 0x7FFF)
1376 changed |= rm_pkey(dd, okey);
1377 if (key & 0x7FFF) {
1378 int ret = add_pkey(dd, key);
1379
1380 if (ret < 0)
1381 key = 0;
1382 else
1383 changed |= ret;
1384 }
1385 pd->port_pkeys[i] = key;
1386 }
1387 if (changed) {
1388 u64 pkey;
1389
1390 pkey = (u64) dd->ipath_pkeys[0] |
1391 ((u64) dd->ipath_pkeys[1] << 16) |
1392 ((u64) dd->ipath_pkeys[2] << 32) |
1393 ((u64) dd->ipath_pkeys[3] << 48);
1394 ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
1395 (unsigned long long) pkey);
1396 ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
1397 pkey);
1398 }
1399 return 0;
1400}
1401
1402EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys);
1403
1404/**
1405 * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
1406 * @dd: the infinipath device
1407 *
1408 * Returns zero if the default is POLL, 1 if the default is SLEEP.
1409 */
1410int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
1411{
1412 return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
1413}
1414
1415EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate);
1416
1417/**
1418 * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
1419 * @dd: the infinipath device
1420 * @sleep: the new state
1421 *
1422 * Note that this will only take effect when the link state changes.
1423 */
1424int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
1425 int sleep)
1426{
1427 if (sleep)
1428 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
1429 else
1430 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
1431 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1432 dd->ipath_ibcctrl);
1433 return 0;
1434}
1435
1436EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate);
1437
1438int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
1439{
1440 return (dd->ipath_ibcctrl >>
1441 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
1442 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
1443}
1444
1445EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold);
1446
1447/**
1448 * ipath_layer_set_phyerrthreshold - set the physical error threshold
1449 * @dd: the infinipath device
1450 * @n: the new threshold
1451 *
1452 * Note that this will only take effect when the link state changes.
1453 */
1454int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
1455{
1456 unsigned v;
1457
1458 v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
1459 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
1460 if (v != n) {
1461 dd->ipath_ibcctrl &=
1462 ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
1463 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
1464 dd->ipath_ibcctrl |=
1465 (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
1466 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1467 dd->ipath_ibcctrl);
1468 }
1469 return 0;
1470}
1471
1472EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold);
1473
1474int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
1475{
1476 return (dd->ipath_ibcctrl >>
1477 INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
1478 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
1479}
1480
1481EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold);
1482
1483/**
1484 * ipath_layer_set_overrunthreshold - set the overrun threshold
1485 * @dd: the infinipath device
1486 * @n: the new threshold
1487 *
1488 * Note that this will only take effect when the link state changes.
1489 */
1490int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
1491{
1492 unsigned v;
1493
1494 v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
1495 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
1496 if (v != n) {
1497 dd->ipath_ibcctrl &=
1498 ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
1499 INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
1500 dd->ipath_ibcctrl |=
1501 (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
1502 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1503 dd->ipath_ibcctrl);
1504 }
1505 return 0;
1506}
1507
1508EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold);
1509
1510int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
1511 size_t namelen)
1512{
1513 return dd->ipath_f_get_boardname(dd, name, namelen);
1514}
1515EXPORT_SYMBOL_GPL(ipath_layer_get_boardname);
1516
1517u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
1518{
1519 return dd->ipath_rcvhdrentsize;
1520}
1521EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize);