/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * These are the routines used by layered drivers, currently just the
 * layered ethernet driver and verbs layer.
 */

#include <linux/io.h>
#include <linux/pci.h>
#include <asm/byteorder.h>

#include "ipath_kernel.h"
#include "ips_common.h"
#include "ipath_layer.h"

/* Acquire before ipath_devs_lock. */
static DEFINE_MUTEX(ipath_layer_mutex);

static int ipath_verbs_registered;

u16 ipath_layer_rcv_opcode;

static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *);
static int (*verbs_piobufavail)(void *);
static void (*verbs_rcv)(void *, void *, void *, u32);

static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *);
static void *(*verbs_add_one)(int, struct ipath_devdata *);
static void (*verbs_remove_one)(void *);
static void (*verbs_timer_cb)(void *);

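/*
 * The __-prefixed helpers below are thin dispatchers into the
 * registered layered-driver and verbs callbacks; each returns -ENODEV
 * if no consumer has attached to the device.  They do not take
 * ipath_layer_mutex themselves; ipath_layer_intr() is the mutex-taking
 * wrapper around __ipath_layer_intr().
 */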
int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_intr)
		ret = layer_intr(dd->ipath_layer.l_arg, arg);

	return ret;
}

int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
	int ret;

	mutex_lock(&ipath_layer_mutex);

	ret = __ipath_layer_intr(dd, arg);

	mutex_unlock(&ipath_layer_mutex);

	return ret;
}

int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
		      struct sk_buff *skb)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_rcv)
		ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);

	return ret;
}

int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_rcv_lid)
		ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);

	return ret;
}

int __ipath_verbs_piobufavail(struct ipath_devdata *dd)
{
	int ret = -ENODEV;

	if (dd->verbs_layer.l_arg && verbs_piobufavail)
		ret = verbs_piobufavail(dd->verbs_layer.l_arg);

	return ret;
}

int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf,
		      u32 tlen)
{
	int ret = -ENODEV;

	if (dd->verbs_layer.l_arg && verbs_rcv) {
		verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen);
		ret = 0;
	}

	return ret;
}

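/**
 * ipath_layer_set_linkstate - request a new IB link state
 * @dd: the infinipath device
 * @newstate: one of the IPATH_IB_LINK* state requests
 *
 * The LINKDOWN variants only issue the link command and return
 * immediately; for LINKINIT, LINKARM and LINKACTIVE the call also
 * waits for the link to reach the requested state.  Returns 0 on
 * success, -EINVAL for an invalid transition, or the result of the
 * wait otherwise.
 */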
int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
{
	u32 lstate;
	int ret;

	switch (newstate) {
	case IPATH_IB_LINKDOWN:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_SLEEP:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_DISABLE:
		ipath_set_ib_lstate(dd,
				    INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKINIT:
		if (dd->ipath_flags & IPATH_LINKINIT) {
			ret = 0;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKINIT;
		break;

	case IPATH_IB_LINKARM:
		if (dd->ipath_flags & IPATH_LINKARMED) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags &
		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		/*
		 * Since the port can transition to ACTIVE by receiving
		 * a non VL 15 packet, wait for either state.
		 */
		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINKACTIVE:
		if (dd->ipath_flags & IPATH_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKACTIVE;
		break;

	default:
		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
		ret = -EINVAL;
		goto bail;
	}
	ret = ipath_wait_linkstate(dd, lstate, 2000);

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate);

/**
 * ipath_layer_set_mtu - set the MTU
 * @dd: the infinipath device
 * @arg: the new MTU
 *
 * We can handle "any" incoming size; the issue here is whether we
 * need to restrict our outgoing size.  For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link initialize (IPATH_IBSTATE_INIT) state...
 */
int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
{
	u32 piosize;
	int changed = 0;
	int ret;

	/*
	 * mtu is IB data payload max.  It's the largest power of 2 less
	 * than piosize (or even larger, since it only really controls the
	 * largest we can receive; we can send the max of the mtu and
	 * piosize).  We check that it's one of the valid IB sizes.
	 */
	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    arg != 4096) {
		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
		ret = -EINVAL;
		goto bail;
	}
	if (dd->ipath_ibmtu == arg) {
		ret = 0;	/* same as current */
		goto bail;
	}

	piosize = dd->ipath_ibmaxlen;
	dd->ipath_ibmtu = arg;

	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != dd->ipath_init_ibmaxlen) {
			dd->ipath_ibmaxlen = piosize;
			changed = 1;
		}
	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
		piosize = arg + IPATH_PIO_MAXIBHDR;
		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
			   arg);
		dd->ipath_ibmaxlen = piosize;
		changed = 1;
	}

	if (changed) {
		/*
		 * set the IBC maxpktlength to the size of our pio
		 * buffers in words
		 */
		u64 ibc = dd->ipath_ibcctrl;
		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
			 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);

		piosize = piosize - 2 * sizeof(u32);	/* ignore pbc */
		dd->ipath_ibmaxlen = piosize;
		piosize /= sizeof(u32);	/* in words */
		/*
		 * for ICRC, which we only send in diag test pkt mode, and
		 * we don't need to worry about that for mtu
		 */
		piosize += 1;

		ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
		dd->ipath_ibcctrl = ibc;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		dd->ipath_f_tidtemplate(dd);
	}

	ret = 0;

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);

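/**
 * ipath_set_sps_lid - set the LID (and LMC) for the device
 * @dd: the infinipath device
 * @arg: the new LID
 * @lmc: the new LMC
 *
 * Records the LID in the driver statistics and the device data, then
 * notifies a registered layered driver with IPATH_LAYER_INT_LID.
 */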
int ipath_set_sps_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
{
	ipath_stats.sps_lid[dd->ipath_unit] = arg;
	dd->ipath_lid = arg;
	dd->ipath_lmc = lmc;

	mutex_lock(&ipath_layer_mutex);

	if (dd->ipath_layer.l_arg && layer_intr)
		layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);

	mutex_unlock(&ipath_layer_mutex);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_set_sps_lid);

int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
{
	/* XXX - need to inform anyone who cares this just happened. */
	dd->ipath_guid = guid;
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_guid);

__be64 ipath_layer_get_guid(struct ipath_devdata *dd)
{
	return dd->ipath_guid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_guid);

u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
{
	return dd->ipath_nguid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);

u32 ipath_layer_get_majrev(struct ipath_devdata *dd)
{
	return dd->ipath_majrev;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_majrev);

u32 ipath_layer_get_minrev(struct ipath_devdata *dd)
{
	return dd->ipath_minrev;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_minrev);

u32 ipath_layer_get_pcirev(struct ipath_devdata *dd)
{
	return dd->ipath_pcirev;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_pcirev);

u32 ipath_layer_get_flags(struct ipath_devdata *dd)
{
	return dd->ipath_flags;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_flags);

struct device *ipath_layer_get_device(struct ipath_devdata *dd)
{
	return &dd->pcidev->dev;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_device);

u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
{
	return dd->ipath_deviceid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);

u32 ipath_layer_get_vendorid(struct ipath_devdata *dd)
{
	return dd->ipath_vendorid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_vendorid);

u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
{
	return dd->ipath_lastibcstat;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat);

u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
{
	return dd->ipath_ibmtu;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu);

void ipath_layer_add(struct ipath_devdata *dd)
{
	mutex_lock(&ipath_layer_mutex);

	if (layer_add_one)
		dd->ipath_layer.l_arg =
			layer_add_one(dd->ipath_unit, dd);

	if (verbs_add_one)
		dd->verbs_layer.l_arg =
			verbs_add_one(dd->ipath_unit, dd);

	mutex_unlock(&ipath_layer_mutex);
}

void ipath_layer_remove(struct ipath_devdata *dd)
{
	mutex_lock(&ipath_layer_mutex);

	if (dd->ipath_layer.l_arg && layer_remove_one) {
		layer_remove_one(dd->ipath_layer.l_arg);
		dd->ipath_layer.l_arg = NULL;
	}

	if (dd->verbs_layer.l_arg && verbs_remove_one) {
		verbs_remove_one(dd->verbs_layer.l_arg);
		dd->verbs_layer.l_arg = NULL;
	}

	mutex_unlock(&ipath_layer_mutex);
}

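/*
 * A layered driver attaches by handing us its callbacks; a minimal
 * sketch (the callback names below are illustrative only, not part of
 * this API):
 *
 *	static void *my_add(int unit, struct ipath_devdata *dd)
 *	{ return my_alloc_state(unit, dd); }
 *	static void my_remove(void *arg) { my_free_state(arg); }
 *	static int my_intr(void *arg, u32 what) { ... return 0; }
 *	static int my_rcv(void *arg, void *hdr, struct sk_buff *skb)
 *	{ ... return 0; }
 *	static int my_rcv_lid(void *arg, void *hdr) { ... return 0; }
 *
 *	ipath_layer_register(my_add, my_remove, my_intr, my_rcv,
 *			     MY_OPCODE, my_rcv_lid);
 *
 * Registration immediately invokes the add callback for every device
 * that is already initialized.
 */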
int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
			 void (*l_remove)(void *),
			 int (*l_intr)(void *, u32),
			 int (*l_rcv)(void *, void *, struct sk_buff *),
			 u16 l_rcv_opcode,
			 int (*l_rcv_lid)(void *, void *))
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);

	layer_add_one = l_add;
	layer_remove_one = l_remove;
	layer_intr = l_intr;
	layer_rcv = l_rcv;
	layer_rcv_lid = l_rcv_lid;
	ipath_layer_rcv_opcode = l_rcv_opcode;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (!(dd->ipath_flags & IPATH_INITTED))
			continue;

		if (dd->ipath_layer.l_arg)
			continue;

		if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
			*dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;

		spin_unlock_irqrestore(&ipath_devs_lock, flags);
		dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
		spin_lock_irqsave(&ipath_devs_lock, flags);
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);
	mutex_unlock(&ipath_layer_mutex);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_register);

void ipath_layer_unregister(void)
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);
	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (dd->ipath_layer.l_arg && layer_remove_one) {
			spin_unlock_irqrestore(&ipath_devs_lock, flags);
			layer_remove_one(dd->ipath_layer.l_arg);
			spin_lock_irqsave(&ipath_devs_lock, flags);
			dd->ipath_layer.l_arg = NULL;
		}
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	layer_add_one = NULL;
	layer_remove_one = NULL;
	layer_intr = NULL;
	layer_rcv = NULL;
	layer_rcv_lid = NULL;

	mutex_unlock(&ipath_layer_mutex);
}

EXPORT_SYMBOL_GPL(ipath_layer_unregister);

static void __ipath_verbs_timer(unsigned long arg)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) arg;

	/*
	 * If port 0 receive packet interrupts are not available, or
	 * can be missed, poll the receive queue
	 */
	if (dd->ipath_flags & IPATH_POLL_RX_INTR)
		ipath_kreceive(dd);

	/* Handle verbs layer timeouts. */
	if (dd->verbs_layer.l_arg && verbs_timer_cb)
		verbs_timer_cb(dd->verbs_layer.l_arg);

	mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);
}

/**
 * ipath_verbs_register - verbs layer registration
 * @l_add: callback for when a device is added
 * @l_remove: callback for when a device is removed
 * @l_piobufavail: callback for when PIO buffers become available
 * @l_rcv: callback for receiving a packet
 * @l_timer_cb: timer callback
 */
int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
			 void (*l_remove)(void *arg),
			 int (*l_piobufavail) (void *arg),
			 void (*l_rcv) (void *arg, void *rhdr,
					void *data, u32 tlen),
			 void (*l_timer_cb) (void *arg))
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);

	verbs_add_one = l_add;
	verbs_remove_one = l_remove;
	verbs_piobufavail = l_piobufavail;
	verbs_rcv = l_rcv;
	verbs_timer_cb = l_timer_cb;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (!(dd->ipath_flags & IPATH_INITTED))
			continue;

		if (dd->verbs_layer.l_arg)
			continue;

		spin_unlock_irqrestore(&ipath_devs_lock, flags);
		dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd);
		spin_lock_irqsave(&ipath_devs_lock, flags);
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);
	mutex_unlock(&ipath_layer_mutex);

	ipath_verbs_registered = 1;

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_verbs_register);

void ipath_verbs_unregister(void)
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);
	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		*dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;

		if (dd->verbs_layer.l_arg && verbs_remove_one) {
			spin_unlock_irqrestore(&ipath_devs_lock, flags);
			verbs_remove_one(dd->verbs_layer.l_arg);
			spin_lock_irqsave(&ipath_devs_lock, flags);
			dd->verbs_layer.l_arg = NULL;
		}
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	verbs_add_one = NULL;
	verbs_remove_one = NULL;
	verbs_piobufavail = NULL;
	verbs_rcv = NULL;
	verbs_timer_cb = NULL;

	ipath_verbs_registered = 0;

	mutex_unlock(&ipath_layer_mutex);
}

EXPORT_SYMBOL_GPL(ipath_verbs_unregister);

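/**
 * ipath_layer_open - the layered driver is ready to use the device
 * @dd: the infinipath device
 * @pktmax: the largest packet we can currently send is returned here
 *
 * Sets the receive header queue size, and replays any pending
 * link-up/LID/broadcast events through the interrupt callback so a
 * freshly (re)loaded layered driver sees the current state even when
 * the low-level driver was already up.
 */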
int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
{
	int ret;
	u32 intval = 0;

	mutex_lock(&ipath_layer_mutex);

	if (!dd->ipath_layer.l_arg) {
		ret = -EINVAL;
		goto bail;
	}

	ret = ipath_setrcvhdrsize(dd, NUM_OF_EXTRA_WORDS_IN_HEADER_QUEUE);

	if (ret < 0)
		goto bail;

	*pktmax = dd->ipath_ibmaxlen;

	if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
		intval |= IPATH_LAYER_INT_IF_UP;
	if (ipath_stats.sps_lid[dd->ipath_unit])
		intval |= IPATH_LAYER_INT_LID;
	if (ipath_stats.sps_mlid[dd->ipath_unit])
		intval |= IPATH_LAYER_INT_BCAST;
	/*
	 * do this on open, in case low level is already up and
	 * just layered driver was reloaded, etc.
	 */
	if (intval)
		layer_intr(dd->ipath_layer.l_arg, intval);

	ret = 0;
bail:
	mutex_unlock(&ipath_layer_mutex);

	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_open);

u16 ipath_layer_get_lid(struct ipath_devdata *dd)
{
	return dd->ipath_lid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_lid);

/**
 * ipath_layer_get_mac - get the MAC address
 * @dd: the infinipath device
 * @mac: the MAC is put here
 *
 * This takes the EUI-64 OUI octets (top 3), skips the next 2 (which
 * should both be zero or 0xff), and uses the low 3 octets; for
 * example, GUID 00:11:75:00:00:aa:bb:cc yields MAC 00:11:75:aa:bb:cc.
 * The returned MAC is in network order; mac points to at least 6
 * bytes of buffer.  We assume that by the time the LID is set, the
 * GUID is as valid as it's ever going to be, rather than adding yet
 * another status bit.
 */
int ipath_layer_get_mac(struct ipath_devdata *dd, u8 * mac)
{
	u8 *guid;

	guid = (u8 *) &dd->ipath_guid;

	mac[0] = guid[0];
	mac[1] = guid[1];
	mac[2] = guid[2];
	mac[3] = guid[5];
	mac[4] = guid[6];
	mac[5] = guid[7];
	if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
		ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
			  "%x %x\n", guid[3], guid[4]);
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_mac);

u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
{
	return dd->ipath_mlid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);

u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
{
	return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
}

EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey);

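/*
 * Advance the scatter/gather state by "length" bytes: when the current
 * SGE is exhausted, step to the next SGE in the list, or to the next
 * segment of the memory region backing it.
 */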
static void update_sge(struct ipath_sge_state *ss, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr != NULL) {
		if (++sge->n >= IPATH_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}

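/*
 * Helpers for assembling unaligned source bytes into aligned 32-bit
 * PIO writes.  "Upper" here means the bytes that land in the
 * higher-addressed part of the word, so the little- and big-endian
 * variants mirror each other.  For example, on little-endian,
 * clear_upper_bytes(0xaabbccdd, 2, 0) keeps the 2 low-addressed
 * bytes: 0x0000ccdd.
 */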
#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif

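/*
 * Copy "length" bytes from the SGE list into a PIO buffer using only
 * 32-bit writes.  Unaligned sources are handled by carrying partial
 * words between iterations in "data"/"extra"; the final word of the
 * packet is held back in "last" and, after a write-combining flush,
 * written as the trigger word.
 */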
static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
		    u32 length)
{
	u32 extra = 0;
	u32 data = 0;
	u32 last;

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		BUG_ON(len == 0);
		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l,
								  extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			u32 w = len >> 2;

			__iowrite32_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;

			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* Update address before sending packet. */
	update_sge(ss, length);
	/* must flush early everything before trigger word */
	ipath_flush_wc();
	__raw_writel(last, piobuf);
	/* be sure trigger word is written */
	ipath_flush_wc();
}

/**
 * ipath_verbs_send - send a packet from the verbs layer
 * @dd: the infinipath device
 * @hdrwords: the number of words in the header
 * @hdr: the packet header
 * @len: the length of the packet in bytes
 * @ss: the SGE to send
 *
 * This is like ipath_sma_send_pkt() in that we need to be able to send
 * packets after the chip is initialized (MADs), but also like
 * ipath_layer_send_hdr() since it's used by the verbs layer.
 */
int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
		     u32 *hdr, u32 len, struct ipath_sge_state *ss)
{
	u32 __iomem *piobuf;
	u32 plen;
	int ret;

	/* +1 is for the qword padding of pbc */
	plen = hdrwords + ((len + 3) >> 2) + 1;
	if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
		ipath_dbg("packet len 0x%x too long, failing\n", plen);
		ret = -EINVAL;
		goto bail;
	}

	/* Get a PIO buffer to use. */
	piobuf = ipath_getpiobuf(dd, NULL);
	if (unlikely(piobuf == NULL)) {
		ret = -EBUSY;
		goto bail;
	}

	/*
	 * Write len to control qword, no flags.
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
	writeq(plen, piobuf);
	ipath_flush_wc();
	piobuf += 2;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		__iowrite32_copy(piobuf, hdr, hdrwords - 1);
		ipath_flush_wc();
		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
		ipath_flush_wc();
		ret = 0;
		goto bail;
	}

	__iowrite32_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment. */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 w;
		u32 *addr = (u32 *) ss->sge.vaddr;

		/* Update address before sending packet. */
		update_sge(ss, len);
		/* Need to round up for the last dword in the packet. */
		w = (len + 3) >> 2;
		__iowrite32_copy(piobuf, addr, w - 1);
		/* must flush early everything before trigger word */
		ipath_flush_wc();
		__raw_writel(addr[w - 1], piobuf + w - 1);
		/* be sure trigger word is written */
		ipath_flush_wc();
		ret = 0;
		goto bail;
	}
	copy_io(piobuf, ss, len);
	ret = 0;

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_verbs_send);

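/**
 * ipath_layer_snapshot_counters - snapshot the send/receive counters
 * @dd: the infinipath device
 * @swords: send word count is placed here
 * @rwords: receive word count is placed here
 * @spkts: send packet count is placed here
 * @rpkts: receive packet count is placed here
 * @xmit_wait: send stall count is placed here
 *
 * Returns -EINVAL if the device is not yet usable (no hardware,
 * frozen, etc.).
 */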
int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
				  u64 *rwords, u64 *spkts, u64 *rpkts,
				  u64 *xmit_wait)
{
	int ret;

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
		ret = -EINVAL;
		goto bail;
	}
	*swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
	*rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
	*spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
	*rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
	*xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);

	ret = 0;

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters);

/**
 * ipath_layer_get_counters - get various chip counters
 * @dd: the infinipath device
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int ipath_layer_get_counters(struct ipath_devdata *dd,
			     struct ipath_layer_counters *cntrs)
{
	int ret;

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
		ret = -EINVAL;
		goto bail;
	}
	cntrs->symbol_error_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
	cntrs->link_error_recovery_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
	/*
	 * The link downed counter counts when the other side downs the
	 * connection.  We add in the number of times we downed the link
	 * due to local link integrity errors to compensate.
	 */
	cntrs->link_downed_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
	cntrs->port_rcv_errors =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
	cntrs->port_rcv_remphys_errors =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
	cntrs->port_xmit_discards =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
	cntrs->port_xmit_data =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
	cntrs->port_rcv_data =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
	cntrs->port_xmit_packets =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
	cntrs->port_rcv_packets =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
	cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
	cntrs->excessive_buffer_overrun_errors = 0; /* XXX */

	ret = 0;

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_counters);

int ipath_layer_want_buffer(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_want_buffer);

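/**
 * ipath_layer_send_hdr - send a header-only packet for the layered driver
 * @dd: the infinipath device
 * @hdr: the ether_header to send
 *
 * Copies the header into a PIO buffer, writing the last word only
 * after a flush so it acts as the trigger word.  Fails with -EINVAL
 * before the receive header size has been set or if lrh[0] does not
 * carry IPS_LRH_BTH, and with -ENETDOWN while the link or LID is not
 * yet configured.
 */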
int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
{
	int ret = 0;
	u32 __iomem *piobuf;
	u32 plen, *uhdr;
	size_t count;
	__be16 vlsllnh;

	if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
		ipath_dbg("send while not open\n");
		ret = -EINVAL;
	} else if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
		   dd->ipath_lid == 0) {
		/*
		 * lid check is for when sma hasn't yet configured
		 */
		ret = -ENETDOWN;
		ipath_cdbg(VERBOSE, "send while not ready, "
			   "mylid=%u, flags=0x%x\n",
			   dd->ipath_lid, dd->ipath_flags);
	}

	vlsllnh = *((__be16 *) hdr);
	if (vlsllnh != htons(IPS_LRH_BTH)) {
		ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
			  "not sending\n", be16_to_cpu(vlsllnh),
			  IPS_LRH_BTH);
		ret = -EINVAL;
	}
	if (ret)
		goto done;

	/* Get a PIO buffer to use. */
	piobuf = ipath_getpiobuf(dd, NULL);
	if (piobuf == NULL) {
		ret = -EBUSY;
		goto done;
	}

	plen = (sizeof(*hdr) >> 2);	/* actual length */
	ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);

	writeq(plen+1, piobuf);	/* len (+1 for pad) to pbc, no flags */
	ipath_flush_wc();
	piobuf += 2;
	uhdr = (u32 *)hdr;
	count = plen-1;	/* amount we can copy before trigger word */
	__iowrite32_copy(piobuf, uhdr, count);
	ipath_flush_wc();
	__raw_writel(uhdr[count], piobuf + count);
	ipath_flush_wc();	/* ensure it's sent, now */

	ipath_stats.sps_ether_spkts++;	/* ether packet sent */

done:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);

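/*
 * Arm the "PIO buffer available" interrupt (IPATH_S_PIOINTBUFAVAIL) so
 * that the registered piobufavail callback can be invoked once send
 * buffers free up.
 */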
int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);

	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);

int ipath_layer_enable_timer(struct ipath_devdata *dd)
{
	/*
	 * HT-400 has a design flaw where the chip's and the kernel's idea
	 * of the tail register don't always agree, and therefore we won't
	 * get an interrupt on the next packet received.
	 * If the board supports per packet receive interrupts, use it.
	 * Otherwise, the timer function periodically checks for packets
	 * to cover this case.
	 * Either way, the timer is needed for verbs layer related
	 * processing.
	 */
	if (dd->ipath_flags & IPATH_GPIO_INTR) {
		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
				 0x2074076542310ULL);
		/* Enable GPIO bit 2 interrupt */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
				 (u64) (1 << 2));
	}

	init_timer(&dd->verbs_layer.l_timer);
	dd->verbs_layer.l_timer.function = __ipath_verbs_timer;
	dd->verbs_layer.l_timer.data = (unsigned long)dd;
	dd->verbs_layer.l_timer.expires = jiffies + 1;
	add_timer(&dd->verbs_layer.l_timer);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_enable_timer);

int ipath_layer_disable_timer(struct ipath_devdata *dd)
{
	/* Disable GPIO bit 2 interrupt */
	if (dd->ipath_flags & IPATH_GPIO_INTR)
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);

	del_timer_sync(&dd->verbs_layer.l_timer);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_disable_timer);

/**
 * ipath_layer_set_verbs_flags - set the verbs layer flags
 * @dd: the infinipath device
 * @flags: the flags to set
 */
int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
{
	struct ipath_devdata *ss;
	unsigned long lflags;

	spin_lock_irqsave(&ipath_devs_lock, lflags);

	list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
		if (!(ss->ipath_flags & IPATH_INITTED))
			continue;
		if ((flags & IPATH_VERBS_KERNEL_SMA) &&
		    !(*ss->ipath_statusp & IPATH_STATUS_SMA))
			*ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
		else
			*ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
	}

	spin_unlock_irqrestore(&ipath_devs_lock, lflags);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags);

/**
 * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
 * @dd: the infinipath device
 */
unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
{
	return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
}

EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys);

/**
 * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
 * @dd: the infinipath device
 * @index: the PKEY index
 */
unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
{
	unsigned ret;

	if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
		ret = 0;
	else
		ret = dd->ipath_pd[0]->port_pkeys[index];

	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_pkey);

/**
 * ipath_layer_get_pkeys - return the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the pkey table is placed here
 */
int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
{
	struct ipath_portdata *pd = dd->ipath_pd[0];

	memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys);

/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @dd: the infinipath device
 * @key: the PKEY index
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (dd->ipath_pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
			dd->ipath_pkeys[i] = 0;
			ret = 1;
			goto bail;
		}
		break;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @dd: the infinipath device
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;
	int any = 0;
	int ret;

	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (dd->ipath_pkeys[i] == key) {
			if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&dd->ipath_pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			dd->ipath_pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}

/**
 * ipath_layer_set_pkeys - set the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the PKEY table
 */
int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys)
{
	struct ipath_portdata *pd;
	int i;
	int changed = 0;

	pd = dd->ipath_pd[0];

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = pd->port_pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(dd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(dd, key);

			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		pd->port_pkeys[i] = key;
	}
	if (changed) {
		u64 pkey;

		pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);
	}
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys);

/**
 * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
 * @dd: the infinipath device
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
{
	return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
}

EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate);

/**
 * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
 * @dd: the infinipath device
 * @sleep: the new state
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
					 int sleep)
{
	if (sleep)
		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	else
		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
			 dd->ipath_ibcctrl);
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate);

int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold);

/**
 * ipath_layer_set_phyerrthreshold - set the physical error threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold);

int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold);

/**
 * ipath_layer_set_overrunthreshold - set the overrun threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold);

int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
			      size_t namelen)
{
	return dd->ipath_f_get_boardname(dd, name, namelen);
}
EXPORT_SYMBOL_GPL(ipath_layer_get_boardname);

u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
{
	return dd->ipath_rcvhdrentsize;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize);