/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * These are the routines used by layered drivers, currently just the
 * layered ethernet driver and verbs layer.
 */

#include <linux/io.h>
#include <linux/pci.h>
#include <asm/byteorder.h>

#include "ipath_kernel.h"
#include "ips_common.h"
#include "ipath_layer.h"

/* Acquire before ipath_devs_lock. */
static DEFINE_MUTEX(ipath_layer_mutex);

static int ipath_verbs_registered;

u16 ipath_layer_rcv_opcode;

static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *);
static int (*verbs_piobufavail)(void *);
static void (*verbs_rcv)(void *, void *, void *, u32);

static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *);
static void *(*verbs_add_one)(int, struct ipath_devdata *);
static void (*verbs_remove_one)(void *);
static void (*verbs_timer_cb)(void *);

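/*
 * The __ipath_layer_*() and __ipath_verbs_*() helpers below dispatch
 * events from the low-level driver to whichever layered driver or verbs
 * module has registered callbacks; they return -ENODEV when no layer is
 * attached to the device.
 */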
int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_intr)
		ret = layer_intr(dd->ipath_layer.l_arg, arg);

	return ret;
}

int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
	int ret;

	mutex_lock(&ipath_layer_mutex);

	ret = __ipath_layer_intr(dd, arg);

	mutex_unlock(&ipath_layer_mutex);

	return ret;
}

int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
		      struct sk_buff *skb)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_rcv)
		ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);

	return ret;
}

int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_rcv_lid)
		ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);

	return ret;
}

int __ipath_verbs_piobufavail(struct ipath_devdata *dd)
{
	int ret = -ENODEV;

	if (dd->verbs_layer.l_arg && verbs_piobufavail)
		ret = verbs_piobufavail(dd->verbs_layer.l_arg);

	return ret;
}

int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf,
		      u32 tlen)
{
	int ret = -ENODEV;

	if (dd->verbs_layer.l_arg && verbs_rcv) {
		verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen);
		ret = 0;
	}

	return ret;
}

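/**
 * ipath_layer_set_linkstate - request a new IB link state
 * @dd: the infinipath device
 * @newstate: the requested IPATH_IB_* link state
 *
 * The LINKDOWN variants, and a state the link has already reached,
 * return immediately; otherwise the link command is issued and we wait
 * for the link to reach the corresponding state.
 */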
int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
{
	u32 lstate;
	int ret;

	switch (newstate) {
	case IPATH_IB_LINKDOWN:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_SLEEP:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_DISABLE:
		ipath_set_ib_lstate(dd,
				    INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKINIT:
		if (dd->ipath_flags & IPATH_LINKINIT) {
			ret = 0;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKINIT;
		break;

	case IPATH_IB_LINKARM:
		if (dd->ipath_flags & IPATH_LINKARMED) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags &
		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		/*
		 * Since the port can transition to ACTIVE by receiving
		 * a non VL 15 packet, wait for either state.
		 */
		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINKACTIVE:
		if (dd->ipath_flags & IPATH_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKACTIVE;
		break;

	default:
		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
		ret = -EINVAL;
		goto bail;
	}
	ret = ipath_wait_linkstate(dd, lstate, 2000);

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate);

/**
 * ipath_layer_set_mtu - set the MTU
 * @dd: the infinipath device
 * @arg: the new MTU
 *
 * We can handle "any" incoming size; the issue here is whether we
 * need to restrict our outgoing size.  For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link initialize (IPATH_IBSTATE_INIT) state...
 */
int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
{
	u32 piosize;
	int changed = 0;
	int ret;

	/*
	 * mtu is IB data payload max.  It's the largest power of 2 less
	 * than piosize (or even larger, since it only really controls the
	 * largest we can receive; we can send the max of the mtu and
	 * piosize).  We check that it's one of the valid IB sizes.
	 */
	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    arg != 4096) {
		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
		ret = -EINVAL;
		goto bail;
	}
	if (dd->ipath_ibmtu == arg) {
		ret = 0;	/* same as current */
		goto bail;
	}

	piosize = dd->ipath_ibmaxlen;
	dd->ipath_ibmtu = arg;

	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != dd->ipath_init_ibmaxlen) {
			dd->ipath_ibmaxlen = piosize;
			changed = 1;
		}
	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
		piosize = arg + IPATH_PIO_MAXIBHDR;
		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
			   arg);
		dd->ipath_ibmaxlen = piosize;
		changed = 1;
	}

	if (changed) {
		/*
		 * set the IBC maxpktlength to the size of our pio
		 * buffers in words
		 */
		u64 ibc = dd->ipath_ibcctrl;
		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
			 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);

		piosize = piosize - 2 * sizeof(u32);	/* ignore pbc */
		dd->ipath_ibmaxlen = piosize;
		piosize /= sizeof(u32);	/* in words */
		/*
		 * for ICRC, which we only send in diag test pkt mode, and
		 * we don't need to worry about that for mtu
		 */
		piosize += 1;

		ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
		dd->ipath_ibcctrl = ibc;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		dd->ipath_f_tidtemplate(dd);
	}

	ret = 0;

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);

int ipath_set_sps_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
{
	ipath_stats.sps_lid[dd->ipath_unit] = arg;
	dd->ipath_lid = arg;
	dd->ipath_lmc = lmc;

	mutex_lock(&ipath_layer_mutex);

	if (dd->ipath_layer.l_arg && layer_intr)
		layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);

	mutex_unlock(&ipath_layer_mutex);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_set_sps_lid);

int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
{
	/* XXX - need to inform anyone who cares this just happened. */
	dd->ipath_guid = guid;
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_guid);

__be64 ipath_layer_get_guid(struct ipath_devdata *dd)
{
	return dd->ipath_guid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_guid);

u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
{
	return dd->ipath_nguid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);

int ipath_layer_query_device(struct ipath_devdata *dd, u32 *vendor,
			     u32 *boardrev, u32 *majrev, u32 *minrev)
{
	*vendor = dd->ipath_vendorid;
	*boardrev = dd->ipath_boardrev;
	*majrev = dd->ipath_majrev;
	*minrev = dd->ipath_minrev;

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_query_device);

u32 ipath_layer_get_flags(struct ipath_devdata *dd)
{
	return dd->ipath_flags;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_flags);

struct device *ipath_layer_get_device(struct ipath_devdata *dd)
{
	return &dd->pcidev->dev;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_device);

u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
{
	return dd->ipath_deviceid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);

u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
{
	return dd->ipath_lastibcstat;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat);

u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
{
	return dd->ipath_ibmtu;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu);

void ipath_layer_add(struct ipath_devdata *dd)
{
	mutex_lock(&ipath_layer_mutex);

	if (layer_add_one)
		dd->ipath_layer.l_arg =
			layer_add_one(dd->ipath_unit, dd);

	if (verbs_add_one)
		dd->verbs_layer.l_arg =
			verbs_add_one(dd->ipath_unit, dd);

	mutex_unlock(&ipath_layer_mutex);
}

void ipath_layer_remove(struct ipath_devdata *dd)
{
	mutex_lock(&ipath_layer_mutex);

	if (dd->ipath_layer.l_arg && layer_remove_one) {
		layer_remove_one(dd->ipath_layer.l_arg);
		dd->ipath_layer.l_arg = NULL;
	}

	if (dd->verbs_layer.l_arg && verbs_remove_one) {
		verbs_remove_one(dd->verbs_layer.l_arg);
		dd->verbs_layer.l_arg = NULL;
	}

	mutex_unlock(&ipath_layer_mutex);
}

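/**
 * ipath_layer_register - register a layered driver (e.g. the layered
 *	ethernet driver) with the low-level driver
 * @l_add: per-device attach callback
 * @l_remove: per-device detach callback
 * @l_intr: interrupt/event notification callback
 * @l_rcv: receive callback for packets matching @l_rcv_opcode
 * @l_rcv_opcode: opcode of packets to hand to @l_rcv
 * @l_rcv_lid: receive callback for LID-based packets
 *
 * @l_add is called immediately for any device that is already
 * initialized.  A rough usage sketch (the callback names here are
 * illustrative only, not part of this API):
 *
 *	ipath_layer_register(my_add, my_remove, my_intr, my_rcv,
 *			     my_opcode, my_rcv_lid);
 */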
int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
			 void (*l_remove)(void *),
			 int (*l_intr)(void *, u32),
			 int (*l_rcv)(void *, void *, struct sk_buff *),
			 u16 l_rcv_opcode,
			 int (*l_rcv_lid)(void *, void *))
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);

	layer_add_one = l_add;
	layer_remove_one = l_remove;
	layer_intr = l_intr;
	layer_rcv = l_rcv;
	layer_rcv_lid = l_rcv_lid;
	ipath_layer_rcv_opcode = l_rcv_opcode;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (!(dd->ipath_flags & IPATH_INITTED))
			continue;

		if (dd->ipath_layer.l_arg)
			continue;

		if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
			*dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;

		spin_unlock_irqrestore(&ipath_devs_lock, flags);
		dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
		spin_lock_irqsave(&ipath_devs_lock, flags);
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);
	mutex_unlock(&ipath_layer_mutex);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_register);

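/*
 * ipath_layer_unregister - undo ipath_layer_register(): detach the layered
 * driver from every device and clear the registered callbacks.
 */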
void ipath_layer_unregister(void)
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);
	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (dd->ipath_layer.l_arg && layer_remove_one) {
			spin_unlock_irqrestore(&ipath_devs_lock, flags);
			layer_remove_one(dd->ipath_layer.l_arg);
			spin_lock_irqsave(&ipath_devs_lock, flags);
			dd->ipath_layer.l_arg = NULL;
		}
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	layer_add_one = NULL;
	layer_remove_one = NULL;
	layer_intr = NULL;
	layer_rcv = NULL;
	layer_rcv_lid = NULL;

	mutex_unlock(&ipath_layer_mutex);
}

EXPORT_SYMBOL_GPL(ipath_layer_unregister);

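/*
 * Per-device timer used by the verbs layer: polls the receive queue when
 * port 0 receive interrupts can be missed, runs the verbs timeout callback,
 * and rearms itself for the next jiffy.
 */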
static void __ipath_verbs_timer(unsigned long arg)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) arg;

	/*
	 * If port 0 receive packet interrupts are not available, or
	 * can be missed, poll the receive queue
	 */
	if (dd->ipath_flags & IPATH_POLL_RX_INTR)
		ipath_kreceive(dd);

	/* Handle verbs layer timeouts. */
	if (dd->verbs_layer.l_arg && verbs_timer_cb)
		verbs_timer_cb(dd->verbs_layer.l_arg);

	mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);
}

/**
 * ipath_verbs_register - verbs layer registration
 * @l_add: callback when a device is added
 * @l_remove: callback when a device is removed
 * @l_piobufavail: callback for when PIO buffers become available
 * @l_rcv: callback for receiving a packet
 * @l_timer_cb: timer callback
 */
int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
			 void (*l_remove)(void *arg),
			 int (*l_piobufavail) (void *arg),
			 void (*l_rcv) (void *arg, void *rhdr,
					void *data, u32 tlen),
			 void (*l_timer_cb) (void *arg))
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);

	verbs_add_one = l_add;
	verbs_remove_one = l_remove;
	verbs_piobufavail = l_piobufavail;
	verbs_rcv = l_rcv;
	verbs_timer_cb = l_timer_cb;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (!(dd->ipath_flags & IPATH_INITTED))
			continue;

		if (dd->verbs_layer.l_arg)
			continue;

		spin_unlock_irqrestore(&ipath_devs_lock, flags);
		dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd);
		spin_lock_irqsave(&ipath_devs_lock, flags);
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);
	mutex_unlock(&ipath_layer_mutex);

	ipath_verbs_registered = 1;

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_verbs_register);

void ipath_verbs_unregister(void)
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);
	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		*dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;

		if (dd->verbs_layer.l_arg && verbs_remove_one) {
			spin_unlock_irqrestore(&ipath_devs_lock, flags);
			verbs_remove_one(dd->verbs_layer.l_arg);
			spin_lock_irqsave(&ipath_devs_lock, flags);
			dd->verbs_layer.l_arg = NULL;
		}
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	verbs_add_one = NULL;
	verbs_remove_one = NULL;
	verbs_piobufavail = NULL;
	verbs_rcv = NULL;
	verbs_timer_cb = NULL;

	ipath_verbs_registered = 0;

	mutex_unlock(&ipath_layer_mutex);
}

EXPORT_SYMBOL_GPL(ipath_verbs_unregister);

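/**
 * ipath_layer_open - called when the layered driver opens the device
 * @dd: the infinipath device
 * @pktmax: the maximum packet length (dd->ipath_ibmaxlen) is returned here
 *
 * Sets the receive header queue size and replays any "interrupt"
 * conditions (link up, LID or MLID already set) that occurred before
 * the layered driver attached.
 */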
int ipath_layer_open(struct ipath_devdata *dd, u32 *pktmax)
{
	int ret;
	u32 intval = 0;

	mutex_lock(&ipath_layer_mutex);

	if (!dd->ipath_layer.l_arg) {
		ret = -EINVAL;
		goto bail;
	}

	ret = ipath_setrcvhdrsize(dd, NUM_OF_EXTRA_WORDS_IN_HEADER_QUEUE);

	if (ret < 0)
		goto bail;

	*pktmax = dd->ipath_ibmaxlen;

	if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
		intval |= IPATH_LAYER_INT_IF_UP;
	if (ipath_stats.sps_lid[dd->ipath_unit])
		intval |= IPATH_LAYER_INT_LID;
	if (ipath_stats.sps_mlid[dd->ipath_unit])
		intval |= IPATH_LAYER_INT_BCAST;
	/*
	 * do this on open, in case low level is already up and
	 * just layered driver was reloaded, etc.
	 */
	if (intval)
		layer_intr(dd->ipath_layer.l_arg, intval);

	ret = 0;
bail:
	mutex_unlock(&ipath_layer_mutex);

	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_open);

u16 ipath_layer_get_lid(struct ipath_devdata *dd)
{
	return dd->ipath_lid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_lid);

/**
 * ipath_layer_get_mac - get the MAC address
 * @dd: the infinipath device
 * @mac: the MAC is put here
 *
 * This is the EUI-64 OUI octets (top 3), then
 * skip the next 2 (which should both be zero or 0xff).
 * The returned MAC is in network order.
 * mac points to at least 6 bytes of buffer.
 * We assume that by the time the LID is set, the GUID is as valid
 * as it's ever going to be, rather than adding yet another status bit.
 */
int ipath_layer_get_mac(struct ipath_devdata *dd, u8 *mac)
{
	u8 *guid;

	guid = (u8 *) &dd->ipath_guid;

	mac[0] = guid[0];
	mac[1] = guid[1];
	mac[2] = guid[2];
	mac[3] = guid[5];
	mac[4] = guid[6];
	mac[5] = guid[7];
	if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
		ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
			  "%x %x\n", guid[3], guid[4]);
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_mac);

u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
{
	return dd->ipath_mlid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);

u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
{
	return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
}

EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey);

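/*
 * Advance the scatter/gather state by @length bytes: move to the next SGE
 * when the current one is exhausted, or to the next memory-region segment
 * when the current segment runs out.
 */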
static void update_sge(struct ipath_sge_state *ss, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr != NULL) {
		if (++sge->n >= IPATH_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}

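/*
 * Endian-dependent helpers used by copy_io() to pack bytes from an
 * unaligned source into full 32-bit words for the PIO buffer; the
 * little- and big-endian variants shift in opposite directions so the
 * assembled word has the correct byte order.
 */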
#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif

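/*
 * Copy @length bytes described by the SGE state into a PIO buffer,
 * handling sources that are not 32-bit aligned by accumulating partial
 * words.  The last word is held back and written after a write-combining
 * flush so that it acts as the send trigger.
 */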
static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
		    u32 length)
{
	u32 extra = 0;
	u32 data = 0;
	u32 last;

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		BUG_ON(len == 0);
		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l,
								  extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			u32 w = len >> 2;

			__iowrite32_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;

			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* Update address before sending packet. */
	update_sge(ss, length);
	/* must flush early everything before trigger word */
	ipath_flush_wc();
	__raw_writel(last, piobuf);
	/* be sure trigger word is written */
	ipath_flush_wc();
}

/**
 * ipath_verbs_send - send a packet from the verbs layer
 * @dd: the infinipath device
 * @hdrwords: the number of words in the header
 * @hdr: the packet header
 * @len: the length of the packet in bytes
 * @ss: the SGE to send
 *
 * This is like ipath_sma_send_pkt() in that we need to be able to send
 * packets after the chip is initialized (MADs) but also like
 * ipath_layer_send_hdr() since it's used by the verbs layer.
 */
int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
		     u32 *hdr, u32 len, struct ipath_sge_state *ss)
{
	u32 __iomem *piobuf;
	u32 plen;
	int ret;

	/* +1 is for the qword padding of pbc */
	plen = hdrwords + ((len + 3) >> 2) + 1;
	if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
		ipath_dbg("packet len 0x%x too long, failing\n", plen);
		ret = -EINVAL;
		goto bail;
	}

	/* Get a PIO buffer to use. */
	piobuf = ipath_getpiobuf(dd, NULL);
	if (unlikely(piobuf == NULL)) {
		ret = -EBUSY;
		goto bail;
	}

	/*
	 * Write len to control qword, no flags.
	 * We have to flush after the PBC for correctness on some cpus
	 * or the WC buffer can be written out of order.
	 */
	writeq(plen, piobuf);
	ipath_flush_wc();
	piobuf += 2;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		__iowrite32_copy(piobuf, hdr, hdrwords - 1);
		ipath_flush_wc();
		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
		ipath_flush_wc();
		ret = 0;
		goto bail;
	}

	__iowrite32_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment. */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 w;
		u32 *addr = (u32 *) ss->sge.vaddr;

		/* Update address before sending packet. */
		update_sge(ss, len);
		/* Need to round up for the last dword in the packet. */
		w = (len + 3) >> 2;
		__iowrite32_copy(piobuf, addr, w - 1);
		/* must flush early everything before trigger word */
		ipath_flush_wc();
		__raw_writel(addr[w - 1], piobuf + w - 1);
		/* be sure trigger word is written */
		ipath_flush_wc();
		ret = 0;
		goto bail;
	}
	copy_io(piobuf, ss, len);
	ret = 0;

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_verbs_send);

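/**
 * ipath_layer_snapshot_counters - snapshot the send/receive counters
 * @dd: the infinipath device
 * @swords: send word count is returned here
 * @rwords: receive word count is returned here
 * @spkts: send packet count is returned here
 * @rpkts: receive packet count is returned here
 * @xmit_wait: send stall count is returned here
 */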
int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
				  u64 *rwords, u64 *spkts, u64 *rpkts,
				  u64 *xmit_wait)
{
	int ret;

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
		ret = -EINVAL;
		goto bail;
	}
	*swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
	*rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
	*spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
	*rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
	*xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);

	ret = 0;

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters);

/**
 * ipath_layer_get_counters - get various chip counters
 * @dd: the infinipath device
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int ipath_layer_get_counters(struct ipath_devdata *dd,
			     struct ipath_layer_counters *cntrs)
{
	int ret;

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
		ret = -EINVAL;
		goto bail;
	}
	cntrs->symbol_error_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
	cntrs->link_error_recovery_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
	cntrs->link_downed_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
	cntrs->port_rcv_errors =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errrcvflowctrlcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlinkcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
	cntrs->port_rcv_remphys_errors =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
	cntrs->port_xmit_discards =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
	cntrs->port_xmit_data =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
	cntrs->port_rcv_data =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
	cntrs->port_xmit_packets =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
	cntrs->port_rcv_packets =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);

	ret = 0;

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_counters);

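/*
 * ipath_layer_want_buffer - arm the "PIO buffer available" interrupt so the
 * layered driver is notified when send buffers free up.
 */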
int ipath_layer_want_buffer(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_want_buffer);

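/**
 * ipath_layer_send_hdr - send a header-only (layered ethernet) packet
 * @dd: the infinipath device
 * @hdr: the ether_header to send
 *
 * The device must have been opened, the link must be up with a LID
 * assigned, and the LRH must carry the expected IPS_LRH_BTH value;
 * otherwise the packet is not sent.
 */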
int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
{
	int ret = 0;
	u32 __iomem *piobuf;
	u32 plen, *uhdr;
	size_t count;
	__be16 vlsllnh;

	if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
		ipath_dbg("send while not open\n");
		ret = -EINVAL;
	} else
		if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
		    dd->ipath_lid == 0) {
			/*
			 * The LID check is for when the SMA hasn't yet
			 * configured it.
			 */
			ret = -ENETDOWN;
			ipath_cdbg(VERBOSE, "send while not ready, "
				   "mylid=%u, flags=0x%x\n",
				   dd->ipath_lid, dd->ipath_flags);
		}

	vlsllnh = *((__be16 *) hdr);
	if (vlsllnh != htons(IPS_LRH_BTH)) {
		ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
			  "not sending\n", be16_to_cpu(vlsllnh),
			  IPS_LRH_BTH);
		ret = -EINVAL;
	}
	if (ret)
		goto done;

	/* Get a PIO buffer to use. */
	piobuf = ipath_getpiobuf(dd, NULL);
	if (piobuf == NULL) {
		ret = -EBUSY;
		goto done;
	}

	plen = (sizeof(*hdr) >> 2);	/* actual length */
	ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);

	writeq(plen+1, piobuf);	/* len (+1 for pad) to pbc, no flags */
	ipath_flush_wc();
	piobuf += 2;
	uhdr = (u32 *)hdr;
	count = plen-1;	/* amount we can copy before trigger word */
	__iowrite32_copy(piobuf, uhdr, count);
	ipath_flush_wc();
	__raw_writel(uhdr[count], piobuf + count);
	ipath_flush_wc();	/* ensure it's sent, now */

	ipath_stats.sps_ether_spkts++;	/* ether packet sent */

done:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);

int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);

	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);

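/**
 * ipath_layer_enable_timer - start the verbs layer timer
 * @dd: the infinipath device
 *
 * Enables the GPIO receive interrupt on chips that support it, and starts
 * the per-device timer used for verbs layer processing (and for receive
 * polling where per-packet interrupts can be missed).
 */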
int ipath_layer_enable_timer(struct ipath_devdata *dd)
{
	/*
	 * HT-400 has a design flaw where the chip and kernel idea
	 * of the tail register don't always agree, and therefore we won't
	 * get an interrupt on the next packet received.
	 * If the board supports per packet receive interrupts, use it.
	 * Otherwise, the timer function periodically checks for packets
	 * to cover this case.
	 * Either way, the timer is needed for verbs layer related
	 * processing.
	 */
	if (dd->ipath_flags & IPATH_GPIO_INTR) {
		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
				 0x2074076542310ULL);
		/* Enable GPIO bit 2 interrupt */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
				 (u64) (1 << 2));
	}

	init_timer(&dd->verbs_layer.l_timer);
	dd->verbs_layer.l_timer.function = __ipath_verbs_timer;
	dd->verbs_layer.l_timer.data = (unsigned long)dd;
	dd->verbs_layer.l_timer.expires = jiffies + 1;
	add_timer(&dd->verbs_layer.l_timer);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_enable_timer);

int ipath_layer_disable_timer(struct ipath_devdata *dd)
{
	/* Disable GPIO bit 2 interrupt */
	if (dd->ipath_flags & IPATH_GPIO_INTR)
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);

	del_timer_sync(&dd->verbs_layer.l_timer);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_disable_timer);

/**
 * ipath_layer_set_verbs_flags - set the verbs layer flags
 * @dd: the infinipath device
 * @flags: the flags to set
 */
int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
{
	struct ipath_devdata *ss;
	unsigned long lflags;

	spin_lock_irqsave(&ipath_devs_lock, lflags);

	list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
		if (!(ss->ipath_flags & IPATH_INITTED))
			continue;
		if ((flags & IPATH_VERBS_KERNEL_SMA) &&
		    !(*ss->ipath_statusp & IPATH_STATUS_SMA))
			*ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
		else
			*ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
	}

	spin_unlock_irqrestore(&ipath_devs_lock, lflags);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags);

/**
 * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
 * @dd: the infinipath device
 */
unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
{
	return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
}

EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys);

/**
 * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
 * @dd: the infinipath device
 * @index: the PKEY index
 */
unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
{
	unsigned ret;

	if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
		ret = 0;
	else
		ret = dd->ipath_pd[0]->port_pkeys[index];

	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_pkey);

/**
 * ipath_layer_get_pkeys - return the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the pkey table is placed here
 */
int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 *pkeys)
{
	struct ipath_portdata *pd = dd->ipath_pd[0];

	memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys);

/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @dd: the infinipath device
 * @key: the PKEY index
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (dd->ipath_pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
			dd->ipath_pkeys[i] = 0;
			ret = 1;
			goto bail;
		}
		break;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @dd: the infinipath device
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;
	int any = 0;
	int ret;

	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (dd->ipath_pkeys[i] == key) {
			if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&dd->ipath_pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			dd->ipath_pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}

/**
 * ipath_layer_set_pkeys - set the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the PKEY table
 */
int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
{
	struct ipath_portdata *pd;
	int i;
	int changed = 0;

	pd = dd->ipath_pd[0];

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = pd->port_pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(dd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(dd, key);

			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		pd->port_pkeys[i] = key;
	}
	if (changed) {
		u64 pkey;

		pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);
	}
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys);

/**
 * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
 * @dd: the infinipath device
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
{
	return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
}

EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate);

/**
 * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
 * @dd: the infinipath device
 * @sleep: the new state
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
					 int sleep)
{
	if (sleep)
		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	else
		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
			 dd->ipath_ibcctrl);
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate);

int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold);

/**
 * ipath_layer_set_phyerrthreshold - set the physical error threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold);

int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold);

/**
 * ipath_layer_set_overrunthreshold - set the overrun threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold);

int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
			      size_t namelen)
{
	return dd->ipath_f_get_boardname(dd, name, namelen);
}
EXPORT_SYMBOL_GPL(ipath_layer_get_boardname);

u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
{
	return dd->ipath_rcvhdrentsize;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize);