/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"

static void ipath_update_pio_bufs(struct ipath_devdata *);

const char *ipath_get_unit_name(int unit)
{
        static char iname[16];
        snprintf(iname, sizeof iname, "infinipath%u", unit);
        return iname;
}

#define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: "
#define PFX IPATH_DRV_NAME ": "

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the init code.
 */
const char ib_ipath_version[] = IPATH_IDSTR "\n";

static struct idr unit_table;
DEFINE_SPINLOCK(ipath_devs_lock);
LIST_HEAD(ipath_dev_list);

wait_queue_head_t ipath_state_wait;

unsigned ipath_debug = __IPATH_INFO;

module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "mask for debug prints");
EXPORT_SYMBOL_GPL(ipath_debug);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("QLogic <support@pathscale.com>");
MODULE_DESCRIPTION("QLogic InfiniPath driver");

const char *ipath_ibcstatus_str[] = {
        "Disabled",
        "LinkUp",
        "PollActive",
        "PollQuiet",
        "SleepDelay",
        "SleepQuiet",
        "LState6",              /* unused */
        "LState7",              /* unused */
        "CfgDebounce",
        "CfgRcvfCfg",
        "CfgWaitRmt",
        "CfgIdle",
        "RecovRetrain",
        "LState0xD",            /* unused */
        "RecovWaitRmt",
        "RecovIdle",
};

static void __devexit ipath_remove_one(struct pci_dev *);
static int __devinit ipath_init_one(struct pci_dev *,
                                    const struct pci_device_id *);

/* Only needed for registration, nothing else needs this info */
#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10

/* Number of seconds before our card status check... */
#define STATUS_TIMEOUT 60

static const struct pci_device_id ipath_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
        { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);

static struct pci_driver ipath_driver = {
        .name = IPATH_DRV_NAME,
        .probe = ipath_init_one,
        .remove = __devexit_p(ipath_remove_one),
        .id_table = ipath_pci_tbl,
};

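/*
 * Delayed-work handler scheduled from ipath_init_one(); complains if the
 * chip has raised no interrupts by the time the check runs.
 */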
static void ipath_check_status(struct work_struct *work)
{
        struct ipath_devdata *dd = container_of(work, struct ipath_devdata,
                                                status_work.work);

        /*
         * If we don't have any interrupts, let the user know and
         * don't bother checking again.
         */
        if (dd->ipath_int_counter == 0)
                dev_err(&dd->pcidev->dev, "No interrupts detected.\n");
}

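/*
 * Read and log BAR0/BAR1 from PCI config space; called before and after
 * pci_enable_device() to help diagnose a chip whose BARs were lost by a
 * reset.
 */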
static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
                             u32 *bar0, u32 *bar1)
{
        int ret;

        ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, bar0);
        if (ret)
                ipath_dev_err(dd, "failed to read bar0 before enable: "
                              "error %d\n", -ret);

        ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, bar1);
        if (ret)
                ipath_dev_err(dd, "failed to read bar1 before enable: "
                              "error %d\n", -ret);

        ipath_dbg("Read bar0 %x bar1 %x\n", *bar0, *bar1);
}

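/*
 * Release a devdata structure: detach it from the pci_dev, drop it from
 * the unit IDR and the global device list, then free the vmalloc'ed memory.
 */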
static void ipath_free_devdata(struct pci_dev *pdev,
                               struct ipath_devdata *dd)
{
        unsigned long flags;

        pci_set_drvdata(pdev, NULL);

        if (dd->ipath_unit != -1) {
                spin_lock_irqsave(&ipath_devs_lock, flags);
                idr_remove(&unit_table, dd->ipath_unit);
                list_del(&dd->ipath_list);
                spin_unlock_irqrestore(&ipath_devs_lock, flags);
        }
        vfree(dd);
}

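/*
 * Allocate and zero a devdata structure, assign it a unit number from the
 * IDR, and link it onto the global device list under ipath_devs_lock.
 * Returns an ERR_PTR() on failure.
 */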
static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
{
        unsigned long flags;
        struct ipath_devdata *dd;
        int ret;

        if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
                dd = ERR_PTR(-ENOMEM);
                goto bail;
        }

        dd = vmalloc(sizeof(*dd));
        if (!dd) {
                dd = ERR_PTR(-ENOMEM);
                goto bail;
        }
        memset(dd, 0, sizeof(*dd));
        dd->ipath_unit = -1;

        spin_lock_irqsave(&ipath_devs_lock, flags);

        ret = idr_get_new(&unit_table, dd, &dd->ipath_unit);
        if (ret < 0) {
                printk(KERN_ERR IPATH_DRV_NAME
                       ": Could not allocate unit ID: error %d\n", -ret);
                ipath_free_devdata(pdev, dd);
                dd = ERR_PTR(ret);
                goto bail_unlock;
        }

        dd->pcidev = pdev;
        pci_set_drvdata(pdev, dd);

        INIT_DELAYED_WORK(&dd->status_work, ipath_check_status);

        list_add(&dd->ipath_list, &ipath_dev_list);

bail_unlock:
        spin_unlock_irqrestore(&ipath_devs_lock, flags);

bail:
        return dd;
}

static inline struct ipath_devdata *__ipath_lookup(int unit)
{
        return idr_find(&unit_table, unit);
}

struct ipath_devdata *ipath_lookup(int unit)
{
        struct ipath_devdata *dd;
        unsigned long flags;

        spin_lock_irqsave(&ipath_devs_lock, flags);
        dd = __ipath_lookup(unit);
        spin_unlock_irqrestore(&ipath_devs_lock, flags);

        return dd;
}

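/*
 * Walk the device list and report how many units exist, how many are
 * present and mapped, how many have an active link, and the largest
 * configured port count; any of the out-pointers may be NULL.
 */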
int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp)
{
        int nunits, npresent, nup;
        struct ipath_devdata *dd;
        unsigned long flags;
        u32 maxports;

        nunits = npresent = nup = maxports = 0;

        spin_lock_irqsave(&ipath_devs_lock, flags);

        list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
                nunits++;
                if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase)
                        npresent++;
                if (dd->ipath_lid &&
                    !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN
                                         | IPATH_LINKUNK)))
                        nup++;
                if (dd->ipath_cfgports > maxports)
                        maxports = dd->ipath_cfgports;
        }

        spin_unlock_irqrestore(&ipath_devs_lock, flags);

        if (npresentp)
                *npresentp = npresent;
        if (nupp)
                *nupp = nup;
        if (maxportsp)
                *maxportsp = maxports;

        return nunits;
}

/*
 * These next two routines are placeholders in case we don't have per-arch
 * code for controlling write combining.  If explicit control of write
 * combining is not available, performance will probably be awful.
 */

int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
{
        return -EOPNOTSUPP;
}

void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
{
}

/*
 * Perform a PIO buffer bandwidth write test, to verify proper system
 * configuration.  Even when all the setup calls work, occasionally
 * BIOS or other issues can prevent write combining from working, or
 * can cause other bandwidth problems to the chip.
 *
 * This test simply writes the same buffer over and over again, and
 * measures close to the peak bandwidth to the chip (not testing
 * data bandwidth to the wire).  On chips that use an address-based
 * trigger to send packets to the wire, this is easy.  On chips that
 * use a count to trigger, we want to make sure that the packet doesn't
 * go out on the wire, or trigger flow control checks.
 */
static void ipath_verify_pioperf(struct ipath_devdata *dd)
{
        u32 pbnum, cnt, lcnt;
        u32 __iomem *piobuf;
        u32 *addr;
        u64 msecs, emsecs;

        piobuf = ipath_getpiobuf(dd, &pbnum);
        if (!piobuf) {
                dev_info(&dd->pcidev->dev,
                         "No PIObufs for checking perf, skipping\n");
                return;
        }

        /*
         * Enough to give us a reasonable test, less than piobuf size, and
         * likely multiple of store buffer length.
         */
        cnt = 1024;

        addr = vmalloc(cnt);
        if (!addr) {
                dev_info(&dd->pcidev->dev,
                         "Couldn't get memory for checking PIO perf,"
                         " skipping\n");
                goto done;
        }

        preempt_disable();  /* we want reasonably accurate elapsed time */
        msecs = 1 + jiffies_to_msecs(jiffies);
        for (lcnt = 0; lcnt < 10000U; lcnt++) {
                /* wait until we cross msec boundary */
                if (jiffies_to_msecs(jiffies) >= msecs)
                        break;
                udelay(1);
        }

        writeq(0, piobuf); /* length 0, no dwords actually sent */
        ipath_flush_wc();

        /*
         * this is only roughly accurate, since even with preempt we
         * still take interrupts that could take a while.  Running for
         * >= 5 msec seems to get us "close enough" to accurate values
         */
        msecs = jiffies_to_msecs(jiffies);
        for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
                __iowrite32_copy(piobuf + 64, addr, cnt >> 2);
                emsecs = jiffies_to_msecs(jiffies) - msecs;
        }

        /* 1 GiB/sec, slightly over IB SDR line rate */
        if (lcnt < (emsecs * 1024U))
                ipath_dev_err(dd,
                              "Performance problem: bandwidth to PIO buffers is "
                              "only %u MiB/sec\n",
                              lcnt / (u32) emsecs);
        else
                ipath_dbg("PIO buffer bandwidth %u MiB/sec is OK\n",
                          lcnt / (u32) emsecs);

        preempt_enable();

        vfree(addr);

done:
        /* disarm piobuf, so it's available again */
        ipath_disarm_piobufs(dd, pbnum, 1);
}

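/*
 * PCI probe routine: allocate devdata, enable and map the device, hook up
 * the chip-specific function table and interrupt handler, run chip init,
 * and register the user, diag, ipathfs and verbs interfaces.
 */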
static int __devinit ipath_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
{
        int ret, len, j;
        struct ipath_devdata *dd;
        unsigned long long addr;
        u32 bar0 = 0, bar1 = 0;

        dd = ipath_alloc_devdata(pdev);
        if (IS_ERR(dd)) {
                ret = PTR_ERR(dd);
                printk(KERN_ERR IPATH_DRV_NAME
                       ": Could not allocate devdata: error %d\n", -ret);
                goto bail;
        }

        ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);

        read_bars(dd, pdev, &bar0, &bar1);

        ret = pci_enable_device(pdev);
        if (ret) {
                /* This can happen iff:
                 *
                 * We did a chip reset, and then failed to reprogram the
                 * BAR, or the chip reset due to an internal error.  We then
                 * unloaded the driver and reloaded it.
                 *
                 * Both reset cases set the BAR back to initial state.  For
                 * the latter case, the AER sticky error bit at offset 0x718
                 * should be set, but the Linux kernel doesn't yet know
                 * about that, it appears.  If the original BAR was retained
                 * in the kernel data structures, this may be OK.
                 */
                ipath_dev_err(dd, "enable unit %d failed: error %d\n",
                              dd->ipath_unit, -ret);
                goto bail_devdata;
        }
        addr = pci_resource_start(pdev, 0);
        len = pci_resource_len(pdev, 0);
        ipath_cdbg(VERBOSE, "regbase (0) %llx len %d pdev->irq %d, vend %x/%x "
                   "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
                   ent->device, ent->driver_data);

        read_bars(dd, pdev, &bar0, &bar1);

        if (!bar1 && !(bar0 & ~0xf)) {
                if (addr) {
                        dev_info(&pdev->dev, "BAR is 0 (probable RESET), "
                                 "rewriting as %llx\n", addr);
                        ret = pci_write_config_dword(
                                pdev, PCI_BASE_ADDRESS_0, addr);
                        if (ret) {
                                ipath_dev_err(dd, "rewrite of BAR0 "
                                              "failed: err %d\n", -ret);
                                goto bail_disable;
                        }
                        ret = pci_write_config_dword(
                                pdev, PCI_BASE_ADDRESS_1, addr >> 32);
                        if (ret) {
                                ipath_dev_err(dd, "rewrite of BAR1 "
                                              "failed: err %d\n", -ret);
                                goto bail_disable;
                        }
                } else {
                        ipath_dev_err(dd, "BAR is 0 (probable RESET), "
                                      "not usable until reboot\n");
                        ret = -ENODEV;
                        goto bail_disable;
                }
        }

        ret = pci_request_regions(pdev, IPATH_DRV_NAME);
        if (ret) {
                dev_info(&pdev->dev, "pci_request_regions unit %u fails: "
                         "err %d\n", dd->ipath_unit, -ret);
                goto bail_disable;
        }

        ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
        if (ret) {
                /*
                 * if the 64 bit setup fails, try 32 bit.  Some systems
                 * do not setup 64 bit maps on systems with 2GB or less
                 * memory installed.
                 */
                ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (ret) {
                        dev_info(&pdev->dev,
                                 "Unable to set DMA mask for unit %u: %d\n",
                                 dd->ipath_unit, ret);
                        goto bail_regions;
                }
                else {
                        ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
                        ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                        if (ret)
                                dev_info(&pdev->dev,
                                         "Unable to set DMA consistent mask "
                                         "for unit %u: %d\n",
                                         dd->ipath_unit, ret);

                }
        }
        else {
                ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
                if (ret)
                        dev_info(&pdev->dev,
                                 "Unable to set DMA consistent mask "
                                 "for unit %u: %d\n",
                                 dd->ipath_unit, ret);
        }

        pci_set_master(pdev);

        /*
         * Save BARs to rewrite after device reset.  Save all 64 bits of
         * BAR, just in case.
         */
        dd->ipath_pcibar0 = addr;
        dd->ipath_pcibar1 = addr >> 32;
        dd->ipath_deviceid = ent->device;       /* save for later use */
        dd->ipath_vendorid = ent->vendor;

        /* setup the chip-specific functions, as early as possible. */
        switch (ent->device) {
        case PCI_DEVICE_ID_INFINIPATH_HT:
#ifdef CONFIG_HT_IRQ
                ipath_init_iba6110_funcs(dd);
                break;
#else
                ipath_dev_err(dd, "QLogic HT device 0x%x cannot work if "
                              "CONFIG_HT_IRQ is not enabled\n", ent->device);
                return -ENODEV;
#endif
        case PCI_DEVICE_ID_INFINIPATH_PE800:
#ifdef CONFIG_PCI_MSI
                ipath_init_iba6120_funcs(dd);
                break;
#else
                ipath_dev_err(dd, "QLogic PCIE device 0x%x cannot work if "
                              "CONFIG_PCI_MSI is not enabled\n", ent->device);
                return -ENODEV;
#endif
        default:
                ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
                              "failing\n", ent->device);
                return -ENODEV;
        }

        for (j = 0; j < 6; j++) {
                if (!pdev->resource[j].start)
                        continue;
                ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n",
                           j, (unsigned long long)pdev->resource[j].start,
                           (unsigned long long)pdev->resource[j].end,
                           (unsigned long long)pci_resource_len(pdev, j));
        }

        if (!addr) {
                ipath_dev_err(dd, "No valid address in BAR 0!\n");
                ret = -ENODEV;
                goto bail_regions;
        }

        dd->ipath_deviceid = ent->device;       /* save for later use */
        dd->ipath_vendorid = ent->vendor;

        dd->ipath_pcirev = pdev->revision;

#if defined(__powerpc__)
        /* There isn't a generic way to specify writethrough mappings */
        dd->ipath_kregbase = __ioremap(addr, len,
                                       (_PAGE_NO_CACHE|_PAGE_WRITETHRU));
#else
        dd->ipath_kregbase = ioremap_nocache(addr, len);
#endif

        if (!dd->ipath_kregbase) {
                ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
                          addr);
                ret = -ENOMEM;
                goto bail_iounmap;
        }
        dd->ipath_kregend = (u64 __iomem *)
                ((void __iomem *)dd->ipath_kregbase + len);
        dd->ipath_physaddr = addr;      /* used for io_remap, etc. */
        /* for user mmap */
        ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
                   addr, dd->ipath_kregbase);

        /*
         * clear ipath_flags here instead of in ipath_init_chip as it is set
         * by ipath_setup_htconfig.
         */
        dd->ipath_flags = 0;
        dd->ipath_lli_counter = 0;
        dd->ipath_lli_errors = 0;

        if (dd->ipath_f_bus(dd, pdev))
                ipath_dev_err(dd, "Failed to setup config space; "
                              "continuing anyway\n");

        /*
         * set up our interrupt handler; IRQF_SHARED probably not needed,
         * since MSI interrupts shouldn't be shared but won't hurt for now.
         * check 0 irq after we return from chip-specific bus setup, since
         * that can affect this due to setup
         */
        if (!dd->ipath_irq)
                ipath_dev_err(dd, "irq is 0, BIOS error?  Interrupts won't "
                              "work\n");
        else {
                ret = request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
                                  IPATH_DRV_NAME, dd);
                if (ret) {
                        ipath_dev_err(dd, "Couldn't setup irq handler, "
                                      "irq=%d: %d\n", dd->ipath_irq, ret);
                        goto bail_iounmap;
                }
        }

        ret = ipath_init_chip(dd, 0);   /* do the chip-specific init */
        if (ret)
                goto bail_irqsetup;

        ret = ipath_enable_wc(dd);

        if (ret) {
                ipath_dev_err(dd, "Write combining not enabled "
                              "(err %d): performance may be poor\n",
                              -ret);
                ret = 0;
        }

        ipath_verify_pioperf(dd);

        ipath_device_create_group(&pdev->dev, dd);
        ipathfs_add_device(dd);
        ipath_user_add(dd);
        ipath_diag_add(dd);
        ipath_register_ib_device(dd);

        /* Check the card status after STATUS_TIMEOUT seconds. */
        schedule_delayed_work(&dd->status_work, HZ * STATUS_TIMEOUT);

        goto bail;

bail_irqsetup:
        if (pdev->irq) free_irq(pdev->irq, dd);

bail_iounmap:
        iounmap((volatile void __iomem *) dd->ipath_kregbase);

bail_regions:
        pci_release_regions(pdev);

bail_disable:
        pci_disable_device(pdev);

bail_devdata:
        ipath_free_devdata(pdev, dd);

bail:
        return ret;
}

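/*
 * Free the per-device DMA buffers, unlock any expected-TID pages still
 * pinned by user processes, and release the port data; called from
 * ipath_remove_one() after the chip has been shut down.
 */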
static void __devexit cleanup_device(struct ipath_devdata *dd)
{
        int port;

        if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
                /* can't do anything more with chip; needs re-init */
                *dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
                if (dd->ipath_kregbase) {
                        /*
                         * if we haven't already cleaned up before, these are
                         * cleared to ensure any register reads/writes "fail"
                         * until re-init
                         */
                        dd->ipath_kregbase = NULL;
                        dd->ipath_uregbase = 0;
                        dd->ipath_sregbase = 0;
                        dd->ipath_cregbase = 0;
                        dd->ipath_kregsize = 0;
                }
                ipath_disable_wc(dd);
        }

        if (dd->ipath_pioavailregs_dma) {
                dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
                                  (void *) dd->ipath_pioavailregs_dma,
                                  dd->ipath_pioavailregs_phys);
                dd->ipath_pioavailregs_dma = NULL;
        }
        if (dd->ipath_dummy_hdrq) {
                dma_free_coherent(&dd->pcidev->dev,
                                  dd->ipath_pd[0]->port_rcvhdrq_size,
                                  dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
                dd->ipath_dummy_hdrq = NULL;
        }

        if (dd->ipath_pageshadow) {
                struct page **tmpp = dd->ipath_pageshadow;
                dma_addr_t *tmpd = dd->ipath_physshadow;
                int i, cnt = 0;

                ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
                           "locked\n");
                for (port = 0; port < dd->ipath_cfgports; port++) {
                        int port_tidbase = port * dd->ipath_rcvtidcnt;
                        int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
                        for (i = port_tidbase; i < maxtid; i++) {
                                if (!tmpp[i])
                                        continue;
                                pci_unmap_page(dd->pcidev, tmpd[i],
                                               PAGE_SIZE, PCI_DMA_FROMDEVICE);
                                ipath_release_user_pages(&tmpp[i], 1);
                                tmpp[i] = NULL;
                                cnt++;
                        }
                }
                if (cnt) {
                        ipath_stats.sps_pageunlocks += cnt;
                        ipath_cdbg(VERBOSE, "There were still %u expTID "
                                   "entries locked\n", cnt);
                }
                if (ipath_stats.sps_pagelocks ||
                    ipath_stats.sps_pageunlocks)
                        ipath_cdbg(VERBOSE, "%llu pages locked, %llu "
                                   "unlocked via ipath_m{un}lock\n",
                                   (unsigned long long)
                                   ipath_stats.sps_pagelocks,
                                   (unsigned long long)
                                   ipath_stats.sps_pageunlocks);

                ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
                           dd->ipath_pageshadow);
                tmpp = dd->ipath_pageshadow;
                dd->ipath_pageshadow = NULL;
                vfree(tmpp);
        }

        /*
         * free any resources still in use (usually just kernel ports)
         * at unload; we do for portcnt, not cfgports, because cfgports
         * could have changed while we were loaded.
         */
        for (port = 0; port < dd->ipath_portcnt; port++) {
                struct ipath_portdata *pd = dd->ipath_pd[port];
                dd->ipath_pd[port] = NULL;
                ipath_free_pddata(dd, pd);
        }
        kfree(dd->ipath_pd);
        /*
         * debuggability, in case some cleanup path tries to use it
         * after this
         */
        dd->ipath_pd = NULL;
}

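/*
 * PCI remove routine: shut the link down, cancel the status check, tear
 * down the registered interfaces, free the IRQ and chip resources, and
 * finally unmap and disable the PCI device.
 */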
static void __devexit ipath_remove_one(struct pci_dev *pdev)
{
        struct ipath_devdata *dd = pci_get_drvdata(pdev);

        ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd);

        /*
         * disable the IB link early, to be sure no new packets arrive, which
         * complicates the shutdown process
         */
        ipath_shutdown_device(dd);

        cancel_delayed_work(&dd->status_work);
        flush_scheduled_work();

        if (dd->verbs_dev)
                ipath_unregister_ib_device(dd->verbs_dev);

        ipath_diag_remove(dd);
        ipath_user_remove(dd);
        ipathfs_remove_device(dd);
        ipath_device_remove_group(&pdev->dev, dd);

        ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
                   "unit %u\n", dd, (u32) dd->ipath_unit);

        cleanup_device(dd);

        /*
         * turn off rcv, send, and interrupts for all ports, all drivers
         * should also hard reset the chip here?
         * free up port 0 (kernel) rcvhdr, egr bufs, and eventually tid bufs
         * for all versions of the driver, if they were allocated
         */
        if (dd->ipath_irq) {
                ipath_cdbg(VERBOSE, "unit %u free irq %d\n",
                           dd->ipath_unit, dd->ipath_irq);
                dd->ipath_f_free_irq(dd);
        } else
                ipath_dbg("irq is 0, not doing free_irq "
                          "for unit %u\n", dd->ipath_unit);
        /*
         * we check for NULL here, because it's outside
         * the kregbase check, and we need to call it
         * after the free_irq.  Thus it's possible that
         * the function pointers were never initialized.
         */
        if (dd->ipath_f_cleanup)
                /* clean up chip-specific stuff */
                dd->ipath_f_cleanup(dd);

        ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n", dd->ipath_kregbase);
        iounmap((volatile void __iomem *) dd->ipath_kregbase);
        pci_release_regions(pdev);
        ipath_cdbg(VERBOSE, "calling pci_disable_device\n");
        pci_disable_device(pdev);

        ipath_free_devdata(pdev, dd);
}

/* general driver use */
DEFINE_MUTEX(ipath_mutex);

static DEFINE_SPINLOCK(ipath_pioavail_lock);

/**
 * ipath_disarm_piobufs - cancel a range of PIO buffers
 * @dd: the infinipath device
 * @first: the first PIO buffer to cancel
 * @cnt: the number of PIO buffers to cancel
 *
 * Cancel a range of PIO buffers, used when they might be armed but not
 * triggered.  Used at init to ensure buffer state, and also on user
 * process close, in case it died while writing to a PIO buffer.
 * Also used after errors.
 */
void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
                          unsigned cnt)
{
        unsigned i, last = first + cnt;
        u64 sendctrl, sendorig;

        ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
        sendorig = dd->ipath_sendctrl;
        for (i = first; i < last; i++) {
                sendctrl = sendorig | INFINIPATH_S_DISARM |
                        (i << INFINIPATH_S_DISARMPIOBUF_SHIFT);
                ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                                 sendctrl);
        }

        /*
         * Write it again with current value, in case ipath_sendctrl changed
         * while we were looping; no critical bits that would require
         * locking.
         *
         * disable PIOAVAILUPD, then re-enable, reading scratch in
         * between.  This seems to avoid a chip timing race that causes
         * pioavail updates to memory to stop.
         */
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                         sendorig & ~INFINIPATH_S_PIOBUFAVAILUPD);
        sendorig = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                         dd->ipath_sendctrl);
}

/**
 * ipath_wait_linkstate - wait for an IB link state change to occur
 * @dd: the infinipath device
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to msecs milliseconds for an IB link state change to occur.
 * For now, take the easy polling route.  Currently used only by
 * ipath_set_linkstate.  Returns 0 if the state is reached, otherwise
 * -ETIMEDOUT.  state can have multiple states set, for any of several
 * transitions.
 */
static int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state,
                                int msecs)
{
        dd->ipath_state_wanted = state;
        wait_event_interruptible_timeout(ipath_state_wait,
                                         (dd->ipath_flags & state),
                                         msecs_to_jiffies(msecs));
        dd->ipath_state_wanted = 0;

        if (!(dd->ipath_flags & state)) {
                u64 val;
                ipath_cdbg(VERBOSE, "Didn't reach linkstate %s within %u"
                           " ms\n",
                           /* test INIT ahead of DOWN, both can be set */
                           (state & IPATH_LINKINIT) ? "INIT" :
                           ((state & IPATH_LINKDOWN) ? "DOWN" :
                            ((state & IPATH_LINKARMED) ? "ARM" : "ACTIVE")),
                           msecs);
                val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
                ipath_cdbg(VERBOSE, "ibcc=%llx ibcstatus=%llx (%s)\n",
                           (unsigned long long) ipath_read_kreg64(
                                   dd, dd->ipath_kregs->kr_ibcctrl),
                           (unsigned long long) val,
                           ipath_ibcstatus_str[val & 0xf]);
        }
        return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
}

/*
 * Decode the error status into strings, deciding whether to always
 * print it or not, depending on "normal packet errors" vs everything
 * else.  Return 1 if "real" errors, otherwise 0 if only packet
 * errors, so caller can decide what to print with the string.
 */
int ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
{
        int iserr = 1;
        *buf = '\0';
        if (err & INFINIPATH_E_PKTERRS) {
                if (!(err & ~INFINIPATH_E_PKTERRS))
                        iserr = 0; // if only packet errors.
                if (ipath_debug & __IPATH_ERRPKTDBG) {
                        if (err & INFINIPATH_E_REBP)
                                strlcat(buf, "EBP ", blen);
                        if (err & INFINIPATH_E_RVCRC)
                                strlcat(buf, "VCRC ", blen);
                        if (err & INFINIPATH_E_RICRC) {
                                strlcat(buf, "CRC ", blen);
                                // clear for check below, so only once
                                err &= INFINIPATH_E_RICRC;
                        }
                        if (err & INFINIPATH_E_RSHORTPKTLEN)
                                strlcat(buf, "rshortpktlen ", blen);
                        if (err & INFINIPATH_E_SDROPPEDDATAPKT)
                                strlcat(buf, "sdroppeddatapkt ", blen);
                        if (err & INFINIPATH_E_SPKTLEN)
                                strlcat(buf, "spktlen ", blen);
                }
                if ((err & INFINIPATH_E_RICRC) &&
                    !(err&(INFINIPATH_E_RVCRC|INFINIPATH_E_REBP)))
                        strlcat(buf, "CRC ", blen);
                if (!iserr)
                        goto done;
        }
        if (err & INFINIPATH_E_RHDRLEN)
                strlcat(buf, "rhdrlen ", blen);
        if (err & INFINIPATH_E_RBADTID)
                strlcat(buf, "rbadtid ", blen);
        if (err & INFINIPATH_E_RBADVERSION)
                strlcat(buf, "rbadversion ", blen);
        if (err & INFINIPATH_E_RHDR)
                strlcat(buf, "rhdr ", blen);
        if (err & INFINIPATH_E_RLONGPKTLEN)
                strlcat(buf, "rlongpktlen ", blen);
        if (err & INFINIPATH_E_RMAXPKTLEN)
                strlcat(buf, "rmaxpktlen ", blen);
        if (err & INFINIPATH_E_RMINPKTLEN)
                strlcat(buf, "rminpktlen ", blen);
        if (err & INFINIPATH_E_SMINPKTLEN)
                strlcat(buf, "sminpktlen ", blen);
        if (err & INFINIPATH_E_RFORMATERR)
                strlcat(buf, "rformaterr ", blen);
        if (err & INFINIPATH_E_RUNSUPVL)
                strlcat(buf, "runsupvl ", blen);
        if (err & INFINIPATH_E_RUNEXPCHAR)
                strlcat(buf, "runexpchar ", blen);
        if (err & INFINIPATH_E_RIBFLOW)
                strlcat(buf, "ribflow ", blen);
        if (err & INFINIPATH_E_SUNDERRUN)
                strlcat(buf, "sunderrun ", blen);
        if (err & INFINIPATH_E_SPIOARMLAUNCH)
                strlcat(buf, "spioarmlaunch ", blen);
        if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
                strlcat(buf, "sunexperrpktnum ", blen);
        if (err & INFINIPATH_E_SDROPPEDSMPPKT)
                strlcat(buf, "sdroppedsmppkt ", blen);
        if (err & INFINIPATH_E_SMAXPKTLEN)
                strlcat(buf, "smaxpktlen ", blen);
        if (err & INFINIPATH_E_SUNSUPVL)
                strlcat(buf, "sunsupVL ", blen);
        if (err & INFINIPATH_E_INVALIDADDR)
                strlcat(buf, "invalidaddr ", blen);
        if (err & INFINIPATH_E_RRCVEGRFULL)
                strlcat(buf, "rcvegrfull ", blen);
        if (err & INFINIPATH_E_RRCVHDRFULL)
                strlcat(buf, "rcvhdrfull ", blen);
        if (err & INFINIPATH_E_IBSTATUSCHANGED)
                strlcat(buf, "ibcstatuschg ", blen);
        if (err & INFINIPATH_E_RIBLOSTLINK)
                strlcat(buf, "riblostlink ", blen);
        if (err & INFINIPATH_E_HARDWARE)
                strlcat(buf, "hardware ", blen);
        if (err & INFINIPATH_E_RESET)
                strlcat(buf, "reset ", blen);
done:
        return iserr;
}

/**
 * get_rhf_errstring - decode RHF errors
 * @err: the err number
 * @msg: the output buffer
 * @len: the length of the output buffer
 *
 * only used one place now, may want more later
 */
static void get_rhf_errstring(u32 err, char *msg, size_t len)
{
        /* if no errors, and so don't need to check what's first */
        *msg = '\0';

        if (err & INFINIPATH_RHF_H_ICRCERR)
                strlcat(msg, "icrcerr ", len);
        if (err & INFINIPATH_RHF_H_VCRCERR)
                strlcat(msg, "vcrcerr ", len);
        if (err & INFINIPATH_RHF_H_PARITYERR)
                strlcat(msg, "parityerr ", len);
        if (err & INFINIPATH_RHF_H_LENERR)
                strlcat(msg, "lenerr ", len);
        if (err & INFINIPATH_RHF_H_MTUERR)
                strlcat(msg, "mtuerr ", len);
        if (err & INFINIPATH_RHF_H_IHDRERR)
                /* infinipath hdr checksum error */
                strlcat(msg, "ipathhdrerr ", len);
        if (err & INFINIPATH_RHF_H_TIDERR)
                strlcat(msg, "tiderr ", len);
        if (err & INFINIPATH_RHF_H_MKERR)
                /* bad port, offset, etc. */
                strlcat(msg, "invalid ipathhdr ", len);
        if (err & INFINIPATH_RHF_H_IBERR)
                strlcat(msg, "iberr ", len);
        if (err & INFINIPATH_RHF_L_SWA)
                strlcat(msg, "swA ", len);
        if (err & INFINIPATH_RHF_L_SWB)
                strlcat(msg, "swB ", len);
}

/**
 * ipath_get_egrbuf - get an eager buffer
 * @dd: the infinipath device
 * @bufnum: the eager buffer to get
 * @err: unused
 *
 * must only be called if ipath_pd[port] is known to be allocated
 */
static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum,
                                     int err)
{
        return dd->ipath_port0_skbinfo ?
                (void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
}

/**
 * ipath_alloc_skb - allocate an skb and buffer with possible constraints
 * @dd: the infinipath device
 * @gfp_mask: the sk_buff GFP mask
 */
struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
                                gfp_t gfp_mask)
{
        struct sk_buff *skb;
        u32 len;

        /*
         * Only fully supported way to handle this is to allocate lots
         * extra, align as needed, and then do skb_reserve().  That wastes
         * a lot of memory...  I'll have to hack this into infinipath_copy
         * also.
         */

        /*
         * We need 2 extra bytes for ipath_ether data sent in the
         * key header.  In order to keep everything dword aligned,
         * we'll reserve 4 bytes.
         */
        len = dd->ipath_ibmaxlen + 4;

        if (dd->ipath_flags & IPATH_4BYTE_TID) {
                /* We need a 2KB multiple alignment, and there is no way
                 * to do it except to allocate extra and then skb_reserve
                 * enough to bring it up to the right alignment.
                 */
                len += 2047;
        }

        skb = __dev_alloc_skb(len, gfp_mask);
        if (!skb) {
                ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
                              len);
                goto bail;
        }

        skb_reserve(skb, 4);

        if (dd->ipath_flags & IPATH_4BYTE_TID) {
                u32 una = (unsigned long)skb->data & 2047;
                if (una)
                        skb_reserve(skb, 2048 - una);
        }

bail:
        return skb;
}

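/*
 * Log a receive header-flag error and, for CRC errors, maintain the local
 * link integrity error counters against the PHY error threshold.
 */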
static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
                             u32 eflags,
                             u32 l,
                             u32 etail,
                             u64 *rc)
{
        char emsg[128];
        struct ipath_message_header *hdr;

        get_rhf_errstring(eflags, emsg, sizeof emsg);
        hdr = (struct ipath_message_header *)&rc[1];
        ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
                   "tlen=%x opcode=%x egridx=%x: %s\n",
                   eflags, l,
                   ipath_hdrget_rcv_type((__le32 *) rc),
                   ipath_hdrget_length_in_bytes((__le32 *) rc),
                   be32_to_cpu(hdr->bth[0]) >> 24,
                   etail, emsg);

        /* Count local link integrity errors. */
        if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) {
                u8 n = (dd->ipath_ibcctrl >>
                        INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
                        INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;

                if (++dd->ipath_lli_counter > n) {
                        dd->ipath_lli_counter = 0;
                        dd->ipath_lli_errors++;
                }
        }
}

/*
 * ipath_kreceive - receive a packet
 * @dd: the infinipath device
 *
 * called from interrupt handler for errors or receive interrupt
 */
void ipath_kreceive(struct ipath_devdata *dd)
{
        u64 *rc;
        void *ebuf;
        const u32 rsize = dd->ipath_rcvhdrentsize;      /* words */
        const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */
        u32 etail = -1, l, hdrqtail;
        struct ipath_message_header *hdr;
        u32 eflags, i, etype, tlen, pkttot = 0, updegr = 0, reloop = 0;
        static u64 totcalls;    /* stats, may eventually remove */

        if (!dd->ipath_hdrqtailptr) {
                ipath_dev_err(dd,
                              "hdrqtailptr not set, can't do receives\n");
                goto bail;
        }

        l = dd->ipath_port0head;
        hdrqtail = (u32) le64_to_cpu(*dd->ipath_hdrqtailptr);
        if (l == hdrqtail)
                goto bail;

reloop:
        for (i = 0; l != hdrqtail; i++) {
                u32 qp;
                u8 *bthbytes;

                rc = (u64 *) (dd->ipath_pd[0]->port_rcvhdrq + (l << 2));
                hdr = (struct ipath_message_header *)&rc[1];
                /*
                 * could make a network order version of IPATH_KD_QP, and
                 * do the obvious shift before masking to speed this up.
                 */
                qp = ntohl(hdr->bth[1]) & 0xffffff;
                bthbytes = (u8 *) hdr->bth;

                eflags = ipath_hdrget_err_flags((__le32 *) rc);
                etype = ipath_hdrget_rcv_type((__le32 *) rc);
                /* total length */
                tlen = ipath_hdrget_length_in_bytes((__le32 *) rc);
                ebuf = NULL;
                if (etype != RCVHQ_RCV_TYPE_EXPECTED) {
                        /*
                         * it turns out that the chip uses an eager buffer
                         * for all non-expected packets, whether it "needs"
                         * one or not.  So always get the index, but don't
                         * set ebuf (so we try to copy data) unless the
                         * length requires it.
                         */
                        etail = ipath_hdrget_index((__le32 *) rc);
                        if (tlen > sizeof(*hdr) ||
                            etype == RCVHQ_RCV_TYPE_NON_KD)
                                ebuf = ipath_get_egrbuf(dd, etail, 0);
                }

                /*
                 * both tiderr and ipathhdrerr are set for all plain IB
                 * packets; only ipathhdrerr should be set.
                 */

                if (etype != RCVHQ_RCV_TYPE_NON_KD && etype !=
                    RCVHQ_RCV_TYPE_ERROR && ipath_hdrget_ipath_ver(
                            hdr->iph.ver_port_tid_offset) !=
                    IPS_PROTO_VERSION) {
                        ipath_cdbg(PKT, "Bad InfiniPath protocol version "
                                   "%x\n", etype);
                }

                if (unlikely(eflags))
                        ipath_rcv_hdrerr(dd, eflags, l, etail, rc);
                else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
                        ipath_ib_rcv(dd->verbs_dev, rc + 1, ebuf, tlen);
                        if (dd->ipath_lli_counter)
                                dd->ipath_lli_counter--;
                        ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
                                   "qp=%x), len %x; ignored\n",
                                   etype, bthbytes[0], qp, tlen);
                }
                else if (etype == RCVHQ_RCV_TYPE_EAGER)
                        ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
                                   "qp=%x), len %x; ignored\n",
                                   etype, bthbytes[0], qp, tlen);
                else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
                        ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
                                  be32_to_cpu(hdr->bth[0]) & 0xff);
                else {
                        /*
                         * error packet, type of error unknown.
                         * Probably type 3, but we don't know, so don't
                         * even try to print the opcode, etc.
                         */
                        ipath_dbg("Error Pkt, but no eflags! egrbuf %x, "
                                  "len %x\nhdrq@%lx;hdrq+%x rhf: %llx; "
                                  "hdr %llx %llx %llx %llx %llx\n",
                                  etail, tlen, (unsigned long) rc, l,
                                  (unsigned long long) rc[0],
                                  (unsigned long long) rc[1],
                                  (unsigned long long) rc[2],
                                  (unsigned long long) rc[3],
                                  (unsigned long long) rc[4],
                                  (unsigned long long) rc[5]);
                }
                l += rsize;
                if (l >= maxcnt)
                        l = 0;
                if (etype != RCVHQ_RCV_TYPE_EXPECTED)
                        updegr = 1;
                /*
                 * update head regs on last packet, and every 16 packets.
                 * Reduce bus traffic, while still trying to prevent
                 * rcvhdrq overflows, for when the queue is nearly full
                 */
                if (l == hdrqtail || (i && !(i&0xf))) {
                        u64 lval;
                        if (l == hdrqtail)
                                /* request IBA6120 interrupt only on last */
                                lval = dd->ipath_rhdrhead_intr_off | l;
                        else
                                lval = l;
                        (void)ipath_write_ureg(dd, ur_rcvhdrhead, lval, 0);
                        if (updegr) {
                                (void)ipath_write_ureg(dd, ur_rcvegrindexhead,
                                                       etail, 0);
                                updegr = 0;
                        }
                }
        }

        if (!dd->ipath_rhdrhead_intr_off && !reloop) {
                /* IBA6110 workaround; we can have a race clearing chip
                 * interrupt with another interrupt about to be delivered,
                 * and can clear it before it is delivered on the GPIO
                 * workaround.  By doing the extra check here for the
                 * in-memory tail register updating while we were doing
                 * earlier packets, we "almost" guarantee we have covered
                 * that case.
                 */
                u32 hqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
                if (hqtail != hdrqtail) {
                        hdrqtail = hqtail;
                        reloop = 1; /* loop 1 extra time at most */
                        goto reloop;
                }
        }

        pkttot += i;

        dd->ipath_port0head = l;

        if (pkttot > ipath_stats.sps_maxpkts_call)
                ipath_stats.sps_maxpkts_call = pkttot;
        ipath_stats.sps_port0pkts += pkttot;
        ipath_stats.sps_avgpkts_call =
                ipath_stats.sps_port0pkts / ++totcalls;

bail:;
}

/**
 * ipath_update_pio_bufs - update shadow copy of the PIO availability map
 * @dd: the infinipath device
 *
 * called whenever our local copy indicates we have run out of send buffers
 * NOTE: This can be called from interrupt context by some code
 * and from non-interrupt context by ipath_getpiobuf().
 */

static void ipath_update_pio_bufs(struct ipath_devdata *dd)
{
        unsigned long flags;
        int i;
        const unsigned piobregs = (unsigned)dd->ipath_pioavregs;

        /* If the generation (check) bits have changed, then we update the
         * busy bit for the corresponding PIO buffer.  This algorithm will
         * modify positions to the value they already have in some cases
         * (i.e., no change), but it's faster than changing only the bits
         * that have changed.
         *
         * We would like to do this atomically, to avoid spinlocks in the
         * critical send path, but that's not really possible, given the
         * type of changes, and that this routine could be called on
         * multiple cpu's simultaneously, so we lock in this routine only,
         * to avoid conflicting updates; all we change is the shadow, and
         * it's a single 64 bit memory location, so by definition the update
         * is atomic in terms of what other cpu's can see in testing the
         * bits.  The spin_lock overhead isn't too bad, since it only
         * happens when all buffers are in use, so only cpu overhead, not
         * latency or bandwidth is affected.
         */
#define _IPATH_ALL_CHECKBITS 0x5555555555555555ULL
        if (!dd->ipath_pioavailregs_dma) {
                ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
                return;
        }
        if (ipath_debug & __IPATH_VERBDBG) {
                /* only if packet debug and verbose */
                volatile __le64 *dma = dd->ipath_pioavailregs_dma;
                unsigned long *shadow = dd->ipath_pioavailshadow;

                ipath_cdbg(PKT, "Refill avail, dma0=%llx shad0=%lx, "
                           "d1=%llx s1=%lx, d2=%llx s2=%lx, d3=%llx "
                           "s3=%lx\n",
                           (unsigned long long) le64_to_cpu(dma[0]),
                           shadow[0],
                           (unsigned long long) le64_to_cpu(dma[1]),
                           shadow[1],
                           (unsigned long long) le64_to_cpu(dma[2]),
                           shadow[2],
                           (unsigned long long) le64_to_cpu(dma[3]),
                           shadow[3]);
                if (piobregs > 4)
                        ipath_cdbg(
                                PKT, "2nd group, dma4=%llx shad4=%lx, "
                                "d5=%llx s5=%lx, d6=%llx s6=%lx, "
                                "d7=%llx s7=%lx\n",
                                (unsigned long long) le64_to_cpu(dma[4]),
                                shadow[4],
                                (unsigned long long) le64_to_cpu(dma[5]),
                                shadow[5],
                                (unsigned long long) le64_to_cpu(dma[6]),
                                shadow[6],
                                (unsigned long long) le64_to_cpu(dma[7]),
                                shadow[7]);
        }
        spin_lock_irqsave(&ipath_pioavail_lock, flags);
        for (i = 0; i < piobregs; i++) {
                u64 pchbusy, pchg, piov, pnew;
                /*
                 * Chip Errata: bug 6641; even and odd qwords>3 are swapped
                 */
                if (i > 3) {
                        if (i & 1)
                                piov = le64_to_cpu(
                                        dd->ipath_pioavailregs_dma[i - 1]);
                        else
                                piov = le64_to_cpu(
                                        dd->ipath_pioavailregs_dma[i + 1]);
                } else
                        piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
                pchg = _IPATH_ALL_CHECKBITS &
                        ~(dd->ipath_pioavailshadow[i] ^ piov);
                pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
                if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
                        pnew = dd->ipath_pioavailshadow[i] & ~pchbusy;
                        pnew |= piov & pchbusy;
                        dd->ipath_pioavailshadow[i] = pnew;
                }
        }
        spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
}

1361/**
1362 * ipath_setrcvhdrsize - set the receive header size
1363 * @dd: the infinipath device
1364 * @rhdrsize: the receive header size
1365 *
1366 * called from user init code, and also layered driver init
1367 */
1368int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
1369{
1370 int ret = 0;
1371
1372 if (dd->ipath_flags & IPATH_RCVHDRSZ_SET) {
1373 if (dd->ipath_rcvhdrsize != rhdrsize) {
1374 dev_info(&dd->pcidev->dev,
1375 "Error: can't set protocol header "
1376 "size %u, already %u\n",
1377 rhdrsize, dd->ipath_rcvhdrsize);
1378 ret = -EAGAIN;
1379 } else
1380 ipath_cdbg(VERBOSE, "Reuse same protocol header "
1381 "size %u\n", dd->ipath_rcvhdrsize);
1382 } else if (rhdrsize > (dd->ipath_rcvhdrentsize -
1383 (sizeof(u64) / sizeof(u32)))) {
1384 ipath_dbg("Error: can't set protocol header size %u "
1385 "(> max %u)\n", rhdrsize,
1386 dd->ipath_rcvhdrentsize -
1387 (u32) (sizeof(u64) / sizeof(u32)));
1388 ret = -EOVERFLOW;
1389 } else {
1390 dd->ipath_flags |= IPATH_RCVHDRSZ_SET;
1391 dd->ipath_rcvhdrsize = rhdrsize;
1392 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
1393 dd->ipath_rcvhdrsize);
1394 ipath_cdbg(VERBOSE, "Set protocol header size to %u\n",
1395 dd->ipath_rcvhdrsize);
1396 }
1397 return ret;
1398}
1399
1400/**
1401 * ipath_getpiobuf - find an available pio buffer
1402 * @dd: the infinipath device
1403 * @pbufnum: the buffer number is placed here
1404 *
1405 * do appropriate marking as busy, etc.
1406 * returns buffer number if one found (>=0), negative number is error.
Bryan O'Sullivan0fd41362006-08-25 11:24:34 -07001407 * Used by ipath_layer_send
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08001408 */
1409u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum)
1410{
1411 int i, j, starti, updated = 0;
1412 unsigned piobcnt, iter;
1413 unsigned long flags;
1414 unsigned long *shadow = dd->ipath_pioavailshadow;
1415 u32 __iomem *buf;
1416
1417 piobcnt = (unsigned)(dd->ipath_piobcnt2k
1418 + dd->ipath_piobcnt4k);
1419 starti = dd->ipath_lastport_piobuf;
1420 iter = piobcnt - starti;
1421 if (dd->ipath_upd_pio_shadow) {
1422 /*
1423 * Minor optimization. If we had no buffers on last call,
1424 * start out by doing the update; continue and do scan even
1425 * if no buffers were updated, to be paranoid
1426 */
1427 ipath_update_pio_bufs(dd);
1428 /* we scanned here, don't do it at end of scan */
1429 updated = 1;
1430 i = starti;
1431 } else
1432 i = dd->ipath_lastpioindex;
1433
1434rescan:
1435 /*
1436 * while test_and_set_bit() is atomic, we do that and then the
1437 * change_bit(), and the pair is not. See if this is the cause
1438 * of the remaining armlaunch errors.
1439 */
1440 spin_lock_irqsave(&ipath_pioavail_lock, flags);
1441 for (j = 0; j < iter; j++, i++) {
1442 if (i >= piobcnt)
1443 i = starti;
1444 /*
1445 * To avoid bus lock overhead, we first find a candidate
1446 * buffer, then do the test and set, and continue if that
1447 * fails.
1448 */
1449 if (test_bit((2 * i) + 1, shadow) ||
1450 test_and_set_bit((2 * i) + 1, shadow))
1451 continue;
1452 /* flip generation bit */
1453 change_bit(2 * i, shadow);
1454 break;
1455 }
1456 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1457
1458 if (j == iter) {
1459 volatile __le64 *dma = dd->ipath_pioavailregs_dma;
1460
1461 /*
1462 * first time through; shadow exhausted, but may be real
1463 * buffers available, so go see; if any updated, rescan
1464 * (once)
1465 */
1466 if (!updated) {
1467 ipath_update_pio_bufs(dd);
1468 updated = 1;
1469 i = starti;
1470 goto rescan;
1471 }
1472 dd->ipath_upd_pio_shadow = 1;
1473 /*
1474 * not atomic, but if we lose one once in a while, that's OK
1475 */
1476 ipath_stats.sps_nopiobufs++;
1477 if (!(++dd->ipath_consec_nopiobuf % 100000)) {
1478 ipath_dbg(
1479 "%u pio sends with no bufavail; dmacopy: "
1480 "%llx %llx %llx %llx; shadow: "
1481 "%lx %lx %lx %lx\n",
1482 dd->ipath_consec_nopiobuf,
1483 (unsigned long long) le64_to_cpu(dma[0]),
1484 (unsigned long long) le64_to_cpu(dma[1]),
1485 (unsigned long long) le64_to_cpu(dma[2]),
1486 (unsigned long long) le64_to_cpu(dma[3]),
1487 shadow[0], shadow[1], shadow[2],
1488 shadow[3]);
1489 /*
1490 * 4 buffers per byte, 4 registers above, cover rest
1491 * below
1492 */
1493 if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
1494 (sizeof(shadow[0]) * 4 * 4))
1495 ipath_dbg("2nd group: dmacopy: %llx %llx "
1496 "%llx %llx; shadow: %lx %lx "
1497 "%lx %lx\n",
1498 (unsigned long long)
1499 le64_to_cpu(dma[4]),
1500 (unsigned long long)
1501 le64_to_cpu(dma[5]),
1502 (unsigned long long)
1503 le64_to_cpu(dma[6]),
1504 (unsigned long long)
1505 le64_to_cpu(dma[7]),
1506 shadow[4], shadow[5],
1507 shadow[6], shadow[7]);
1508 }
1509 buf = NULL;
1510 goto bail;
1511 }
1512
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08001513 /*
1514 * set next starting place. Since it's just an optimization,
1515 * it doesn't matter who wins on this, so no locking
1516 */
1517 dd->ipath_lastpioindex = i + 1;
1518 if (dd->ipath_upd_pio_shadow)
1519 dd->ipath_upd_pio_shadow = 0;
1520 if (dd->ipath_consec_nopiobuf)
1521 dd->ipath_consec_nopiobuf = 0;
1522 if (i < dd->ipath_piobcnt2k)
1523 buf = (u32 __iomem *) (dd->ipath_pio2kbase +
1524 i * dd->ipath_palign);
1525 else
1526 buf = (u32 __iomem *)
1527 (dd->ipath_pio4kbase +
1528 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
1529 ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
1530 i, (i < dd->ipath_piobcnt2k) ? 2 : 4, buf);
1531 if (pbufnum)
1532 *pbufnum = i;
1533
1534bail:
1535 return buf;
1536}
1537
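/*
 * Editor's illustrative sketch, not part of the original driver: a
 * minimal example of how a caller might use ipath_getpiobuf() to send
 * a small packet by PIO.  The function name, the simplified PBC value
 * and the word counts are hypothetical; the real senders build the PBC
 * and header from the actual packet.  Disabled so it is never compiled.
 */
#if 0
static int example_send_pio(struct ipath_devdata *dd, const u32 *pkt,
			    unsigned nwords)
{
	u32 __iomem *piobuf;
	u32 pnum;

	piobuf = ipath_getpiobuf(dd, &pnum);
	if (!piobuf)
		return -EBUSY;	/* no send buffer available right now */

	/*
	 * the first two 32-bit words of the buffer hold the 64-bit PBC;
	 * here we only set the length field (simplified)
	 */
	writeq(nwords + 1, piobuf);
	/* copy the packet words after the PBC */
	__iowrite32_copy(piobuf + 2, pkt, nwords);
	/* make sure the write-combined stores actually go out */
	wmb();
	return 0;
}
#endif
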
1538/**
1539 * ipath_create_rcvhdrq - create a receive header queue
1540 * @dd: the infinipath device
1541 * @pd: the port data
1542 *
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07001543 * this must be contiguous memory (from an i/o perspective), and must be
1544 * DMA'able (which means for some systems, it will go through an IOMMU,
1545 * or be forced into a low address range).
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08001546 */
1547int ipath_create_rcvhdrq(struct ipath_devdata *dd,
1548 struct ipath_portdata *pd)
1549{
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07001550 int ret = 0;
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08001551
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08001552 if (!pd->port_rcvhdrq) {
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07001553 dma_addr_t phys_hdrqtail;
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08001554 gfp_t gfp_flags = GFP_USER | __GFP_COMP;
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07001555 int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
1556 sizeof(u32), PAGE_SIZE);
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08001557
1558 pd->port_rcvhdrq = dma_alloc_coherent(
1559 &dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
1560 gfp_flags);
1561
1562 if (!pd->port_rcvhdrq) {
1563 ipath_dev_err(dd, "attempt to allocate %d bytes "
1564 "for port %u rcvhdrq failed\n",
1565 amt, pd->port_port);
1566 ret = -ENOMEM;
1567 goto bail;
1568 }
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07001569 pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
1570 &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, GFP_KERNEL);
1571 if (!pd->port_rcvhdrtail_kvaddr) {
1572 ipath_dev_err(dd, "attempt to allocate 1 page "
1573 "for port %u rcvhdrqtailaddr failed\n",
1574 pd->port_port);
1575 ret = -ENOMEM;
Bryan O'Sullivan221e3192006-09-28 08:59:58 -07001576 dma_free_coherent(&dd->pcidev->dev, amt,
1577 pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
1578 pd->port_rcvhdrq = NULL;
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07001579 goto bail;
1580 }
1581 pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08001582
1583 pd->port_rcvhdrq_size = amt;
1584
1585 ipath_cdbg(VERBOSE, "%d pages at %p (phys %lx) size=%lu "
1586 "for port %u rcvhdr Q\n",
1587 amt >> PAGE_SHIFT, pd->port_rcvhdrq,
1588 (unsigned long) pd->port_rcvhdrq_phys,
1589 (unsigned long) pd->port_rcvhdrq_size,
1590 pd->port_port);
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07001591
1592 ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx physical\n",
1593 pd->port_port,
1594 (unsigned long long) phys_hdrqtail);
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08001595 }
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07001596 else
1597 ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
1598 "hdrtailaddr@%p %llx physical\n",
1599 pd->port_port, pd->port_rcvhdrq,
Bryan O'Sullivan1fd3b402006-09-28 09:00:13 -07001600 (unsigned long long) pd->port_rcvhdrq_phys,
1601 pd->port_rcvhdrtail_kvaddr, (unsigned long long)
1602 pd->port_rcvhdrqtailaddr_phys);
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07001603
1604 /* clear for security and sanity on each use */
1605 memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
Bryan O'Sullivan076fafc2006-09-28 09:00:12 -07001606 memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08001607
1608 /*
1609 * tell chip each time we init it, even if we are re-using previous
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07001610 * memory (we zero the register at process close)
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08001611 */
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07001612 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
1613 pd->port_port, pd->port_rcvhdrqtailaddr_phys);
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08001614 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
1615 pd->port_port, pd->port_rcvhdrq_phys);
1616
1617 ret = 0;
1618bail:
1619 return ret;
1620}
1621
1622int ipath_waitfor_complete(struct ipath_devdata *dd, ipath_kreg reg_id,
1623 u64 bits_to_wait_for, u64 * valp)
1624{
1625 unsigned long timeout;
1626 u64 lastval, val;
1627 int ret;
1628
1629 lastval = ipath_read_kreg64(dd, reg_id);
1630 /* wait a ridiculously long time */
1631 timeout = jiffies + msecs_to_jiffies(5);
1632 do {
1633 val = ipath_read_kreg64(dd, reg_id);
1634 /* set so they have something, even on failures. */
1635 *valp = val;
1636 if ((val & bits_to_wait_for) == bits_to_wait_for) {
1637 ret = 0;
1638 break;
1639 }
1640 if (val != lastval)
1641 ipath_cdbg(VERBOSE, "Changed from %llx to %llx, "
1642 "waiting for %llx bits\n",
1643 (unsigned long long) lastval,
1644 (unsigned long long) val,
1645 (unsigned long long) bits_to_wait_for);
1646 cond_resched();
1647 if (time_after(jiffies, timeout)) {
1648 ipath_dbg("Didn't get bits %llx in register 0x%x, "
1649 "got %llx\n",
1650 (unsigned long long) bits_to_wait_for,
1651 reg_id, (unsigned long long) *valp);
1652 ret = -ENODEV;
1653 break;
1654 }
1655 } while (1);
1656
1657 return ret;
1658}
1659
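/*
 * Editor's illustrative sketch, not part of the original driver:
 * typical use of ipath_waitfor_complete() to poll a kernel register
 * for a set of bits.  The choice of kr_ibcstatus and the "ready" mask
 * are purely illustrative; callers pass whatever kreg and bits they
 * care about.  Disabled so it is never compiled.
 */
#if 0
static int example_wait_ready(struct ipath_devdata *dd, u64 ready_bits)
{
	u64 val;
	int ret;

	ret = ipath_waitfor_complete(dd, dd->ipath_kregs->kr_ibcstatus,
				     ready_bits, &val);
	if (ret)	/* -ENODEV: bits did not come up within ~5 msec */
		ipath_dbg("bits %llx never set, last status %llx\n",
			  (unsigned long long) ready_bits,
			  (unsigned long long) val);
	return ret;
}
#endif
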
1660/**
1661 * ipath_waitfor_mdio_cmdready - wait for last command to complete
1662 * @dd: the infinipath device
1663 *
1664 * Like ipath_waitfor_complete(), but we wait for the CMDVALID bit to go
 1665	 * away, indicating the last command has completed. It doesn't return data.
1666 */
1667int ipath_waitfor_mdio_cmdready(struct ipath_devdata *dd)
1668{
1669 unsigned long timeout;
1670 u64 val;
1671 int ret;
1672
1673 /* wait a ridiculously long time */
1674 timeout = jiffies + msecs_to_jiffies(5);
1675 do {
1676 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_mdio);
1677 if (!(val & IPATH_MDIO_CMDVALID)) {
1678 ret = 0;
1679 break;
1680 }
1681 cond_resched();
1682 if (time_after(jiffies, timeout)) {
1683 ipath_dbg("CMDVALID stuck in mdio reg? (%llx)\n",
1684 (unsigned long long) val);
1685 ret = -ENODEV;
1686 break;
1687 }
1688 } while (1);
1689
1690 return ret;
1691}
1692
Dave Olson93800682007-06-18 14:24:41 -07001693
1694/*
1695 * Flush all sends that might be in the ready to send state, as well as any
1696 * that are in the process of being sent. Used whenever we need to be
1697 * sure the send side is idle. Cleans up all buffer state by canceling
1698 * all pio buffers, and issuing an abort, which cleans up anything in the
1699 * launch fifo. The cancel is superfluous on some chip versions, but
1700 * it's safer to always do it.
1701 * PIOAvail bits are updated by the chip as if normal send had happened.
1702 */
Dave Olson3810f2a2007-07-20 14:23:37 -07001703void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
Dave Olson93800682007-06-18 14:24:41 -07001704{
1705 ipath_dbg("Cancelling all in-progress send buffers\n");
1706 dd->ipath_lastcancel = jiffies+HZ/2; /* skip armlaunch errs a bit */
1707 /*
1708 * the abort bit is auto-clearing. We read scratch to be sure
1709 * that cancels and the abort have taken effect in the chip.
1710 */
1711 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1712 INFINIPATH_S_ABORT);
1713 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1714 ipath_disarm_piobufs(dd, 0,
1715 (unsigned)(dd->ipath_piobcnt2k + dd->ipath_piobcnt4k));
Dave Olson3810f2a2007-07-20 14:23:37 -07001716 if (restore_sendctrl) /* else done by caller later */
1717 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1718 dd->ipath_sendctrl);
Dave Olson93800682007-06-18 14:24:41 -07001719
1720 /* and again, be sure all have hit the chip */
1721 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1722}
1723
1724
Bryan O'Sullivan34b2aaf2006-08-25 11:24:32 -07001725static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08001726{
1727 static const char *what[4] = {
1728 [0] = "DOWN",
1729 [INFINIPATH_IBCC_LINKCMD_INIT] = "INIT",
1730 [INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
1731 [INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
1732 };
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07001733 int linkcmd = (which >> INFINIPATH_IBCC_LINKCMD_SHIFT) &
1734 INFINIPATH_IBCC_LINKCMD_MASK;
1735
Bryan O'Sullivan0fd41362006-08-25 11:24:34 -07001736 ipath_cdbg(VERBOSE, "Trying to move unit %u to %s, current ltstate "
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08001737 "is %s\n", dd->ipath_unit,
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07001738 what[linkcmd],
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08001739 ipath_ibcstatus_str[
1740 (ipath_read_kreg64
1741 (dd, dd->ipath_kregs->kr_ibcstatus) >>
1742 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
1743 INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]);
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07001744 /* flush all queued sends when going to DOWN or INIT, to be sure that
Bryan O'Sullivan0fd41362006-08-25 11:24:34 -07001745 * they don't block MAD packets */
Dave Olson93800682007-06-18 14:24:41 -07001746 if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT)
Dave Olson3810f2a2007-07-20 14:23:37 -07001747 ipath_cancel_sends(dd, 1);
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08001748
1749 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1750 dd->ipath_ibcctrl | which);
1751}
1752
Bryan O'Sullivan34b2aaf2006-08-25 11:24:32 -07001753int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
1754{
1755 u32 lstate;
1756 int ret;
1757
1758 switch (newstate) {
1759 case IPATH_IB_LINKDOWN:
1760 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
1761 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
1762 /* don't wait */
1763 ret = 0;
1764 goto bail;
1765
1766 case IPATH_IB_LINKDOWN_SLEEP:
1767 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
1768 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
1769 /* don't wait */
1770 ret = 0;
1771 goto bail;
1772
1773 case IPATH_IB_LINKDOWN_DISABLE:
1774 ipath_set_ib_lstate(dd,
1775 INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
1776 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
1777 /* don't wait */
1778 ret = 0;
1779 goto bail;
1780
1781 case IPATH_IB_LINKINIT:
1782 if (dd->ipath_flags & IPATH_LINKINIT) {
1783 ret = 0;
1784 goto bail;
1785 }
1786 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
1787 INFINIPATH_IBCC_LINKCMD_SHIFT);
1788 lstate = IPATH_LINKINIT;
1789 break;
1790
1791 case IPATH_IB_LINKARM:
1792 if (dd->ipath_flags & IPATH_LINKARMED) {
1793 ret = 0;
1794 goto bail;
1795 }
1796 if (!(dd->ipath_flags &
1797 (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
1798 ret = -EINVAL;
1799 goto bail;
1800 }
1801 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
1802 INFINIPATH_IBCC_LINKCMD_SHIFT);
1803 /*
1804 * Since the port can transition to ACTIVE by receiving
1805 * a non VL 15 packet, wait for either state.
1806 */
1807 lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
1808 break;
1809
1810 case IPATH_IB_LINKACTIVE:
1811 if (dd->ipath_flags & IPATH_LINKACTIVE) {
1812 ret = 0;
1813 goto bail;
1814 }
1815 if (!(dd->ipath_flags & IPATH_LINKARMED)) {
1816 ret = -EINVAL;
1817 goto bail;
1818 }
1819 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
1820 INFINIPATH_IBCC_LINKCMD_SHIFT);
1821 lstate = IPATH_LINKACTIVE;
1822 break;
1823
Bryan O'Sullivan946db672007-03-15 14:44:45 -07001824 case IPATH_IB_LINK_LOOPBACK:
1825 dev_info(&dd->pcidev->dev, "Enabling IB local loopback\n");
1826 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
1827 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1828 dd->ipath_ibcctrl);
1829 ret = 0;
 1830	 goto bail; /* no state change to wait for */
1831
1832 case IPATH_IB_LINK_EXTERNAL:
1833 dev_info(&dd->pcidev->dev, "Disabling IB local loopback (normal)\n");
1834 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
1835 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1836 dd->ipath_ibcctrl);
1837 ret = 0;
 1838	 goto bail; /* no state change to wait for */
1839
Bryan O'Sullivan34b2aaf2006-08-25 11:24:32 -07001840 default:
1841 ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
1842 ret = -EINVAL;
1843 goto bail;
1844 }
1845 ret = ipath_wait_linkstate(dd, lstate, 2000);
1846
1847bail:
1848 return ret;
1849}
1850
1851/**
1852 * ipath_set_mtu - set the MTU
1853 * @dd: the infinipath device
1854 * @arg: the new MTU
1855 *
1856 * we can handle "any" incoming size, the issue here is whether we
1857 * need to restrict our outgoing size. For now, we don't do any
1858 * sanity checking on this, and we don't deal with what happens to
1859 * programs that are already running when the size changes.
1860 * NOTE: changing the MTU will usually cause the IBC to go back to
1861 * link initialize (IPATH_IBSTATE_INIT) state...
1862 */
1863int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
1864{
1865 u32 piosize;
1866 int changed = 0;
1867 int ret;
1868
1869 /*
1870 * mtu is IB data payload max. It's the largest power of 2 less
1871 * than piosize (or even larger, since it only really controls the
1872 * largest we can receive; we can send the max of the mtu and
1873 * piosize). We check that it's one of the valid IB sizes.
1874 */
1875 if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
1876 arg != 4096) {
1877 ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
1878 ret = -EINVAL;
1879 goto bail;
1880 }
1881 if (dd->ipath_ibmtu == arg) {
1882 ret = 0; /* same as current */
1883 goto bail;
1884 }
1885
1886 piosize = dd->ipath_ibmaxlen;
1887 dd->ipath_ibmtu = arg;
1888
1889 if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
1890 /* Only if it's not the initial value (or reset to it) */
1891 if (piosize != dd->ipath_init_ibmaxlen) {
1892 dd->ipath_ibmaxlen = piosize;
1893 changed = 1;
1894 }
1895 } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
1896 piosize = arg + IPATH_PIO_MAXIBHDR;
1897 ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
1898 "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
1899 arg);
1900 dd->ipath_ibmaxlen = piosize;
1901 changed = 1;
1902 }
1903
1904 if (changed) {
1905 /*
1906 * set the IBC maxpktlength to the size of our pio
1907 * buffers in words
1908 */
1909 u64 ibc = dd->ipath_ibcctrl;
1910 ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
1911 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
1912
1913 piosize = piosize - 2 * sizeof(u32); /* ignore pbc */
1914 dd->ipath_ibmaxlen = piosize;
1915 piosize /= sizeof(u32); /* in words */
1916 /*
 1917	 * allow one extra word for the ICRC, which we only send in diag
 1918	 * test pkt mode, so we don't need to worry about it for the mtu
1919 */
1920 piosize += 1;
1921
1922 ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
1923 dd->ipath_ibcctrl = ibc;
1924 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1925 dd->ipath_ibcctrl);
1926 dd->ipath_f_tidtemplate(dd);
1927 }
1928
1929 ret = 0;
1930
1931bail:
1932 return ret;
1933}
1934
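/*
 * Editor's illustrative sketch, not part of the original driver: the
 * call pattern a management path might use for ipath_set_mtu().  2048
 * is just one of the IB-legal sizes (256/512/1024/2048/4096) accepted
 * above.  Disabled so it is never compiled.
 */
#if 0
static void example_set_mtu(struct ipath_devdata *dd)
{
	if (ipath_set_mtu(dd, 2048))
		ipath_dbg("MTU change to 2048 rejected\n");
}
#endif
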
1935int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
1936{
1937 dd->ipath_lid = arg;
1938 dd->ipath_lmc = lmc;
1939
1940 return 0;
1941}
1942
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08001943
1944/**
1945 * ipath_write_kreg_port - write a device's per-port 64-bit kernel register
1946 * @dd: the infinipath device
1947 * @regno: the register number to write
1948 * @port: the port containing the register
1949 * @value: the value to write
1950 *
 1951	 * Registers whose address varies with the port number (per the chip
 1952	 * implementation constants) use this routine.
1953 */
1954void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
1955 unsigned port, u64 value)
1956{
1957 u16 where;
1958
1959 if (port < dd->ipath_portcnt &&
1960 (regno == dd->ipath_kregs->kr_rcvhdraddr ||
1961 regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
1962 where = regno + port;
1963 else
1964 where = -1;
1965
1966 ipath_write_kreg(dd, where, value);
1967}
1968
Michael Albaugh82466f02007-05-16 15:45:09 -07001969/*
 1970	 * The following deal with the "obviously simple" task of overriding
 1971	 * the state of the LEDs, which normally indicate link physical and
 1972	 * logical status. The complications arise from the different hardware
 1973	 * mappings, from the board-dependent routine being called from
 1974	 * interrupts, and from the requirement to _flash_ them.
1975 */
1976#define LED_OVER_FREQ_SHIFT 8
1977#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
1978/* Below is "non-zero" to force override, but both actual LEDs are off */
1979#define LED_OVER_BOTH_OFF (8)
1980
Roland Dreierda9aec72007-07-17 18:37:43 -07001981static void ipath_run_led_override(unsigned long opaque)
Michael Albaugh82466f02007-05-16 15:45:09 -07001982{
1983 struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
1984 int timeoff;
1985 int pidx;
1986 u64 lstate, ltstate, val;
1987
1988 if (!(dd->ipath_flags & IPATH_INITTED))
1989 return;
1990
1991 pidx = dd->ipath_led_override_phase++ & 1;
1992 dd->ipath_led_override = dd->ipath_led_override_vals[pidx];
1993 timeoff = dd->ipath_led_override_timeoff;
1994
1995 /*
 1996	 * The code below potentially restores the LED values per the current
 1997	 * status; it could also set up the traffic-blink register, but that
 1998	 * is left to the per-chip functions.
1999 */
2000 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
2001 ltstate = (val >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
2002 INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
2003 lstate = (val >> INFINIPATH_IBCS_LINKSTATE_SHIFT) &
2004 INFINIPATH_IBCS_LINKSTATE_MASK;
2005
2006 dd->ipath_f_setextled(dd, lstate, ltstate);
2007 mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff);
2008}
2009
2010void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
2011{
2012 int timeoff, freq;
2013
2014 if (!(dd->ipath_flags & IPATH_INITTED))
2015 return;
2016
 2017	 /* First check if we are blinking. If not, use 1 Hz polling */
2018 timeoff = HZ;
2019 freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
2020
2021 if (freq) {
2022 /* For blink, set each phase from one nybble of val */
2023 dd->ipath_led_override_vals[0] = val & 0xF;
2024 dd->ipath_led_override_vals[1] = (val >> 4) & 0xF;
2025 timeoff = (HZ << 4)/freq;
2026 } else {
 2027	 /* Non-blink: set both phases the same. */
2028 dd->ipath_led_override_vals[0] = val & 0xF;
2029 dd->ipath_led_override_vals[1] = val & 0xF;
2030 }
2031 dd->ipath_led_override_timeoff = timeoff;
2032
2033 /*
2034 * If the timer has not already been started, do so. Use a "quick"
2035 * timeout so the function will be called soon, to look at our request.
2036 */
2037 if (atomic_inc_return(&dd->ipath_led_override_timer_active) == 1) {
2038 /* Need to start timer */
2039 init_timer(&dd->ipath_led_override_timer);
2040 dd->ipath_led_override_timer.function =
2041 ipath_run_led_override;
2042 dd->ipath_led_override_timer.data = (unsigned long) dd;
2043 dd->ipath_led_override_timer.expires = jiffies + 1;
2044 add_timer(&dd->ipath_led_override_timer);
2045 } else {
2046 atomic_dec(&dd->ipath_led_override_timer_active);
2047 }
2048}
2049
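/*
 * Editor's illustrative sketch, not part of the original driver: how a
 * caller might pack the override word for ipath_set_led_override().
 * The low nybble is blink phase 0, the next nybble phase 1, and bits
 * 8-15 the frequency (each phase lasts (HZ << 4) / freq jiffies).  What
 * the individual nybble bits mean for the physical LEDs is board
 * dependent and handled by dd->ipath_f_setextled().  Disabled so it is
 * never compiled.
 */
#if 0
static void example_blink_leds(struct ipath_devdata *dd)
{
	/* alternate 0xF and 0x0 with half-second phases ((HZ << 4) / 32) */
	ipath_set_led_override(dd, (32 << LED_OVER_FREQ_SHIFT) | 0x0F);

	/* or: force both LEDs off, no blinking */
	ipath_set_led_override(dd, LED_OVER_BOTH_OFF);
}
#endif
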
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002050/**
2051 * ipath_shutdown_device - shut down a device
2052 * @dd: the infinipath device
2053 *
2054 * This is called to make the device quiet when we are about to
2055 * unload the driver, and also when the device is administratively
2056 * disabled. It does not free any data structures.
 2057	 * Everything it does has to be set up again by ipath_init_chip(dd, 1).
2058 */
2059void ipath_shutdown_device(struct ipath_devdata *dd)
2060{
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002061 ipath_dbg("Shutting down the device\n");
2062
2063 dd->ipath_flags |= IPATH_LINKUNK;
2064 dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
2065 IPATH_LINKINIT | IPATH_LINKARMED |
2066 IPATH_LINKACTIVE);
2067 *dd->ipath_statusp &= ~(IPATH_STATUS_IB_CONF |
2068 IPATH_STATUS_IB_READY);
2069
2070 /* mask interrupts, but not errors */
2071 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
2072
2073 dd->ipath_rcvctrl = 0;
2074 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
2075 dd->ipath_rcvctrl);
2076
2077 /*
2078 * gracefully stop all sends allowing any in progress to trickle out
2079 * first.
2080 */
2081 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL);
2082 /* flush it */
Roland Dreier44f8e3f2006-12-12 11:50:20 -08002083 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002084 /*
 2085	 * wait long enough for anything that's going to trickle out to have
 2086	 * actually done so.
2087 */
2088 udelay(5);
2089
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002090 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
2091 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
Dave Olson3810f2a2007-07-20 14:23:37 -07002092 ipath_cancel_sends(dd, 0);
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002093
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002094 /* disable IBC */
2095 dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
2096 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
Bryan O'Sullivana40f55f2006-07-01 04:36:00 -07002097 dd->ipath_control | INFINIPATH_C_FREEZEMODE);
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002098
2099 /*
 2100	 * clear SerdesEnable and turn the LEDs off; do this here because
 2101	 * we are unloading, so don't count on interrupts to move things
 2102	 * along. Turn the LEDs off explicitly for the same reason.
2103 */
2104 dd->ipath_f_quiet_serdes(dd);
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002105
2106 if (dd->ipath_stats_timer_active) {
2107 del_timer_sync(&dd->ipath_stats_timer);
2108 dd->ipath_stats_timer_active = 0;
2109 }
2110
2111 /*
2112 * clear all interrupts and errors, so that the next time the driver
2113 * is loaded or device is enabled, we know that whatever is set
2114 * happened while we were unloaded
2115 */
2116 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
2117 ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
2118 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
2119 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
Michael Albaughaecd3b52007-05-17 07:26:28 -07002120
2121 ipath_cdbg(VERBOSE, "Flush time and errors to EEPROM\n");
2122 ipath_update_eeprom_log(dd);
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002123}
2124
2125/**
2126 * ipath_free_pddata - free a port's allocated data
2127 * @dd: the infinipath device
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07002128 * @pd: the portdata structure
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002129 *
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07002130 * free up any allocated data for a port
2131 * This should not touch anything that would affect a simultaneous
2132 * re-allocation of port data, because it is called after ipath_mutex
2133 * is released (and can be called from reinit as well).
2134 * It should never change any chip state, or global driver state.
2135 * (The only exception to global state is freeing the port0 port0_skbs.)
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002136 */
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07002137void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002138{
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002139 if (!pd)
2140 return;
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07002141
2142 if (pd->port_rcvhdrq) {
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002143 ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
2144 "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
2145 (unsigned long) pd->port_rcvhdrq_size);
2146 dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
2147 pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
2148 pd->port_rcvhdrq = NULL;
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07002149 if (pd->port_rcvhdrtail_kvaddr) {
2150 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
Bryan O'Sullivan076fafc2006-09-28 09:00:12 -07002151 pd->port_rcvhdrtail_kvaddr,
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07002152 pd->port_rcvhdrqtailaddr_phys);
2153 pd->port_rcvhdrtail_kvaddr = NULL;
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002154 }
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07002155 }
2156 if (pd->port_port && pd->port_rcvegrbuf) {
2157 unsigned e;
2158
2159 for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
2160 void *base = pd->port_rcvegrbuf[e];
2161 size_t size = pd->port_rcvegrbuf_size;
2162
2163 ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
2164 "chunk %u/%u\n", base,
2165 (unsigned long) size,
2166 e, pd->port_rcvegrbuf_chunks);
2167 dma_free_coherent(&dd->pcidev->dev, size,
2168 base, pd->port_rcvegrbuf_phys[e]);
2169 }
Bryan O'Sullivan9929b0f2006-09-28 08:59:59 -07002170 kfree(pd->port_rcvegrbuf);
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07002171 pd->port_rcvegrbuf = NULL;
Bryan O'Sullivan9929b0f2006-09-28 08:59:59 -07002172 kfree(pd->port_rcvegrbuf_phys);
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07002173 pd->port_rcvegrbuf_phys = NULL;
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002174 pd->port_rcvegrbuf_chunks = 0;
Bryan O'Sullivan1fd3b402006-09-28 09:00:13 -07002175 } else if (pd->port_port == 0 && dd->ipath_port0_skbinfo) {
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002176 unsigned e;
Bryan O'Sullivan1fd3b402006-09-28 09:00:13 -07002177 struct ipath_skbinfo *skbinfo = dd->ipath_port0_skbinfo;
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002178
Bryan O'Sullivan1fd3b402006-09-28 09:00:13 -07002179 dd->ipath_port0_skbinfo = NULL;
2180 ipath_cdbg(VERBOSE, "free closed port %d "
2181 "ipath_port0_skbinfo @ %p\n", pd->port_port,
2182 skbinfo);
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002183 for (e = 0; e < dd->ipath_rcvegrcnt; e++)
Bryan O'Sullivan1fd3b402006-09-28 09:00:13 -07002184 if (skbinfo[e].skb) {
2185 pci_unmap_single(dd->pcidev, skbinfo[e].phys,
2186 dd->ipath_ibmaxlen,
2187 PCI_DMA_FROMDEVICE);
2188 dev_kfree_skb(skbinfo[e].skb);
2189 }
2190 vfree(skbinfo);
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002191 }
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07002192 kfree(pd->port_tid_pg_list);
Bryan O'Sullivan9929b0f2006-09-28 08:59:59 -07002193 vfree(pd->subport_uregbase);
2194 vfree(pd->subport_rcvegrbuf);
2195 vfree(pd->subport_rcvhdr_base);
Bryan O'Sullivanf37bda92006-07-01 04:36:03 -07002196 kfree(pd);
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002197}
2198
Roland Dreierac2ae4c2006-04-19 11:40:12 -07002199static int __init infinipath_init(void)
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002200{
2201 int ret;
2202
Bryan O'Sullivan39c0d0b2007-03-15 14:44:52 -07002203 if (ipath_debug & __IPATH_DBG)
2204 printk(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version);
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002205
2206 /*
2207 * These must be called before the driver is registered with
2208 * the PCI subsystem.
2209 */
2210 idr_init(&unit_table);
2211 if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
2212 ret = -ENOMEM;
2213 goto bail;
2214 }
2215
2216 ret = pci_register_driver(&ipath_driver);
2217 if (ret < 0) {
2218 printk(KERN_ERR IPATH_DRV_NAME
2219 ": Unable to register driver: error %d\n", -ret);
2220 goto bail_unit;
2221 }
2222
2223 ret = ipath_driver_create_group(&ipath_driver.driver);
2224 if (ret < 0) {
2225 printk(KERN_ERR IPATH_DRV_NAME ": Unable to create driver "
2226 "sysfs entries: error %d\n", -ret);
2227 goto bail_pci;
2228 }
2229
2230 ret = ipath_init_ipathfs();
2231 if (ret < 0) {
2232 printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
2233 "ipathfs: error %d\n", -ret);
2234 goto bail_group;
2235 }
2236
2237 goto bail;
2238
2239bail_group:
2240 ipath_driver_remove_group(&ipath_driver.driver);
2241
2242bail_pci:
2243 pci_unregister_driver(&ipath_driver);
2244
2245bail_unit:
2246 idr_destroy(&unit_table);
2247
2248bail:
2249 return ret;
2250}
2251
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002252static void __exit infinipath_cleanup(void)
2253{
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002254 ipath_exit_ipathfs();
2255
2256 ipath_driver_remove_group(&ipath_driver.driver);
2257
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002258 ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
2259 pci_unregister_driver(&ipath_driver);
2260
2261 idr_destroy(&unit_table);
2262}
2263
2264/**
2265 * ipath_reset_device - reset the chip if possible
2266 * @unit: the device to reset
2267 *
2268 * Whether or not reset is successful, we attempt to re-initialize the chip
2269 * (that is, much like a driver unload/reload). We clear the INITTED flag
2270 * so that the various entry points will fail until we reinitialize. For
2271 * now, we only allow this if no user ports are open that use chip resources
2272 */
2273int ipath_reset_device(int unit)
2274{
2275 int ret, i;
2276 struct ipath_devdata *dd = ipath_lookup(unit);
2277
2278 if (!dd) {
2279 ret = -ENODEV;
2280 goto bail;
2281 }
2282
Michael Albaugh82466f02007-05-16 15:45:09 -07002283 if (atomic_read(&dd->ipath_led_override_timer_active)) {
2284 /* Need to stop LED timer, _then_ shut off LEDs */
2285 del_timer_sync(&dd->ipath_led_override_timer);
2286 atomic_set(&dd->ipath_led_override_timer_active, 0);
2287 }
2288
2289 /* Shut off LEDs after we are sure timer is not running */
2290 dd->ipath_led_override = LED_OVER_BOTH_OFF;
2291 dd->ipath_f_setextled(dd, 0, 0);
2292
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002293 dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);
2294
2295 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
2296 dev_info(&dd->pcidev->dev, "Invalid unit number %u or "
2297 "not initialized or not present\n", unit);
2298 ret = -ENXIO;
2299 goto bail;
2300 }
2301
2302 if (dd->ipath_pd)
Bryan O'Sullivan23e86a42006-04-24 14:22:59 -07002303 for (i = 1; i < dd->ipath_cfgports; i++) {
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002304 if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) {
2305 ipath_dbg("unit %u port %d is in use "
2306 "(PID %u cmd %s), can't reset\n",
2307 unit, i,
2308 dd->ipath_pd[i]->port_pid,
2309 dd->ipath_pd[i]->port_comm);
2310 ret = -EBUSY;
2311 goto bail;
2312 }
2313 }
2314
2315 dd->ipath_flags &= ~IPATH_INITTED;
2316 ret = dd->ipath_f_reset(dd);
2317 if (ret != 1)
2318 ipath_dbg("reset was not successful\n");
2319 ipath_dbg("Trying to reinitialize unit %u after reset attempt\n",
2320 unit);
2321 ret = ipath_init_chip(dd, 1);
2322 if (ret)
2323 ipath_dev_err(dd, "Reinitialize unit %u after "
2324 "reset failed with %d\n", unit, ret);
2325 else
2326 dev_info(&dd->pcidev->dev, "Reinitialized unit %u after "
2327 "resetting\n", unit);
2328
2329bail:
2330 return ret;
2331}
2332
Bryan O'Sullivan30fc5c32006-08-25 11:24:48 -07002333int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
2334{
2335 u64 val;
 2336	 if (new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK) {
2337 return -1;
2338 }
 2339	 if (dd->ipath_rx_pol_inv != new_pol_inv) {
2340 dd->ipath_rx_pol_inv = new_pol_inv;
2341 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
2342 val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
Roland Dreier3cd96562006-09-22 15:22:46 -07002343 INFINIPATH_XGXS_RX_POL_SHIFT);
2344 val |= ((u64)dd->ipath_rx_pol_inv) <<
2345 INFINIPATH_XGXS_RX_POL_SHIFT;
Bryan O'Sullivan30fc5c32006-08-25 11:24:48 -07002346 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
2347 }
2348 return 0;
2349}
Bryan O'Sullivan7bb206e2006-03-29 15:23:24 -08002350module_init(infinipath_init);
2351module_exit(infinipath_cleanup);