/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>

#include <linux/io.h>

#ifdef CONFIG_SPARC64
#include <linux/of_device.h>
#endif

#include "niu.h"

#define DRV_MODULE_NAME		"niu"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"0.9"
#define DRV_MODULE_RELDATE	"May 4, 2008"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#ifndef DMA_44BIT_MASK
#define DMA_44BIT_MASK	0x00000fffffffffffULL
#endif

#ifndef readq
static u64 readq(void __iomem *reg)
{
	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}
#endif
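
/* On platforms without native 64-bit MMIO accessors (e.g. 32-bit hosts)
 * the fallbacks above split each 64-bit register access into two 32-bit
 * ones: low word first, then the high word at offset +4.  Such a split
 * access is not atomic -- the hardware could change state between the
 * two halves -- so the driver presumably relies on its own locking to
 * keep the halves consistent.
 */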

static struct pci_device_id niu_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);

#define NIU_TX_TIMEOUT			(5 * HZ)

#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))
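
/* These accessors assume a "struct niu *np" in scope.  nr64/nw64 touch
 * the main register window (np->regs), the _mac variants go through the
 * separate np->mac_regs mapping, and the _ipp/_pcs/_xpcs variants add
 * the per-port offset of that unit inside the main window.
 */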

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int niu_debug;
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");

#define niudbg(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_DEBUG PFX f, ## a); \
} while (0)

#define niuinfo(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_INFO PFX f, ## a); \
} while (0)

#define niuwarn(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_WARNING PFX f, ## a); \
} while (0)

#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)

static int serdes_init_10g_serdes(struct niu *np);

static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64_mac(reg));
	return err;
}

#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
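
/* This macro wrapper, and its siblings below, exist so BUILD_BUG_ON()
 * can reject at compile time any call site whose poll limit or delay
 * constant is out of range (LIMIT <= 0 or DELAY < 0); this only works
 * because every caller is expected to pass literal constants.
 */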

static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64_ipp(reg));
	return err;
}

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})

static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64(reg));
	return err;
}

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

	if (on)
		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
}

static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;
	u64 val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;
	else
		val |= bits;
	nw64(mask_reg, val);

	return 0;
}

static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;
	int i;

	for (i = 0; i <= LDN_MAX; i++) {
		int err;

		if (parent->ldg_map[i] != lp->ldg_num)
			continue;

		err = niu_ldn_irq_enable(np, i, on);
		if (err)
			return err;
	}
	return 0;
}

static int niu_enable_interrupts(struct niu *np, int on)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];
		int err;

		err = niu_enable_ldn_in_ldg(np, lp, on);
		if (err)
			return err;
	}
	for (i = 0; i < np->num_ldg; i++)
		niu_ldg_rearm(np, &np->ldg[i], on);

	return 0;
}

static u32 phy_encode(u32 type, int port)
{
	return (type << (port * 2));
}

static u32 phy_decode(u32 val, int port)
{
	return (val >> (port * 2)) & PORT_TYPE_MASK;
}
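
/* The parent's port_phy word packs one PHY type per port in a small
 * field two bits apart (assuming PORT_TYPE_MASK covers two bits), so
 * port N's type lives in bits [2N+1:2N]; e.g. phy_decode(port_phy, 2)
 * extracts bits 5:4.
 */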

static int mdio_wait(struct niu *np)
{
	int limit = 1000;
	u64 val;

	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;

		udelay(10);
	}

	return -ENODEV;
}

static int mdio_read(struct niu *np, int port, int dev, int reg)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);
}

static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

static int mii_read(struct niu *np, int port, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);
}

static int mii_write(struct niu *np, int port, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}
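
/* mdio_read/mdio_write speak clause 45 style management frames: an
 * address frame selecting (port, MMD device, register) followed by the
 * actual read or write frame, with mdio_wait() polling the turnaround
 * bit after each frame.  mii_read/mii_write issue single clause 22
 * style frames for plain MII PHY registers.  On success the read paths
 * return the 16-bit data as a non-negative int, which is why callers
 * test "err < 0" rather than "err".
 */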

static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_H(channel),
				 val >> 16);
	return err;
}

static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_RX_CFG_H(channel),
				 val >> 16);
	return err;
}

/* Mode is always 10G fiber.  */
static int serdes_init_niu(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg;
	unsigned long i;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	return 0;
}
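
/* Each 32-bit ESR2 PLL configuration value is programmed as a low/high
 * 16-bit register pair over MDIO (the _L/_H registers above), and the
 * same TX/RX settings are replicated across the four lanes that make
 * up one 10G port.
 */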

static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_H(chan));
		if (err >= 0)
			*val |= ((err & 0xffff) << 16);
		err = 0;
	}
	return err;
}

static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_GLUE_CTRL0_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_reset(struct niu *np, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_L);
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_RESET_CTRL_H);
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_RXTX_CTRL_H(chan), (val >> 16));
	return err;
}

static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_GLUE_CTRL0_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
	return err;
}

static int esr_reset(struct niu *np)
{
	u32 reset;
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);
	if (err)
		return err;
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);
	if (err)
		return err;
	udelay(200);

	err = esr_read_reset(np, &reset);
	if (err)
		return err;
	if (reset != 0) {
		dev_err(np->device, PFX "Port %u ESR_RESET "
			"did not clear [%08x]\n",
			np->port, reset);
		return -ENODEV;
	}

	return 0;
}
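
/* esr_reset() walks the 32-bit RXTX reset control (split across the
 * _L/_H register pair) through an assert/release sequence with a 200us
 * settle time after each step, then reads the register back and
 * requires every reset bit to have dropped to zero before the SERDES
 * is considered usable.
 */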

static int serdes_init_10g(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;

	switch (np->port) {
	case 0:
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		break;
	case 1:
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	err = esr_reset(np);
	if (err)
		return err;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
			return 0;
		}
		dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
			"[%08x]\n", np->port, (int) (sig & mask), (int) val);
		return -ENODEV;
	}
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
	return 0;
}
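
/* After programming the lanes, serdes_init_10g() requires the port's
 * full set of ESR_INT_SIGNALS bits (serdes ready, signal detect, and
 * per-channel detect) to be up.  A missing signal is fatal for a fixed
 * PHY, but on hotplug-capable ports it merely records that no PHY is
 * currently present so the link poller can pick it up later.
 */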

static int serdes_init_1g(struct niu *np)
{
	u64 val;

	val = nr64(ENET_SERDES_1_PLL_CFG);
	val &= ~ENET_SERDES_PLL_FBDIV2;
	switch (np->port) {
	case 0:
		val |= ENET_SERDES_PLL_HRATE0;
		break;
	case 1:
		val |= ENET_SERDES_PLL_HRATE1;
		break;
	case 2:
		val |= ENET_SERDES_PLL_HRATE2;
		break;
	case 3:
		val |= ENET_SERDES_PLL_HRATE3;
		break;
	default:
		return -EINVAL;
	}
	nw64(ENET_SERDES_1_PLL_CFG, val);

	return 0;
}

static int serdes_init_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;
	u64 reset_val, val_rd;

	val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
		ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
		ENET_SERDES_PLL_FBDIV0;
	switch (np->port) {
	case 0:
		reset_val = ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;
	case 1:
		reset_val = ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ENET_SERDES_RESET, reset_val);
	mdelay(20);
	val_rd = nr64(ENET_SERDES_RESET);
	val_rd &= ~reset_val;
	nw64(pll_cfg, val);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);
	nw64(ENET_SERDES_RESET, val_rd);
	mdelay(2000);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}


	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
			"[%08x]\n", np->port, (int) (sig & mask), (int) val);
		return -ENODEV;
	}

	return 0;
}

static int link_status_1g_serdes(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int link_up;
	u64 val;
	u16 current_speed;
	unsigned long flags;
	u8 current_duplex;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	val = nr64_pcs(PCS_MII_STAT);

	if (val & PCS_MII_STAT_LINK_STATUS) {
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}

	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return 0;
}

static int link_status_10g_serdes(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	struct niu_link_config *lp = &np->link_config;
	int link_up = 0;
	int link_ok = 1;
	u64 val, val2;
	u16 current_speed;
	u8 current_duplex;

	if (!(np->flags & NIU_FLAGS_10G))
		return link_status_1g_serdes(np, link_up_p);

	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	spin_lock_irqsave(&np->lock, flags);

	val = nr64_xpcs(XPCS_STATUS(0));
	val2 = nr64_mac(XMAC_INTER2);
	if (val2 & 0x01000000)
		link_ok = 0;

	if ((val & 0x1000ULL) && link_ok) {
		link_up = 1;
		current_speed = SPEED_10000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);
	*link_up_p = link_up;
	return 0;
}
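
/* For serdes-only ports the link state is read straight from hardware
 * status: the 1G path checks the PCS MII status link bit, while the
 * 10G path requires a status bit (0x1000) in XPCS_STATUS(0) and
 * additionally treats bit 24 of XMAC_INTER2 as a fault indication that
 * forces the link down.
 */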

static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	u16 current_speed, bmsr;
	unsigned long flags;
	u8 current_duplex;
	int err, link_up;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	err = -EINVAL;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		goto out;

	bmsr = err;
	if (bmsr & BMSR_LSTATUS) {
		u16 adv, lpa, common, estat;

		err = mii_read(np, np->phy_addr, MII_ADVERTISE);
		if (err < 0)
			goto out;
		adv = err;

		err = mii_read(np, np->phy_addr, MII_LPA);
		if (err < 0)
			goto out;
		lpa = err;

		common = adv & lpa;

		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			goto out;
		estat = err;
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	err = 0;

out:
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return err;
}

static int bcm8704_reset(struct niu *np)
{
	int err, limit;

	err = mdio_read(np, np->phy_addr,
			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
	if (err < 0)
		return err;
	err |= BMCR_RESET;
	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		err = mdio_read(np, np->phy_addr,
				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		dev_err(np->device, PFX "Port %u PHY will not reset "
			"(bmcr=%04x)\n", np->port, (err & 0xffff));
		return -ENODEV;
	}
	return 0;
}

/* When written, certain PHY registers need to be read back twice
 * in order for the bits to settle properly.
 */
static int bcm8704_user_dev3_readback(struct niu *np, int reg)
{
	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	return 0;
}

static int bcm8706_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err |= USER_ODIG_CTRL_RESV2;
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

static int bcm8704_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
			 (USER_CONTROL_OPTXRST_LVL |
			  USER_CONTROL_OPBIASFLT_LVL |
			  USER_CONTROL_OBTMPFLT_LVL |
			  USER_CONTROL_OPPRFLT_LVL |
			  USER_CONTROL_OPTXFLT_LVL |
			  USER_CONTROL_OPRXLOS_LVL |
			  USER_CONTROL_OPRXFLT_LVL |
			  USER_CONTROL_OPTXON_LVL |
			  (0x3f << USER_CONTROL_RES1_SHIFT)));
	if (err)
		return err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
			 (USER_PMD_TX_CTL_XFP_CLKEN |
			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
			  USER_PMD_TX_CTL_TSCK_LPWREN));
	if (err)
		return err;

	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
	if (err)
		return err;
	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}
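
/* Both BCM870x init paths program the "user" device-3 registers that
 * control the optics: the 8704 variant first sets up signal polarities
 * and the XFP transmit clocking, and both end by routing GPIO control
 * (USER_ODIG_CTRL_GPIOS) and busy-waiting a full second, presumably to
 * let the optical module settle before any link checks.
 */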

static int mrvl88x2011_act_led(struct niu *np, int val)
{
	int err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_8_TO_11_CTL);
	if (err < 0)
		return err;

	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT, MRVL88X2011_LED_CTL_MASK);
	err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT, val);

	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			  MRVL88X2011_LED_8_TO_11_CTL, err);
}

static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
{
	int err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_BLINK_CTL);
	if (err >= 0) {
		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
		err |= (rate << 4);

		err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
				 MRVL88X2011_LED_BLINK_CTL, err);
	}

	return err;
}

static int xcvr_init_10g_mrvl88x2011(struct niu *np)
{
	int err;

	/* Set LED functions */
	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
	if (err)
		return err;

	/* led activity */
	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_GENERAL_CTL);
	if (err < 0)
		return err;

	err |= MRVL88X2011_ENA_XFPREFCLK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			 MRVL88X2011_GENERAL_CTL, err);
	if (err < 0)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_CTL_1);
	if (err < 0)
		return err;

	if (np->link_config.loopback_mode == LOOPBACK_MAC)
		err |= MRVL88X2011_LOOPBACK;
	else
		err &= ~MRVL88X2011_LOOPBACK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			 MRVL88X2011_PMA_PMD_CTL_1, err);
	if (err < 0)
		return err;

	/* Enable PMD */
	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
}

static int xcvr_diag_bcm870x(struct niu *np)
{
	u16 analog_stat0, tx_alarm_status;
	int err = 0;

#if 1
	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			MII_STAT1000);
	if (err < 0)
		return err;
	pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n",
		np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
	if (err < 0)
		return err;
	pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n",
		np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			MII_NWAYTEST);
	if (err < 0)
		return err;
	pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n",
		np->port, err);
#endif

	/* XXX dig this out it might not be so useful XXX */
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	analog_stat0 = err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	tx_alarm_status = err;

	if (analog_stat0 != 0x03fc) {
		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
			pr_info(PFX "Port %u cable not connected "
				"or bad cable.\n", np->port);
		} else if (analog_stat0 == 0x639c) {
			pr_info(PFX "Port %u optical module is bad "
				"or missing.\n", np->port);
		}
	}

	return 0;
}

static int xcvr_10g_set_lb_bcm870x(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			MII_BMCR);
	if (err < 0)
		return err;

	err &= ~BMCR_LOOPBACK;

	if (lp->loopback_mode == LOOPBACK_MAC)
		err |= BMCR_LOOPBACK;

	err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g_bcm8706(struct niu *np)
{
	int err = 0;
	u64 val;

	if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
	    (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
		return err;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = bcm8706_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g_bcm8704(struct niu *np)
{
	int err;

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = bcm8704_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g(struct niu *np)
{
	int phy_id, err;
	u64 val;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	phy_id = phy_decode(np->parent->port_phy, np->port);
	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

	/* handle different phy types */
	switch (phy_id & NIU_PHY_ID_MASK) {
	case NIU_PHY_ID_MRVL88X2011:
		err = xcvr_init_10g_mrvl88x2011(np);
		break;

	default: /* bcom 8704 */
		err = xcvr_init_10g_bcm8704(np);
		break;
	}

	return 0;
}
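
/* xcvr_init_10g() dispatches on the PHY id probed for this port:
 * Marvell 88X2011 parts get the MRVL init path and anything else falls
 * through to the Broadcom 8704 path.  Note that the per-PHY init
 * result in err is not propagated to the caller here.
 */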

static int mii_reset(struct niu *np)
{
	int limit, err;

	err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		udelay(500);
		err = mii_read(np, np->phy_addr, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		dev_err(np->device, PFX "Port %u MII would not reset, "
			"bmcr[%04x]\n", np->port, err);
		return -ENODEV;
	}

	return 0;
}

static int xcvr_init_1g_rgmii(struct niu *np)
{
	int err;
	u64 val;
	u16 bmcr, bmsr, estat;

	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (bmsr & BMSR_ESTATEN) {
		u16 ctrl1000 = 0;

		if (estat & ESTATUS_1000_TFULL)
			ctrl1000 |= ADVERTISE_1000FULL;
		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
		if (err)
			return err;
	}

	bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = mii_read(np, np->phy_addr, MII_BMCR);

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;

	return 0;
}

static int mii_init_common(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 bmcr, bmsr, adv, estat;
	int err;

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (lp->loopback_mode == LOOPBACK_MAC) {
		bmcr |= BMCR_LOOPBACK;
		if (lp->active_speed == SPEED_1000)
			bmcr |= BMCR_SPEED1000;
		if (lp->active_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 aux;

		aux = (BCM5464R_AUX_CTL_EXT_LB |
		       BCM5464R_AUX_CTL_WRITE_1);
		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
		if (err)
			return err;
	}

	/* XXX configurable XXX */
	/* XXX for now don't advertise half-duplex or asym pause... XXX */
	adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
	if (bmsr & BMSR_10FULL)
		adv |= ADVERTISE_10FULL;
	if (bmsr & BMSR_100FULL)
		adv |= ADVERTISE_100FULL;
	err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
	if (err)
		return err;

	if (bmsr & BMSR_ESTATEN) {
		u16 ctrl1000 = 0;

		if (estat & ESTATUS_1000_TFULL)
			ctrl1000 |= ADVERTISE_1000FULL;
		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
		if (err)
			return err;
	}
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
#if 0
	pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
		np->port, bmcr, bmsr);
#endif

	return 0;
}

static int xcvr_init_1g(struct niu *np)
{
	u64 val;

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	return mii_init_common(np);
}

static int niu_xcvr_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->xcvr_init)
		err = ops->xcvr_init(np);

	return err;
}

static int niu_serdes_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->serdes_init)
		err = ops->serdes_init(np);

	return err;
}

static void niu_init_xif(struct niu *);
static void niu_handle_led(struct niu *, int status);

static int niu_link_status_common(struct niu *np, int link_up)
{
	struct niu_link_config *lp = &np->link_config;
	struct net_device *dev = np->dev;
	unsigned long flags;

	if (!netif_carrier_ok(dev) && link_up) {
		niuinfo(LINK, "%s: Link is up at %s, %s duplex\n",
			dev->name,
			(lp->active_speed == SPEED_10000 ?
			 "10Gb/sec" :
			 (lp->active_speed == SPEED_1000 ?
			  "1Gb/sec" :
			  (lp->active_speed == SPEED_100 ?
			   "100Mbit/sec" : "10Mbit/sec"))),
			(lp->active_duplex == DUPLEX_FULL ?
			 "full" : "half"));

		spin_lock_irqsave(&np->lock, flags);
		niu_init_xif(np);
		niu_handle_led(np, 1);
		spin_unlock_irqrestore(&np->lock, flags);

		netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev) && !link_up) {
		niuwarn(LINK, "%s: Link is down\n", dev->name);
		spin_lock_irqsave(&np->lock, flags);
		niu_handle_led(np, 0);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_carrier_off(dev);
	}

	return 0;
}

static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
{
	int err, link_up, pma_status, pcs_status;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_10G_PMD_STATUS_2);
	if (err < 0)
		goto out;

	/* Check PMA/PMD Register: 1.0001.2 == 1 */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check PMC Register : 3.0001.2 == 1: read twice */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check XGXS Register : 4.0018.[0-3,12] */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
			MRVL88X2011_10G_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
		    0x800))
		link_up = (pma_status && pcs_status) ? 1 : 0;

	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;
out:
	mrvl88x2011_act_led(np, (link_up ?
				 MRVL88X2011_LED_CTL_PCS_ACT :
				 MRVL88X2011_LED_CTL_OFF));

	*link_up_p = link_up;
	return err;
}

static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
{
	int err, link_up;
	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;

	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;
	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_PATTEST |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		np->link_config.active_speed = SPEED_INVALID;
		np->link_config.active_duplex = DUPLEX_INVALID;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		err = 0;
	return err;
}

static int link_status_10g_bcom(struct niu *np, int *link_up_p)
{
	int err, link_up;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;
	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}

static int link_status_10g(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = -EINVAL;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		int phy_id;

		phy_id = phy_decode(np->parent->port_phy, np->port);
		phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

		/* handle different phy types */
		switch (phy_id & NIU_PHY_ID_MASK) {
		case NIU_PHY_ID_MRVL88X2011:
			err = link_status_10g_mrvl(np, link_up_p);
			break;

		default: /* bcom 8704 */
			err = link_status_10g_bcom(np, link_up_p);
			break;
		}
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}

static int niu_10g_phy_present(struct niu *np)
{
	u64 sig, mask, val;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return 0;
	}

	if ((sig & mask) != val)
		return 0;
	return 1;
}

static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = 0;
	int phy_present;
	int phy_present_prev;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
			1 : 0;
		phy_present = niu_10g_phy_present(np);
		if (phy_present != phy_present_prev) {
			/* state change */
			if (phy_present) {
				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				if (np->phy_ops->xcvr_init)
					err = np->phy_ops->xcvr_init(np);
				if (err) {
					/* debounce */
					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				}
			} else {
				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				*link_up_p = 0;
				niuwarn(LINK, "%s: Hotplug PHY Removed\n",
					np->dev->name);
			}
		}
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT)
			err = link_status_10g_bcm8706(np, link_up_p);
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}
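
/* The hotplug link poller compares the current SERDES signal state
 * against the cached NIU_FLAGS_HOTPLUG_PHY_PRESENT flag: on insertion
 * it re-runs xcvr_init (clearing the flag again if that fails, as a
 * debounce), on removal it forces the link down, and only while a PHY
 * is present does it consult the BCM8706 link status.
 */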

static int link_status_1g(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	u16 current_speed, bmsr;
	unsigned long flags;
	u8 current_duplex;
	int err, link_up;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	err = -EINVAL;
	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
		goto out;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		goto out;

	bmsr = err;
	if (bmsr & BMSR_LSTATUS) {
		u16 adv, lpa, common, estat;

		err = mii_read(np, np->phy_addr, MII_ADVERTISE);
		if (err < 0)
			goto out;
		adv = err;

		err = mii_read(np, np->phy_addr, MII_LPA);
		if (err < 0)
			goto out;
		lpa = err;

		common = adv & lpa;

		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			goto out;
		estat = err;

		link_up = 1;
		if (estat & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) {
			current_speed = SPEED_1000;
			if (estat & ESTATUS_1000_TFULL)
				current_duplex = DUPLEX_FULL;
			else
				current_duplex = DUPLEX_HALF;
		} else {
			if (common & ADVERTISE_100BASE4) {
				current_speed = SPEED_100;
				current_duplex = DUPLEX_HALF;
			} else if (common & ADVERTISE_100FULL) {
				current_speed = SPEED_100;
				current_duplex = DUPLEX_FULL;
			} else if (common & ADVERTISE_100HALF) {
				current_speed = SPEED_100;
				current_duplex = DUPLEX_HALF;
			} else if (common & ADVERTISE_10FULL) {
				current_speed = SPEED_10;
				current_duplex = DUPLEX_FULL;
			} else if (common & ADVERTISE_10HALF) {
				current_speed = SPEED_10;
				current_duplex = DUPLEX_HALF;
			} else
				link_up = 0;
		}
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	err = 0;

out:
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return err;
}

static int niu_link_status(struct niu *np, int *link_up_p)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->link_status)
		err = ops->link_status(np, link_up_p);

	return err;
}

static void niu_timer(unsigned long __opaque)
{
	struct niu *np = (struct niu *) __opaque;
	unsigned long off;
	int err, link_up;

	err = niu_link_status(np, &link_up);
	if (!err)
		niu_link_status_common(np, link_up);

	if (netif_carrier_ok(np->dev))
		off = 5 * HZ;
	else
		off = 1 * HZ;
	np->timer.expires = jiffies + off;

	add_timer(&np->timer);
}
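
/* The link state machine is driven entirely by this self-rearming
 * timer: it polls niu_link_status() every 5 seconds while the carrier
 * is up and every second while it is down, so a recovered link is
 * noticed quickly.
 */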
1951
Matheos Worku5fbd7e22008-02-28 21:25:43 -08001952static const struct niu_phy_ops phy_ops_10g_serdes = {
1953 .serdes_init = serdes_init_10g_serdes,
1954 .link_status = link_status_10g_serdes,
1955};
1956
1957static const struct niu_phy_ops phy_ops_1g_rgmii = {
1958 .xcvr_init = xcvr_init_1g_rgmii,
1959 .link_status = link_status_1g_rgmii,
1960};
1961
David S. Millera3138df2007-10-09 01:54:01 -07001962static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
1963 .serdes_init = serdes_init_niu,
1964 .xcvr_init = xcvr_init_10g,
1965 .link_status = link_status_10g,
1966};
1967
1968static const struct niu_phy_ops phy_ops_10g_fiber = {
1969 .serdes_init = serdes_init_10g,
1970 .xcvr_init = xcvr_init_10g,
1971 .link_status = link_status_10g,
1972};
1973
Matheos Workua5d6ab52008-04-24 21:09:20 -07001974static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
1975 .serdes_init = serdes_init_10g,
1976 .xcvr_init = xcvr_init_10g_bcm8706,
1977 .link_status = link_status_10g_hotplug,
1978};
1979
David S. Millera3138df2007-10-09 01:54:01 -07001980static const struct niu_phy_ops phy_ops_10g_copper = {
1981 .serdes_init = serdes_init_10g,
1982 .link_status = link_status_10g, /* XXX */
1983};
1984
1985static const struct niu_phy_ops phy_ops_1g_fiber = {
1986 .serdes_init = serdes_init_1g,
1987 .xcvr_init = xcvr_init_1g,
1988 .link_status = link_status_1g,
1989};
1990
1991static const struct niu_phy_ops phy_ops_1g_copper = {
1992 .xcvr_init = xcvr_init_1g,
1993 .link_status = link_status_1g,
1994};
1995
1996struct niu_phy_template {
1997 const struct niu_phy_ops *ops;
1998 u32 phy_addr_base;
1999};
2000
2001static const struct niu_phy_template phy_template_niu = {
2002 .ops = &phy_ops_10g_fiber_niu,
2003 .phy_addr_base = 16,
2004};
2005
2006static const struct niu_phy_template phy_template_10g_fiber = {
2007 .ops = &phy_ops_10g_fiber,
2008 .phy_addr_base = 8,
2009};
2010
Matheos Workua5d6ab52008-04-24 21:09:20 -07002011static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
2012 .ops = &phy_ops_10g_fiber_hotplug,
2013 .phy_addr_base = 8,
2014};
2015
David S. Millera3138df2007-10-09 01:54:01 -07002016static const struct niu_phy_template phy_template_10g_copper = {
2017 .ops = &phy_ops_10g_copper,
2018 .phy_addr_base = 10,
2019};
2020
2021static const struct niu_phy_template phy_template_1g_fiber = {
2022 .ops = &phy_ops_1g_fiber,
2023 .phy_addr_base = 0,
2024};
2025
2026static const struct niu_phy_template phy_template_1g_copper = {
2027 .ops = &phy_ops_1g_copper,
2028 .phy_addr_base = 0,
2029};
2030
Matheos Worku5fbd7e22008-02-28 21:25:43 -08002031static const struct niu_phy_template phy_template_1g_rgmii = {
2032 .ops = &phy_ops_1g_rgmii,
2033 .phy_addr_base = 0,
2034};
2035
2036static const struct niu_phy_template phy_template_10g_serdes = {
2037 .ops = &phy_ops_10g_serdes,
2038 .phy_addr_base = 0,
2039};
2040
2041static int niu_atca_port_num[4] = {
2042 0, 0, 11, 10
2043};
2044
2045static int serdes_init_10g_serdes(struct niu *np)
2046{
2047 struct niu_link_config *lp = &np->link_config;
2048 unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
2049 u64 ctrl_val, test_cfg_val, sig, mask, val;
2050 int err;
2051 u64 reset_val;
2052
2053 switch (np->port) {
2054 case 0:
2055 reset_val = ENET_SERDES_RESET_0;
2056 ctrl_reg = ENET_SERDES_0_CTRL_CFG;
2057 test_cfg_reg = ENET_SERDES_0_TEST_CFG;
2058 pll_cfg = ENET_SERDES_0_PLL_CFG;
2059 break;
2060 case 1:
2061 reset_val = ENET_SERDES_RESET_1;
2062 ctrl_reg = ENET_SERDES_1_CTRL_CFG;
2063 test_cfg_reg = ENET_SERDES_1_TEST_CFG;
2064 pll_cfg = ENET_SERDES_1_PLL_CFG;
2065 break;
2066
2067 default:
2068 return -EINVAL;
2069 }
2070 ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
2071 ENET_SERDES_CTRL_SDET_1 |
2072 ENET_SERDES_CTRL_SDET_2 |
2073 ENET_SERDES_CTRL_SDET_3 |
2074 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
2075 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
2076 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
2077 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
2078 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
2079 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
2080 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
2081 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
2082 test_cfg_val = 0;
2083
2084 if (lp->loopback_mode == LOOPBACK_PHY) {
2085 test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
2086 ENET_SERDES_TEST_MD_0_SHIFT) |
2087 (ENET_TEST_MD_PAD_LOOPBACK <<
2088 ENET_SERDES_TEST_MD_1_SHIFT) |
2089 (ENET_TEST_MD_PAD_LOOPBACK <<
2090 ENET_SERDES_TEST_MD_2_SHIFT) |
2091 (ENET_TEST_MD_PAD_LOOPBACK <<
2092 ENET_SERDES_TEST_MD_3_SHIFT));
2093 }
2094
2095 esr_reset(np);
2096 nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
2097 nw64(ctrl_reg, ctrl_val);
2098 nw64(test_cfg_reg, test_cfg_val);
2099
2100 /* Initialize all 4 lanes of the SERDES. */
2101 for (i = 0; i < 4; i++) {
2102 u32 rxtx_ctrl, glue0;
2103
2104 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
2105 if (err)
2106 return err;
2107 err = esr_read_glue0(np, i, &glue0);
2108 if (err)
2109 return err;
2110
2111 rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
2112 rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
2113 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
2114
2115 glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
2116 ESR_GLUE_CTRL0_THCNT |
2117 ESR_GLUE_CTRL0_BLTIME);
2118 glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
2119 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
2120 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
2121 (BLTIME_300_CYCLES <<
2122 ESR_GLUE_CTRL0_BLTIME_SHIFT));
2123
2124 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
2125 if (err)
2126 return err;
2127 err = esr_write_glue0(np, i, glue0);
2128 if (err)
2129 return err;
2130 }
2131
2132
2133 sig = nr64(ESR_INT_SIGNALS);
2134 switch (np->port) {
2135 case 0:
2136 mask = ESR_INT_SIGNALS_P0_BITS;
2137 val = (ESR_INT_SRDY0_P0 |
2138 ESR_INT_DET0_P0 |
2139 ESR_INT_XSRDY_P0 |
2140 ESR_INT_XDP_P0_CH3 |
2141 ESR_INT_XDP_P0_CH2 |
2142 ESR_INT_XDP_P0_CH1 |
2143 ESR_INT_XDP_P0_CH0);
2144 break;
2145
2146 case 1:
2147 mask = ESR_INT_SIGNALS_P1_BITS;
2148 val = (ESR_INT_SRDY0_P1 |
2149 ESR_INT_DET0_P1 |
2150 ESR_INT_XSRDY_P1 |
2151 ESR_INT_XDP_P1_CH3 |
2152 ESR_INT_XDP_P1_CH2 |
2153 ESR_INT_XDP_P1_CH1 |
2154 ESR_INT_XDP_P1_CH0);
2155 break;
2156
2157 default:
2158 return -EINVAL;
2159 }
2160
2161 if ((sig & mask) != val) {
2162 int err;
2163 err = serdes_init_1g_serdes(np);
2164 if (!err) {
2165 np->flags &= ~NIU_FLAGS_10G;
2166 np->mac_xcvr = MAC_XCVR_PCS;
2167 } else {
2168 dev_err(np->device, PFX "Port %u 10G/1G SERDES Link Failed\n",
2169 np->port);
2170 return -ENODEV;
2171 }
2172 }
2173
2174 return 0;
2175}
2176
David S. Millera3138df2007-10-09 01:54:01 -07002177static int niu_determine_phy_disposition(struct niu *np)
2178{
2179 struct niu_parent *parent = np->parent;
2180 u8 plat_type = parent->plat_type;
2181 const struct niu_phy_template *tp;
2182 u32 phy_addr_off = 0;
2183
2184 if (plat_type == PLAT_TYPE_NIU) {
2185 tp = &phy_template_niu;
2186 phy_addr_off += np->port;
2187 } else {
Matheos Worku5fbd7e22008-02-28 21:25:43 -08002188 switch (np->flags &
2189 (NIU_FLAGS_10G |
2190 NIU_FLAGS_FIBER |
2191 NIU_FLAGS_XCVR_SERDES)) {
David S. Millera3138df2007-10-09 01:54:01 -07002192 case 0:
2193 /* 1G copper */
2194 tp = &phy_template_1g_copper;
2195 if (plat_type == PLAT_TYPE_VF_P0)
2196 phy_addr_off = 10;
2197 else if (plat_type == PLAT_TYPE_VF_P1)
2198 phy_addr_off = 26;
2199
2200 phy_addr_off += (np->port ^ 0x3);
2201 break;
2202
2203 case NIU_FLAGS_10G:
2204 /* 10G copper */
2205 tp = &phy_template_10g_copper;
2206 break;
2207
2208 case NIU_FLAGS_FIBER:
2209 /* 1G fiber */
2210 tp = &phy_template_1g_fiber;
2211 break;
2212
2213 case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
2214 /* 10G fiber */
2215 tp = &phy_template_10g_fiber;
2216 if (plat_type == PLAT_TYPE_VF_P0 ||
2217 plat_type == PLAT_TYPE_VF_P1)
2218 phy_addr_off = 8;
2219 phy_addr_off += np->port;
Matheos Workua5d6ab52008-04-24 21:09:20 -07002220 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
2221 tp = &phy_template_10g_fiber_hotplug;
2222 if (np->port == 0)
2223 phy_addr_off = 8;
2224 if (np->port == 1)
2225 phy_addr_off = 12;
2226 }
David S. Millera3138df2007-10-09 01:54:01 -07002227 break;
2228
Matheos Worku5fbd7e22008-02-28 21:25:43 -08002229 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
2230 case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
2231 case NIU_FLAGS_XCVR_SERDES:
2232 switch (np->port) {
2233 case 0:
2234 case 1:
2235 tp = &phy_template_10g_serdes;
2236 break;
2237 case 2:
2238 case 3:
2239 tp = &phy_template_1g_rgmii;
2240 break;
2241 default:
2242 return -EINVAL;
2244 }
2245 phy_addr_off = niu_atca_port_num[np->port];
2246 break;
2247
David S. Millera3138df2007-10-09 01:54:01 -07002248 default:
2249 return -EINVAL;
2250 }
2251 }
2252
2253 np->phy_ops = tp->ops;
2254 np->phy_addr = tp->phy_addr_base + phy_addr_off;
2255
2256 return 0;
2257}
2258
2259static int niu_init_link(struct niu *np)
2260{
2261 struct niu_parent *parent = np->parent;
2262 int err, ignore;
2263
2264 if (parent->plat_type == PLAT_TYPE_NIU) {
2265 err = niu_xcvr_init(np);
2266 if (err)
2267 return err;
2268 msleep(200);
2269 }
2270 err = niu_serdes_init(np);
2271 if (err)
2272 return err;
2273 msleep(200);
2274 err = niu_xcvr_init(np);
2275 if (!err)
2276 niu_link_status(np, &ignore);
2277 return 0;
2278}
2279
2280static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
2281{
2282 u16 reg0 = addr[4] << 8 | addr[5];
2283 u16 reg1 = addr[2] << 8 | addr[3];
2284 u16 reg2 = addr[0] << 8 | addr[1];
2285
2286 if (np->flags & NIU_FLAGS_XMAC) {
2287 nw64_mac(XMAC_ADDR0, reg0);
2288 nw64_mac(XMAC_ADDR1, reg1);
2289 nw64_mac(XMAC_ADDR2, reg2);
2290 } else {
2291 nw64_mac(BMAC_ADDR0, reg0);
2292 nw64_mac(BMAC_ADDR1, reg1);
2293 nw64_mac(BMAC_ADDR2, reg2);
2294 }
2295}
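
/* The three writes above store the station address low half first:
 * ADDR0 holds bytes 4-5, ADDR1 bytes 2-3, ADDR2 bytes 0-1, each as a
 * big-endian 16-bit value.  A minimal, self-contained sketch of that
 * packing (illustration only, not driver code; pack_mac is a
 * hypothetical helper):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void pack_mac(const uint8_t *addr, uint16_t reg[3])
{
	reg[0] = (uint16_t)(addr[4] << 8 | addr[5]);	/* ADDR0 */
	reg[1] = (uint16_t)(addr[2] << 8 | addr[3]);	/* ADDR1 */
	reg[2] = (uint16_t)(addr[0] << 8 | addr[1]);	/* ADDR2 */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint16_t reg[3];

	pack_mac(mac, reg);
	printf("%04x %04x %04x\n", reg[0], reg[1], reg[2]);
	/* 00:11:22:33:44:55 -> 4455 2233 0011 */
	return 0;
}
#endif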
2296
2297static int niu_num_alt_addr(struct niu *np)
2298{
2299 if (np->flags & NIU_FLAGS_XMAC)
2300 return XMAC_NUM_ALT_ADDR;
2301 else
2302 return BMAC_NUM_ALT_ADDR;
2303}
2304
2305static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
2306{
2307 u16 reg0 = addr[4] << 8 | addr[5];
2308 u16 reg1 = addr[2] << 8 | addr[3];
2309 u16 reg2 = addr[0] << 8 | addr[1];
2310
2311 if (index >= niu_num_alt_addr(np))
2312 return -EINVAL;
2313
2314 if (np->flags & NIU_FLAGS_XMAC) {
2315 nw64_mac(XMAC_ALT_ADDR0(index), reg0);
2316 nw64_mac(XMAC_ALT_ADDR1(index), reg1);
2317 nw64_mac(XMAC_ALT_ADDR2(index), reg2);
2318 } else {
2319 nw64_mac(BMAC_ALT_ADDR0(index), reg0);
2320 nw64_mac(BMAC_ALT_ADDR1(index), reg1);
2321 nw64_mac(BMAC_ALT_ADDR2(index), reg2);
2322 }
2323
2324 return 0;
2325}
2326
2327static int niu_enable_alt_mac(struct niu *np, int index, int on)
2328{
2329 unsigned long reg;
2330 u64 val, mask;
2331
2332 if (index >= niu_num_alt_addr(np))
2333 return -EINVAL;
2334
Matheos Workufa907892008-02-20 00:18:09 -08002335 if (np->flags & NIU_FLAGS_XMAC) {
David S. Millera3138df2007-10-09 01:54:01 -07002336 reg = XMAC_ADDR_CMPEN;
Matheos Workufa907892008-02-20 00:18:09 -08002337 mask = 1 << index;
2338 } else {
David S. Millera3138df2007-10-09 01:54:01 -07002339 reg = BMAC_ADDR_CMPEN;
Matheos Workufa907892008-02-20 00:18:09 -08002340 mask = 1 << (index + 1);
2341 }
David S. Millera3138df2007-10-09 01:54:01 -07002342
2343 val = nr64_mac(reg);
2344 if (on)
2345 val |= mask;
2346 else
2347 val &= ~mask;
2348 nw64_mac(reg, val);
2349
2350 return 0;
2351}
2352
2353static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
2354 int num, int mac_pref)
2355{
2356 u64 val = nr64_mac(reg);
2357 val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
2358 val |= num;
2359 if (mac_pref)
2360 val |= HOST_INFO_MPR;
2361 nw64_mac(reg, val);
2362}
2363
2364static int __set_rdc_table_num(struct niu *np,
2365 int xmac_index, int bmac_index,
2366 int rdc_table_num, int mac_pref)
2367{
2368 unsigned long reg;
2369
2370 if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
2371 return -EINVAL;
2372 if (np->flags & NIU_FLAGS_XMAC)
2373 reg = XMAC_HOST_INFO(xmac_index);
2374 else
2375 reg = BMAC_HOST_INFO(bmac_index);
2376 __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
2377 return 0;
2378}
2379
2380static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
2381 int mac_pref)
2382{
2383 return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
2384}
2385
2386static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
2387 int mac_pref)
2388{
2389 return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
2390}
2391
2392static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
2393 int table_num, int mac_pref)
2394{
2395 if (idx >= niu_num_alt_addr(np))
2396 return -EINVAL;
2397 return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
2398}
2399
2400static u64 vlan_entry_set_parity(u64 reg_val)
2401{
2402 u64 port01_mask;
2403 u64 port23_mask;
2404
2405 port01_mask = 0x00ff;
2406 port23_mask = 0xff00;
2407
2408 if (hweight64(reg_val & port01_mask) & 1)
2409 reg_val |= ENET_VLAN_TBL_PARITY0;
2410 else
2411 reg_val &= ~ENET_VLAN_TBL_PARITY0;
2412
2413 if (hweight64(reg_val & port23_mask) & 1)
2414 reg_val |= ENET_VLAN_TBL_PARITY1;
2415 else
2416 reg_val &= ~ENET_VLAN_TBL_PARITY1;
2417
2418 return reg_val;
2419}
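
/* vlan_entry_set_parity() keeps one parity bit per byte of port data:
 * PARITY0 covers the ports 0/1 byte (bits 0-7), PARITY1 the ports 2/3
 * byte (bits 8-15), and each bit is set exactly when its byte has odd
 * population count, so byte-plus-parity always has even weight.  A
 * runnable sketch with hypothetical bit positions (the real ones are
 * ENET_VLAN_TBL_PARITY0/1 from niu.h):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define PARITY0	(1ULL << 16)	/* hypothetical positions */
#define PARITY1	(1ULL << 17)

static uint64_t set_parity(uint64_t v)
{
	/* __builtin_popcountll stands in for the kernel's hweight64() */
	if (__builtin_popcountll(v & 0x00ffULL) & 1)
		v |= PARITY0;
	else
		v &= ~PARITY0;
	if (__builtin_popcountll(v & 0xff00ULL) & 1)
		v |= PARITY1;
	else
		v &= ~PARITY1;
	return v;
}

int main(void)
{
	/* 0x07 has three bits in the ports 0/1 byte -> PARITY0 set */
	printf("%#llx\n", (unsigned long long)set_parity(0x07));
	return 0;
}
#endif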
2420
2421static void vlan_tbl_write(struct niu *np, unsigned long index,
2422 int port, int vpr, int rdc_table)
2423{
2424 u64 reg_val = nr64(ENET_VLAN_TBL(index));
2425
2426 reg_val &= ~((ENET_VLAN_TBL_VPR |
2427 ENET_VLAN_TBL_VLANRDCTBLN) <<
2428 ENET_VLAN_TBL_SHIFT(port));
2429 if (vpr)
2430 reg_val |= (ENET_VLAN_TBL_VPR <<
2431 ENET_VLAN_TBL_SHIFT(port));
2432 reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
2433
2434 reg_val = vlan_entry_set_parity(reg_val);
2435
2436 nw64(ENET_VLAN_TBL(index), reg_val);
2437}
2438
2439static void vlan_tbl_clear(struct niu *np)
2440{
2441 int i;
2442
2443 for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
2444 nw64(ENET_VLAN_TBL(i), 0);
2445}
2446
2447static int tcam_wait_bit(struct niu *np, u64 bit)
2448{
2449 int limit = 1000;
2450
2451 while (--limit > 0) {
2452 if (nr64(TCAM_CTL) & bit)
2453 break;
2454 udelay(1);
2455 }
2456 if (limit <= 0)
2457 return -ENODEV;
2458
2459 return 0;
2460}
2461
2462static int tcam_flush(struct niu *np, int index)
2463{
2464 nw64(TCAM_KEY_0, 0x00);
2465 nw64(TCAM_KEY_MASK_0, 0xff);
2466 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
2467
2468 return tcam_wait_bit(np, TCAM_CTL_STAT);
2469}
2470
2471#if 0
2472static int tcam_read(struct niu *np, int index,
2473 u64 *key, u64 *mask)
2474{
2475 int err;
2476
2477 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
2478 err = tcam_wait_bit(np, TCAM_CTL_STAT);
2479 if (!err) {
2480 key[0] = nr64(TCAM_KEY_0);
2481 key[1] = nr64(TCAM_KEY_1);
2482 key[2] = nr64(TCAM_KEY_2);
2483 key[3] = nr64(TCAM_KEY_3);
2484 mask[0] = nr64(TCAM_KEY_MASK_0);
2485 mask[1] = nr64(TCAM_KEY_MASK_1);
2486 mask[2] = nr64(TCAM_KEY_MASK_2);
2487 mask[3] = nr64(TCAM_KEY_MASK_3);
2488 }
2489 return err;
2490}
2491#endif
2492
2493static int tcam_write(struct niu *np, int index,
2494 u64 *key, u64 *mask)
2495{
2496 nw64(TCAM_KEY_0, key[0]);
2497 nw64(TCAM_KEY_1, key[1]);
2498 nw64(TCAM_KEY_2, key[2]);
2499 nw64(TCAM_KEY_3, key[3]);
2500 nw64(TCAM_KEY_MASK_0, mask[0]);
2501 nw64(TCAM_KEY_MASK_1, mask[1]);
2502 nw64(TCAM_KEY_MASK_2, mask[2]);
2503 nw64(TCAM_KEY_MASK_3, mask[3]);
2504 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
2505
2506 return tcam_wait_bit(np, TCAM_CTL_STAT);
2507}
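
/* tcam_write() programs a (key, mask) pair; the usual TCAM convention,
 * which the sketch below assumes, is that an entry hits when the
 * lookup key agrees with the stored key on every bit the mask cares
 * about.  Illustration only; the actual NIU key layout is in niu.h.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static int tcam_hit(uint64_t lookup, uint64_t key, uint64_t mask)
{
	return (lookup & mask) == (key & mask);
}

int main(void)
{
	/* mask 0xff00: only the high byte must match */
	printf("%d %d\n", tcam_hit(0x12ab, 0x12cd, 0xff00),
	       tcam_hit(0x13ab, 0x12cd, 0xff00));	/* -> 1 0 */
	return 0;
}
#endif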
2508
2509#if 0
2510static int tcam_assoc_read(struct niu *np, int index, u64 *data)
2511{
2512 int err;
2513
2514 nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
2515 err = tcam_wait_bit(np, TCAM_CTL_STAT);
2516 if (!err)
2517 *data = nr64(TCAM_KEY_1);
2518
2519 return err;
2520}
2521#endif
2522
2523static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
2524{
2525 nw64(TCAM_KEY_1, assoc_data);
2526 nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));
2527
2528 return tcam_wait_bit(np, TCAM_CTL_STAT);
2529}
2530
2531static void tcam_enable(struct niu *np, int on)
2532{
2533 u64 val = nr64(FFLP_CFG_1);
2534
2535 if (on)
2536 val &= ~FFLP_CFG_1_TCAM_DIS;
2537 else
2538 val |= FFLP_CFG_1_TCAM_DIS;
2539 nw64(FFLP_CFG_1, val);
2540}
2541
2542static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
2543{
2544 u64 val = nr64(FFLP_CFG_1);
2545
2546 val &= ~(FFLP_CFG_1_FFLPINITDONE |
2547 FFLP_CFG_1_CAMLAT |
2548 FFLP_CFG_1_CAMRATIO);
2549 val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
2550 val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
2551 nw64(FFLP_CFG_1, val);
2552
2553 val = nr64(FFLP_CFG_1);
2554 val |= FFLP_CFG_1_FFLPINITDONE;
2555 nw64(FFLP_CFG_1, val);
2556}
2557
2558static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
2559 int on)
2560{
2561 unsigned long reg;
2562 u64 val;
2563
2564 if (class < CLASS_CODE_ETHERTYPE1 ||
2565 class > CLASS_CODE_ETHERTYPE2)
2566 return -EINVAL;
2567
2568 reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
2569 val = nr64(reg);
2570 if (on)
2571 val |= L2_CLS_VLD;
2572 else
2573 val &= ~L2_CLS_VLD;
2574 nw64(reg, val);
2575
2576 return 0;
2577}
2578
2579#if 0
2580static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
2581 u64 ether_type)
2582{
2583 unsigned long reg;
2584 u64 val;
2585
2586 if (class < CLASS_CODE_ETHERTYPE1 ||
2587 class > CLASS_CODE_ETHERTYPE2 ||
2588 (ether_type & ~(u64)0xffff) != 0)
2589 return -EINVAL;
2590
2591 reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
2592 val = nr64(reg);
2593 val &= ~L2_CLS_ETYPE;
2594 val |= (ether_type << L2_CLS_ETYPE_SHIFT);
2595 nw64(reg, val);
2596
2597 return 0;
2598}
2599#endif
2600
2601static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
2602 int on)
2603{
2604 unsigned long reg;
2605 u64 val;
2606
2607 if (class < CLASS_CODE_USER_PROG1 ||
2608 class > CLASS_CODE_USER_PROG4)
2609 return -EINVAL;
2610
2611 reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
2612 val = nr64(reg);
2613 if (on)
2614 val |= L3_CLS_VALID;
2615 else
2616 val &= ~L3_CLS_VALID;
2617 nw64(reg, val);
2618
2619 return 0;
2620}
2621
2622#if 0
2623static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
2624 int ipv6, u64 protocol_id,
2625 u64 tos_mask, u64 tos_val)
2626{
2627 unsigned long reg;
2628 u64 val;
2629
2630 if (class < CLASS_CODE_USER_PROG1 ||
2631 class > CLASS_CODE_USER_PROG4 ||
2632 (protocol_id & ~(u64)0xff) != 0 ||
2633 (tos_mask & ~(u64)0xff) != 0 ||
2634 (tos_val & ~(u64)0xff) != 0)
2635 return -EINVAL;
2636
2637 reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
2638 val = nr64(reg);
2639 val &= ~(L3_CLS_IPVER | L3_CLS_PID |
2640 L3_CLS_TOSMASK | L3_CLS_TOS);
2641 if (ipv6)
2642 val |= L3_CLS_IPVER;
2643 val |= (protocol_id << L3_CLS_PID_SHIFT);
2644 val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
2645 val |= (tos_val << L3_CLS_TOS_SHIFT);
2646 nw64(reg, val);
2647
2648 return 0;
2649}
2650#endif
2651
2652static int tcam_early_init(struct niu *np)
2653{
2654 unsigned long i;
2655 int err;
2656
2657 tcam_enable(np, 0);
2658 tcam_set_lat_and_ratio(np,
2659 DEFAULT_TCAM_LATENCY,
2660 DEFAULT_TCAM_ACCESS_RATIO);
2661 for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
2662 err = tcam_user_eth_class_enable(np, i, 0);
2663 if (err)
2664 return err;
2665 }
2666 for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
2667 err = tcam_user_ip_class_enable(np, i, 0);
2668 if (err)
2669 return err;
2670 }
2671
2672 return 0;
2673}
2674
2675static int tcam_flush_all(struct niu *np)
2676{
2677 unsigned long i;
2678
2679 for (i = 0; i < np->parent->tcam_num_entries; i++) {
2680 int err = tcam_flush(np, i);
2681 if (err)
2682 return err;
2683 }
2684 return 0;
2685}
2686
2687static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
2688{
2689 return ((u64)index | (num_entries == 1 ?
2690 HASH_TBL_ADDR_AUTOINC : 0));
2691}
2692
2693#if 0
2694static int hash_read(struct niu *np, unsigned long partition,
2695 unsigned long index, unsigned long num_entries,
2696 u64 *data)
2697{
2698 u64 val = hash_addr_regval(index, num_entries);
2699 unsigned long i;
2700
2701 if (partition >= FCRAM_NUM_PARTITIONS ||
2702 index + num_entries > FCRAM_SIZE)
2703 return -EINVAL;
2704
2705 nw64(HASH_TBL_ADDR(partition), val);
2706 for (i = 0; i < num_entries; i++)
2707 data[i] = nr64(HASH_TBL_DATA(partition));
2708
2709 return 0;
2710}
2711#endif
2712
2713static int hash_write(struct niu *np, unsigned long partition,
2714 unsigned long index, unsigned long num_entries,
2715 u64 *data)
2716{
2717 u64 val = hash_addr_regval(index, num_entries);
2718 unsigned long i;
2719
2720 if (partition >= FCRAM_NUM_PARTITIONS ||
2721 index + (num_entries * 8) > FCRAM_SIZE)
2722 return -EINVAL;
2723
2724 nw64(HASH_TBL_ADDR(partition), val);
2725 for (i = 0; i < num_entries; i++)
2726 nw64(HASH_TBL_DATA(partition), data[i]);
2727
2728 return 0;
2729}
2730
2731static void fflp_reset(struct niu *np)
2732{
2733 u64 val;
2734
2735 nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
2736 udelay(10);
2737 nw64(FFLP_CFG_1, 0);
2738
2739 val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
2740 nw64(FFLP_CFG_1, val);
2741}
2742
2743static void fflp_set_timings(struct niu *np)
2744{
2745 u64 val = nr64(FFLP_CFG_1);
2746
2747 val &= ~FFLP_CFG_1_FFLPINITDONE;
2748 val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
2749 nw64(FFLP_CFG_1, val);
2750
2751 val = nr64(FFLP_CFG_1);
2752 val |= FFLP_CFG_1_FFLPINITDONE;
2753 nw64(FFLP_CFG_1, val);
2754
2755 val = nr64(FCRAM_REF_TMR);
2756 val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
2757 val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
2758 val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
2759 nw64(FCRAM_REF_TMR, val);
2760}
2761
2762static int fflp_set_partition(struct niu *np, u64 partition,
2763 u64 mask, u64 base, int enable)
2764{
2765 unsigned long reg;
2766 u64 val;
2767
2768 if (partition >= FCRAM_NUM_PARTITIONS ||
2769 (mask & ~(u64)0x1f) != 0 ||
2770 (base & ~(u64)0x1f) != 0)
2771 return -EINVAL;
2772
2773 reg = FLW_PRT_SEL(partition);
2774
2775 val = nr64(reg);
2776 val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
2777 val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
2778 val |= (base << FLW_PRT_SEL_BASE_SHIFT);
2779 if (enable)
2780 val |= FLW_PRT_SEL_EXT;
2781 nw64(reg, val);
2782
2783 return 0;
2784}
2785
2786static int fflp_disable_all_partitions(struct niu *np)
2787{
2788 unsigned long i;
2789
2790 for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
2791 int err = fflp_set_partition(np, i, 0, 0, 0);
2792 if (err)
2793 return err;
2794 }
2795 return 0;
2796}
2797
2798static void fflp_llcsnap_enable(struct niu *np, int on)
2799{
2800 u64 val = nr64(FFLP_CFG_1);
2801
2802 if (on)
2803 val |= FFLP_CFG_1_LLCSNAP;
2804 else
2805 val &= ~FFLP_CFG_1_LLCSNAP;
2806 nw64(FFLP_CFG_1, val);
2807}
2808
2809static void fflp_errors_enable(struct niu *np, int on)
2810{
2811 u64 val = nr64(FFLP_CFG_1);
2812
2813 if (on)
2814 val &= ~FFLP_CFG_1_ERRORDIS;
2815 else
2816 val |= FFLP_CFG_1_ERRORDIS;
2817 nw64(FFLP_CFG_1, val);
2818}
2819
2820static int fflp_hash_clear(struct niu *np)
2821{
2822 struct fcram_hash_ipv4 ent;
2823 unsigned long i;
2824
2825 /* IPV4 hash entry with valid bit clear, rest is don't care. */
2826 memset(&ent, 0, sizeof(ent));
2827 ent.header = HASH_HEADER_EXT;
2828
2829 for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
2830 int err = hash_write(np, 0, i, 1, (u64 *) &ent);
2831 if (err)
2832 return err;
2833 }
2834 return 0;
2835}
2836
2837static int fflp_early_init(struct niu *np)
2838{
2839 struct niu_parent *parent;
2840 unsigned long flags;
2841 int err;
2842
2843 niu_lock_parent(np, flags);
2844
2845 parent = np->parent;
2846 err = 0;
2847 if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
2848 niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n",
2849 np->port);
2850 if (np->parent->plat_type != PLAT_TYPE_NIU) {
2851 fflp_reset(np);
2852 fflp_set_timings(np);
2853 err = fflp_disable_all_partitions(np);
2854 if (err) {
2855 niudbg(PROBE, "fflp_disable_all_partitions "
2856 "failed, err=%d\n", err);
2857 goto out;
2858 }
2859 }
2860
2861 err = tcam_early_init(np);
2862 if (err) {
2863 niudbg(PROBE, "tcam_early_init failed, err=%d\n",
2864 err);
2865 goto out;
2866 }
2867 fflp_llcsnap_enable(np, 1);
2868 fflp_errors_enable(np, 0);
2869 nw64(H1POLY, 0);
2870 nw64(H2POLY, 0);
2871
2872 err = tcam_flush_all(np);
2873 if (err) {
2874 niudbg(PROBE, "tcam_flush_all failed, err=%d\n",
2875 err);
2876 goto out;
2877 }
2878 if (np->parent->plat_type != PLAT_TYPE_NIU) {
2879 err = fflp_hash_clear(np);
2880 if (err) {
2881 niudbg(PROBE, "fflp_hash_clear failed, "
2882 "err=%d\n", err);
2883 goto out;
2884 }
2885 }
2886
2887 vlan_tbl_clear(np);
2888
2889 niudbg(PROBE, "fflp_early_init: Success\n");
2890 parent->flags |= PARENT_FLGS_CLS_HWINIT;
2891 }
2892out:
2893 niu_unlock_parent(np, flags);
2894 return err;
2895}
2896
2897static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
2898{
2899 if (class_code < CLASS_CODE_USER_PROG1 ||
2900 class_code > CLASS_CODE_SCTP_IPV6)
2901 return -EINVAL;
2902
2903 nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
2904 return 0;
2905}
2906
2907static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
2908{
2909 if (class_code < CLASS_CODE_USER_PROG1 ||
2910 class_code > CLASS_CODE_SCTP_IPV6)
2911 return -EINVAL;
2912
2913 nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
2914 return 0;
2915}
2916
2917static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
2918 u32 offset, u32 size)
2919{
2920 int i = skb_shinfo(skb)->nr_frags;
2921 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2922
2923 frag->page = page;
2924 frag->page_offset = offset;
2925 frag->size = size;
2926
2927 skb->len += size;
2928 skb->data_len += size;
2929 skb->truesize += size;
2930
2931 skb_shinfo(skb)->nr_frags = i + 1;
2932}
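
/* niu_rx_skb_append() attaches a page fragment without copying: len,
 * data_len and truesize all grow by the fragment size while the linear
 * area (len - data_len) stays untouched.  A toy model of just that
 * accounting (illustration, not the real struct sk_buff):
 */
#if 0
#include <stdio.h>

struct toy_skb { unsigned len, data_len, truesize, nr_frags; };

static void append(struct toy_skb *skb, unsigned size)
{
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
	skb->nr_frags++;
}

int main(void)
{
	struct toy_skb skb = { 0, 0, 128, 0 };

	append(&skb, 256);
	append(&skb, 1024);
	printf("len=%u frags=%u truesize=%u\n",
	       skb.len, skb.nr_frags, skb.truesize);
	return 0;
}
#endif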
2933
2934static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
2935{
2936 a >>= PAGE_SHIFT;
2937 a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
2938
2939 return (a & (MAX_RBR_RING_SIZE - 1));
2940}
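
/* The hash above discards the in-page offset, then folds the page
 * frame number onto itself so nearby pages spread across buckets.
 * Runnable sketch with stand-in constants (the real ring size is
 * MAX_RBR_RING_SIZE from niu.h; 4 KB pages assumed):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_X	12	/* assuming 4 KB pages */
#define RING_SIZE	512	/* stand-in for MAX_RBR_RING_SIZE */
#define RING_SHIFT	9	/* ilog2(RING_SIZE) */

static unsigned int hash_rxaddr(uint64_t a)
{
	a >>= PAGE_SHIFT_X;
	a ^= a >> RING_SHIFT;
	return (unsigned int)(a & (RING_SIZE - 1));
}

int main(void)
{
	/* two DMA addresses one page apart land in adjacent buckets */
	printf("%u %u\n", hash_rxaddr(0x10000000ULL),
	       hash_rxaddr(0x10001000ULL));	/* -> 128 129 */
	return 0;
}
#endif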
2941
2942static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
2943 struct page ***link)
2944{
2945 unsigned int h = niu_hash_rxaddr(rp, addr);
2946 struct page *p, **pp;
2947
2948 addr &= PAGE_MASK;
2949 pp = &rp->rxhash[h];
2950 for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
2951 if (p->index == addr) {
2952 *link = pp;
2953 break;
2954 }
2955 }
2956
2957 return p;
2958}
2959
2960static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
2961{
2962 unsigned int h = niu_hash_rxaddr(rp, base);
2963
2964 page->index = base;
2965 page->mapping = (struct address_space *) rp->rxhash[h];
2966 rp->rxhash[h] = page;
2967}
2968
2969static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
2970 gfp_t mask, int start_index)
2971{
2972 struct page *page;
2973 u64 addr;
2974 int i;
2975
2976 page = alloc_page(mask);
2977 if (!page)
2978 return -ENOMEM;
2979
2980 addr = np->ops->map_page(np->device, page, 0,
2981 PAGE_SIZE, DMA_FROM_DEVICE);
2982
2983 niu_hash_page(rp, page, addr);
2984 if (rp->rbr_blocks_per_page > 1)
2985 atomic_add(rp->rbr_blocks_per_page - 1,
2986 &compound_head(page)->_count);
2987
2988 for (i = 0; i < rp->rbr_blocks_per_page; i++) {
2989 __le32 *rbr = &rp->rbr[start_index + i];
2990
2991 *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
2992 addr += rp->rbr_block_size;
2993 }
2994
2995 return 0;
2996}
2997
2998static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
2999{
3000 int index = rp->rbr_index;
3001
3002 rp->rbr_pending++;
3003 if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
3004 int err = niu_rbr_add_page(np, rp, mask, index);
3005
3006 if (unlikely(err)) {
3007 rp->rbr_pending--;
3008 return;
3009 }
3010
3011 rp->rbr_index += rp->rbr_blocks_per_page;
3012 BUG_ON(rp->rbr_index > rp->rbr_table_size);
3013 if (rp->rbr_index == rp->rbr_table_size)
3014 rp->rbr_index = 0;
3015
3016 if (rp->rbr_pending >= rp->rbr_kick_thresh) {
3017 nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
3018 rp->rbr_pending = 0;
3019 }
3020 }
3021}
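
/* niu_rbr_refill() batches work on two levels: a fresh page is mapped
 * only once rbr_blocks_per_page buffers are owed, and the hardware is
 * kicked only once rbr_kick_thresh buffers have accumulated.  A mock
 * of that bookkeeping (hypothetical numbers, error path omitted):
 */
#if 0
#include <stdio.h>

struct mock_rbr {
	int index, pending, table_size;
	int blocks_per_page, kick_thresh;
};

static void refill(struct mock_rbr *rp)
{
	if (++rp->pending % rp->blocks_per_page)
		return;		/* current page not complete yet */
	rp->index = (rp->index + rp->blocks_per_page) % rp->table_size;
	if (rp->pending >= rp->kick_thresh) {
		printf("kick %d buffers, index now %d\n",
		       rp->pending, rp->index);
		rp->pending = 0;
	}
}

int main(void)
{
	struct mock_rbr rp = { 0, 0, 64, 4, 16 };
	int i;

	for (i = 0; i < 40; i++)
		refill(&rp);
	return 0;
}
#endif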
3022
3023static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
3024{
3025 unsigned int index = rp->rcr_index;
3026 int num_rcr = 0;
3027
3028 rp->rx_dropped++;
3029 while (1) {
3030 struct page *page, **link;
3031 u64 addr, val;
3032 u32 rcr_size;
3033
3034 num_rcr++;
3035
3036 val = le64_to_cpup(&rp->rcr[index]);
3037 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
3038 RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
3039 page = niu_find_rxpage(rp, addr, &link);
3040
3041 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
3042 RCR_ENTRY_PKTBUFSZ_SHIFT];
3043 if ((page->index + PAGE_SIZE) - rcr_size == addr) {
3044 *link = (struct page *) page->mapping;
3045 np->ops->unmap_page(np->device, page->index,
3046 PAGE_SIZE, DMA_FROM_DEVICE);
3047 page->index = 0;
3048 page->mapping = NULL;
3049 __free_page(page);
3050 rp->rbr_refill_pending++;
3051 }
3052
3053 index = NEXT_RCR(rp, index);
3054 if (!(val & RCR_ENTRY_MULTI))
3055 break;
3056
3057 }
3058 rp->rcr_index = index;
3059
3060 return num_rcr;
3061}
3062
3063static int niu_process_rx_pkt(struct niu *np, struct rx_ring_info *rp)
3064{
3065 unsigned int index = rp->rcr_index;
3066 struct sk_buff *skb;
3067 int len, num_rcr;
3068
3069 skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
3070 if (unlikely(!skb))
3071 return niu_rx_pkt_ignore(np, rp);
3072
3073 num_rcr = 0;
3074 while (1) {
3075 struct page *page, **link;
3076 u32 rcr_size, append_size;
3077 u64 addr, val, off;
3078
3079 num_rcr++;
3080
3081 val = le64_to_cpup(&rp->rcr[index]);
3082
3083 len = (val & RCR_ENTRY_L2_LEN) >>
3084 RCR_ENTRY_L2_LEN_SHIFT;
3085 len -= ETH_FCS_LEN;
3086
3087 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
3088 RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
3089 page = niu_find_rxpage(rp, addr, &link);
3090
3091 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
3092 RCR_ENTRY_PKTBUFSZ_SHIFT];
3093
3094 off = addr & ~PAGE_MASK;
3095 append_size = rcr_size;
3096 if (num_rcr == 1) {
3097 int ptype;
3098
3099 off += 2;
3100 append_size -= 2;
3101
3102 ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
3103 if ((ptype == RCR_PKT_TYPE_TCP ||
3104 ptype == RCR_PKT_TYPE_UDP) &&
3105 !(val & (RCR_ENTRY_NOPORT |
3106 RCR_ENTRY_ERROR)))
3107 skb->ip_summed = CHECKSUM_UNNECESSARY;
3108 else
3109 skb->ip_summed = CHECKSUM_NONE;
3110 }
3111 if (!(val & RCR_ENTRY_MULTI))
3112 append_size = len - skb->len;
3113
3114 niu_rx_skb_append(skb, page, off, append_size);
3115 if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
3116 *link = (struct page *) page->mapping;
3117 np->ops->unmap_page(np->device, page->index,
3118 PAGE_SIZE, DMA_FROM_DEVICE);
3119 page->index = 0;
3120 page->mapping = NULL;
3121 rp->rbr_refill_pending++;
3122 } else
3123 get_page(page);
3124
3125 index = NEXT_RCR(rp, index);
3126 if (!(val & RCR_ENTRY_MULTI))
3127 break;
3128
3129 }
3130 rp->rcr_index = index;
3131
3132 skb_reserve(skb, NET_IP_ALIGN);
3133 __pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX));
3134
3135 rp->rx_packets++;
3136 rp->rx_bytes += skb->len;
3137
3138 skb->protocol = eth_type_trans(skb, np->dev);
3139 netif_receive_skb(skb);
3140
David S. Miller792dd902008-01-04 23:52:06 -08003141 np->dev->last_rx = jiffies;
3142
David S. Millera3138df2007-10-09 01:54:01 -07003143 return num_rcr;
3144}
3145
3146static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
3147{
3148 int blocks_per_page = rp->rbr_blocks_per_page;
3149 int err, index = rp->rbr_index;
3150
3151 err = 0;
3152 while (index < (rp->rbr_table_size - blocks_per_page)) {
3153 err = niu_rbr_add_page(np, rp, mask, index);
3154 if (err)
3155 break;
3156
3157 index += blocks_per_page;
3158 }
3159
3160 rp->rbr_index = index;
3161 return err;
3162}
3163
3164static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
3165{
3166 int i;
3167
3168 for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
3169 struct page *page;
3170
3171 page = rp->rxhash[i];
3172 while (page) {
3173 struct page *next = (struct page *) page->mapping;
3174 u64 base = page->index;
3175
3176 np->ops->unmap_page(np->device, base, PAGE_SIZE,
3177 DMA_FROM_DEVICE);
3178 page->index = 0;
3179 page->mapping = NULL;
3180
3181 __free_page(page);
3182
3183 page = next;
3184 }
3185 }
3186
3187 for (i = 0; i < rp->rbr_table_size; i++)
3188 rp->rbr[i] = cpu_to_le32(0);
3189 rp->rbr_index = 0;
3190}
3191
3192static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
3193{
3194 struct tx_buff_info *tb = &rp->tx_buffs[idx];
3195 struct sk_buff *skb = tb->skb;
3196 struct tx_pkt_hdr *tp;
3197 u64 tx_flags;
3198 int i, len;
3199
3200 tp = (struct tx_pkt_hdr *) skb->data;
3201 tx_flags = le64_to_cpup(&tp->flags);
3202
3203 rp->tx_packets++;
3204 rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
3205 ((tx_flags & TXHDR_PAD) / 2));
3206
3207 len = skb_headlen(skb);
3208 np->ops->unmap_single(np->device, tb->mapping,
3209 len, DMA_TO_DEVICE);
3210
3211 if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
3212 rp->mark_pending--;
3213
3214 tb->skb = NULL;
3215 do {
3216 idx = NEXT_TX(rp, idx);
3217 len -= MAX_TX_DESC_LEN;
3218 } while (len > 0);
3219
3220 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3221 tb = &rp->tx_buffs[idx];
3222 BUG_ON(tb->skb != NULL);
3223 np->ops->unmap_page(np->device, tb->mapping,
3224 skb_shinfo(skb)->frags[i].size,
3225 DMA_TO_DEVICE);
3226 idx = NEXT_TX(rp, idx);
3227 }
3228
3229 dev_kfree_skb(skb);
3230
3231 return idx;
3232}
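
/* The do/while above walks one descriptor for the first
 * MAX_TX_DESC_LEN bytes of the linear area and one more per additional
 * chunk, i.e. ceil(headlen / MAX_TX_DESC_LEN) with a minimum of one.
 * Worked example with a stand-in chunk size:
 */
#if 0
#include <stdio.h>

#define MAX_LEN	4096	/* stand-in for MAX_TX_DESC_LEN */

static int descrs_for_headlen(int len)
{
	int n = 0;

	do {
		n++;
		len -= MAX_LEN;
	} while (len > 0);
	return n;
}

int main(void)
{
	printf("%d %d %d\n", descrs_for_headlen(60),
	       descrs_for_headlen(4096), descrs_for_headlen(4097));
	/* -> 1 1 2 */
	return 0;
}
#endif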
3233
3234#define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4)
3235
3236static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
3237{
David S. Millerb4c21632008-07-15 03:48:19 -07003238 struct netdev_queue *txq;
David S. Millera3138df2007-10-09 01:54:01 -07003239 u16 pkt_cnt, tmp;
David S. Millerb4c21632008-07-15 03:48:19 -07003240 int cons, index;
David S. Millera3138df2007-10-09 01:54:01 -07003241 u64 cs;
3242
David S. Millerb4c21632008-07-15 03:48:19 -07003243 index = (rp - np->tx_rings);
3244 txq = netdev_get_tx_queue(np->dev, index);
3245
David S. Millera3138df2007-10-09 01:54:01 -07003246 cs = rp->tx_cs;
3247 if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
3248 goto out;
3249
3250 tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
3251 pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
3252 (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
3253
3254 rp->last_pkt_cnt = tmp;
3255
3256 cons = rp->cons;
3257
3258 niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n",
3259 np->dev->name, pkt_cnt, cons);
3260
3261 while (pkt_cnt--)
3262 cons = release_tx_packet(np, rp, cons);
3263
3264 rp->cons = cons;
3265 smp_mb();
3266
3267out:
David S. Millerb4c21632008-07-15 03:48:19 -07003268 if (unlikely(netif_tx_queue_stopped(txq) &&
David S. Millera3138df2007-10-09 01:54:01 -07003269 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
David S. Millerb4c21632008-07-15 03:48:19 -07003270 __netif_tx_lock(txq, smp_processor_id());
3271 if (netif_tx_queue_stopped(txq) &&
David S. Millera3138df2007-10-09 01:54:01 -07003272 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
David S. Millerb4c21632008-07-15 03:48:19 -07003273 netif_tx_wake_queue(txq);
3274 __netif_tx_unlock(txq);
David S. Millera3138df2007-10-09 01:54:01 -07003275 }
3276}
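
/* The wake-up tail of niu_tx_work() is the classic check / lock /
 * re-check idiom: the first, lockless test keeps the common completion
 * path cheap, and the test is repeated under the TX queue lock because
 * another CPU may have changed the queue state in between.  Schematic,
 * runnable sketch (stub state instead of real netdev queues):
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool stopped = true;
static int avail = 100, thresh = 64;

static void maybe_wake(void)
{
	if (stopped && avail > thresh) {	/* racy fast check */
		/* __netif_tx_lock(txq, smp_processor_id()); */
		if (stopped && avail > thresh)	/* stable re-check */
			stopped = false;
		/* __netif_tx_unlock(txq); */
	}
	printf("stopped=%d\n", stopped);
}

int main(void)
{
	maybe_wake();
	return 0;
}
#endif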
3277
3278static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget)
3279{
3280 int qlen, rcr_done = 0, work_done = 0;
3281 struct rxdma_mailbox *mbox = rp->mbox;
3282 u64 stat;
3283
3284#if 1
3285 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
3286 qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
3287#else
3288 stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
3289 qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
3290#endif
3291 mbox->rx_dma_ctl_stat = 0;
3292 mbox->rcrstat_a = 0;
3293
3294 niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n",
3295 np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen);
3296
3297 rcr_done = work_done = 0;
3298 qlen = min(qlen, budget);
3299 while (work_done < qlen) {
3300 rcr_done += niu_process_rx_pkt(np, rp);
3301 work_done++;
3302 }
3303
3304 if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
3305 unsigned int i;
3306
3307 for (i = 0; i < rp->rbr_refill_pending; i++)
3308 niu_rbr_refill(np, rp, GFP_ATOMIC);
3309 rp->rbr_refill_pending = 0;
3310 }
3311
3312 stat = (RX_DMA_CTL_STAT_MEX |
3313 ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
3314 ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));
3315
3316 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
3317
3318 return work_done;
3319}
3320
3321static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
3322{
3323 u64 v0 = lp->v0;
3324 u32 tx_vec = (v0 >> 32);
3325 u32 rx_vec = (v0 & 0xffffffff);
3326 int i, work_done = 0;
3327
3328 niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n",
3329 np->dev->name, (unsigned long long) v0);
3330
3331 for (i = 0; i < np->num_tx_rings; i++) {
3332 struct tx_ring_info *rp = &np->tx_rings[i];
3333 if (tx_vec & (1 << rp->tx_channel))
3334 niu_tx_work(np, rp);
3335 nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
3336 }
3337
3338 for (i = 0; i < np->num_rx_rings; i++) {
3339 struct rx_ring_info *rp = &np->rx_rings[i];
3340
3341 if (rx_vec & (1 << rp->rx_channel)) {
3342 int this_work_done;
3343
3344 this_work_done = niu_rx_work(np, rp,
3345 budget);
3346
3347 budget -= this_work_done;
3348 work_done += this_work_done;
3349 }
3350 nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
3351 }
3352
3353 return work_done;
3354}
3355
3356static int niu_poll(struct napi_struct *napi, int budget)
3357{
3358 struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
3359 struct niu *np = lp->np;
3360 int work_done;
3361
3362 work_done = niu_poll_core(np, lp, budget);
3363
3364 if (work_done < budget) {
3365 netif_rx_complete(np->dev, napi);
3366 niu_ldg_rearm(np, lp, 1);
3367 }
3368 return work_done;
3369}
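
/* niu_poll() follows the NAPI contract: consuming less than the budget
 * means the device is drained, so the poller completes and re-arms the
 * interrupt; using the whole budget keeps it on the poll list.  In
 * miniature (illustration only):
 */
#if 0
#include <stdio.h>

static int poll_once(int backlog, int budget, int *irq_enabled)
{
	int done = backlog < budget ? backlog : budget;

	if (done < budget)
		*irq_enabled = 1;	/* netif_rx_complete + rearm */
	return done;
}

int main(void)
{
	int irq = 0;

	printf("done=%d irq=%d\n", poll_once(100, 64, &irq), irq);
	printf("done=%d irq=%d\n", poll_once(10, 64, &irq), irq);
	return 0;
}
#endif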
3370
3371static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
3372 u64 stat)
3373{
3374 dev_err(np->device, PFX "%s: RX channel %u errors ( ",
3375 np->dev->name, rp->rx_channel);
3376
3377 if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
3378 printk("RBR_TMOUT ");
3379 if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
3380 printk("RSP_CNT ");
3381 if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
3382 printk("BYTE_EN_BUS ");
3383 if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
3384 printk("RSP_DAT ");
3385 if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
3386 printk("RCR_ACK ");
3387 if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
3388 printk("RCR_SHA_PAR ");
3389 if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
3390 printk("RBR_PRE_PAR ");
3391 if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
3392 printk("CONFIG ");
3393 if (stat & RX_DMA_CTL_STAT_RCRINCON)
3394 printk("RCRINCON ");
3395 if (stat & RX_DMA_CTL_STAT_RCRFULL)
3396 printk("RCRFULL ");
3397 if (stat & RX_DMA_CTL_STAT_RBRFULL)
3398 printk("RBRFULL ");
3399 if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
3400 printk("RBRLOGPAGE ");
3401 if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
3402 printk("CFIGLOGPAGE ");
3403 if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
3404 printk("DC_FIDO ");
3405
3406 printk(")\n");
3407}
3408
3409static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
3410{
3411 u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
3412 int err = 0;
3413
David S. Millera3138df2007-10-09 01:54:01 -07003414
3415 if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
3416 RX_DMA_CTL_STAT_PORT_FATAL))
3417 err = -EINVAL;
3418
Matheos Worku406f3532008-01-04 23:48:26 -08003419 if (err) {
3420 dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n",
3421 np->dev->name, rp->rx_channel,
3422 (unsigned long long) stat);
3423
3424 niu_log_rxchan_errors(np, rp, stat);
3425 }
3426
David S. Millera3138df2007-10-09 01:54:01 -07003427 nw64(RX_DMA_CTL_STAT(rp->rx_channel),
3428 stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
3429
3430 return err;
3431}
3432
3433static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
3434 u64 cs)
3435{
3436 dev_err(np->device, PFX "%s: TX channel %u errors ( ",
3437 np->dev->name, rp->tx_channel);
3438
3439 if (cs & TX_CS_MBOX_ERR)
3440 printk("MBOX ");
3441 if (cs & TX_CS_PKT_SIZE_ERR)
3442 printk("PKT_SIZE ");
3443 if (cs & TX_CS_TX_RING_OFLOW)
3444 printk("TX_RING_OFLOW ");
3445 if (cs & TX_CS_PREF_BUF_PAR_ERR)
3446 printk("PREF_BUF_PAR ");
3447 if (cs & TX_CS_NACK_PREF)
3448 printk("NACK_PREF ");
3449 if (cs & TX_CS_NACK_PKT_RD)
3450 printk("NACK_PKT_RD ");
3451 if (cs & TX_CS_CONF_PART_ERR)
3452 printk("CONF_PART ");
3453 if (cs & TX_CS_PKT_PRT_ERR)
3454 printk("PKT_PTR ");
3455
3456 printk(")\n");
3457}
3458
3459static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
3460{
3461 u64 cs, logh, logl;
3462
3463 cs = nr64(TX_CS(rp->tx_channel));
3464 logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
3465 logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
3466
3467 dev_err(np->device, PFX "%s: TX channel %u error, "
3468 "cs[%llx] logh[%llx] logl[%llx]\n",
3469 np->dev->name, rp->tx_channel,
3470 (unsigned long long) cs,
3471 (unsigned long long) logh,
3472 (unsigned long long) logl);
3473
3474 niu_log_txchan_errors(np, rp, cs);
3475
3476 return -ENODEV;
3477}
3478
3479static int niu_mif_interrupt(struct niu *np)
3480{
3481 u64 mif_status = nr64(MIF_STATUS);
3482 int phy_mdint = 0;
3483
3484 if (np->flags & NIU_FLAGS_XMAC) {
3485 u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
3486
3487 if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
3488 phy_mdint = 1;
3489 }
3490
3491 dev_err(np->device, PFX "%s: MIF interrupt, "
3492 "stat[%llx] phy_mdint(%d)\n",
3493 np->dev->name, (unsigned long long) mif_status, phy_mdint);
3494
3495 return -ENODEV;
3496}
3497
3498static void niu_xmac_interrupt(struct niu *np)
3499{
3500 struct niu_xmac_stats *mp = &np->mac_stats.xmac;
3501 u64 val;
3502
3503 val = nr64_mac(XTXMAC_STATUS);
3504 if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
3505 mp->tx_frames += TXMAC_FRM_CNT_COUNT;
3506 if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
3507 mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
3508 if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
3509 mp->tx_fifo_errors++;
3510 if (val & XTXMAC_STATUS_TXMAC_OFLOW)
3511 mp->tx_overflow_errors++;
3512 if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
3513 mp->tx_max_pkt_size_errors++;
3514 if (val & XTXMAC_STATUS_TXMAC_UFLOW)
3515 mp->tx_underflow_errors++;
3516
3517 val = nr64_mac(XRXMAC_STATUS);
3518 if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
3519 mp->rx_local_faults++;
3520 if (val & XRXMAC_STATUS_RFLT_DET)
3521 mp->rx_remote_faults++;
3522 if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
3523 mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
3524 if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
3525 mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
3526 if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
3527 mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
3528 if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
3529 mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
3530 if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
3531 mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
3534 if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
3535 mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
3536 if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
3537 mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
3538 if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
3539 mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
3540 if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
3541 mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
3542 if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
3543 mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
3544 if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
3545 mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
3546 if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
3547 mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
3548 if (val & XRXMAC_STAT_MSK_RXOCTET_CNT_EXP)
3549 mp->rx_octets += RXMAC_BT_CNT_COUNT;
3550 if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
3551 mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
3552 if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
3553 mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
3554 if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
3555 mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
3556 if (val & XRXMAC_STATUS_RXUFLOW)
3557 mp->rx_underflows++;
3558 if (val & XRXMAC_STATUS_RXOFLOW)
3559 mp->rx_overflows++;
3560
3561 val = nr64_mac(XMAC_FC_STAT);
3562 if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
3563 mp->pause_off_state++;
3564 if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
3565 mp->pause_on_state++;
3566 if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
3567 mp->pause_received++;
3568}
3569
3570static void niu_bmac_interrupt(struct niu *np)
3571{
3572 struct niu_bmac_stats *mp = &np->mac_stats.bmac;
3573 u64 val;
3574
3575 val = nr64_mac(BTXMAC_STATUS);
3576 if (val & BTXMAC_STATUS_UNDERRUN)
3577 mp->tx_underflow_errors++;
3578 if (val & BTXMAC_STATUS_MAX_PKT_ERR)
3579 mp->tx_max_pkt_size_errors++;
3580 if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
3581 mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
3582 if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
3583 mp->tx_frames += BTXMAC_FRM_CNT_COUNT;
3584
3585 val = nr64_mac(BRXMAC_STATUS);
3586 if (val & BRXMAC_STATUS_OVERFLOW)
3587 mp->rx_overflows++;
3588 if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
3589 mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
3590 if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
3591 mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
3592 if (val & BRXMAC_STATUS_CRC_ERR_EXP)
3593 mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
3594 if (val & BRXMAC_STATUS_LEN_ERR_EXP)
3595 mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;
3596
3597 val = nr64_mac(BMAC_CTRL_STATUS);
3598 if (val & BMAC_CTRL_STATUS_NOPAUSE)
3599 mp->pause_off_state++;
3600 if (val & BMAC_CTRL_STATUS_PAUSE)
3601 mp->pause_on_state++;
3602 if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
3603 mp->pause_received++;
3604}
3605
3606static int niu_mac_interrupt(struct niu *np)
3607{
3608 if (np->flags & NIU_FLAGS_XMAC)
3609 niu_xmac_interrupt(np);
3610 else
3611 niu_bmac_interrupt(np);
3612
3613 return 0;
3614}
3615
3616static void niu_log_device_error(struct niu *np, u64 stat)
3617{
3618 dev_err(np->device, PFX "%s: Core device errors ( ",
3619 np->dev->name);
3620
3621 if (stat & SYS_ERR_MASK_META2)
3622 printk("META2 ");
3623 if (stat & SYS_ERR_MASK_META1)
3624 printk("META1 ");
3625 if (stat & SYS_ERR_MASK_PEU)
3626 printk("PEU ");
3627 if (stat & SYS_ERR_MASK_TXC)
3628 printk("TXC ");
3629 if (stat & SYS_ERR_MASK_RDMC)
3630 printk("RDMC ");
3631 if (stat & SYS_ERR_MASK_TDMC)
3632 printk("TDMC ");
3633 if (stat & SYS_ERR_MASK_ZCP)
3634 printk("ZCP ");
3635 if (stat & SYS_ERR_MASK_FFLP)
3636 printk("FFLP ");
3637 if (stat & SYS_ERR_MASK_IPP)
3638 printk("IPP ");
3639 if (stat & SYS_ERR_MASK_MAC)
3640 printk("MAC ");
3641 if (stat & SYS_ERR_MASK_SMX)
3642 printk("SMX ");
3643
3644 printk(")\n");
3645}
3646
3647static int niu_device_error(struct niu *np)
3648{
3649 u64 stat = nr64(SYS_ERR_STAT);
3650
3651 dev_err(np->device, PFX "%s: Core device error, stat[%llx]\n",
3652 np->dev->name, (unsigned long long) stat);
3653
3654 niu_log_device_error(np, stat);
3655
3656 return -ENODEV;
3657}
3658
Matheos Worku406f3532008-01-04 23:48:26 -08003659static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
3660 u64 v0, u64 v1, u64 v2)
David S. Millera3138df2007-10-09 01:54:01 -07003661{
Matheos Worku406f3532008-01-04 23:48:26 -08003662
David S. Millera3138df2007-10-09 01:54:01 -07003663 int i, err = 0;
3664
Matheos Worku406f3532008-01-04 23:48:26 -08003665 lp->v0 = v0;
3666 lp->v1 = v1;
3667 lp->v2 = v2;
3668
David S. Millera3138df2007-10-09 01:54:01 -07003669 if (v1 & 0x00000000ffffffffULL) {
3670 u32 rx_vec = (v1 & 0xffffffff);
3671
3672 for (i = 0; i < np->num_rx_rings; i++) {
3673 struct rx_ring_info *rp = &np->rx_rings[i];
3674
3675 if (rx_vec & (1 << rp->rx_channel)) {
3676 int r = niu_rx_error(np, rp);
Matheos Worku406f3532008-01-04 23:48:26 -08003677 if (r) {
David S. Millera3138df2007-10-09 01:54:01 -07003678 err = r;
Matheos Worku406f3532008-01-04 23:48:26 -08003679 } else {
3680 if (!v0)
3681 nw64(RX_DMA_CTL_STAT(rp->rx_channel),
3682 RX_DMA_CTL_STAT_MEX);
3683 }
David S. Millera3138df2007-10-09 01:54:01 -07003684 }
3685 }
3686 }
3687 if (v1 & 0x7fffffff00000000ULL) {
3688 u32 tx_vec = (v1 >> 32) & 0x7fffffff;
3689
3690 for (i = 0; i < np->num_tx_rings; i++) {
3691 struct tx_ring_info *rp = &np->tx_rings[i];
3692
3693 if (tx_vec & (1 << rp->tx_channel)) {
3694 int r = niu_tx_error(np, rp);
3695 if (r)
3696 err = r;
3697 }
3698 }
3699 }
3700 if ((v0 | v1) & 0x8000000000000000ULL) {
3701 int r = niu_mif_interrupt(np);
3702 if (r)
3703 err = r;
3704 }
3705 if (v2) {
3706 if (v2 & 0x01ef) {
3707 int r = niu_mac_interrupt(np);
3708 if (r)
3709 err = r;
3710 }
3711 if (v2 & 0x0210) {
3712 int r = niu_device_error(np);
3713 if (r)
3714 err = r;
3715 }
3716 }
3717
3718 if (err)
3719 niu_enable_interrupts(np, 0);
3720
Matheos Worku406f3532008-01-04 23:48:26 -08003721 return err;
David S. Millera3138df2007-10-09 01:54:01 -07003722}
3723
3724static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
3725 int ldn)
3726{
3727 struct rxdma_mailbox *mbox = rp->mbox;
3728 u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
3729
3730 stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
3731 RX_DMA_CTL_STAT_RCRTO);
3732 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
3733
3734 niudbg(INTR, "%s: rxchan_intr stat[%llx]\n",
3735 np->dev->name, (unsigned long long) stat);
3736}
3737
3738static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
3739 int ldn)
3740{
3741 rp->tx_cs = nr64(TX_CS(rp->tx_channel));
3742
3743 niudbg(INTR, "%s: txchan_intr cs[%llx]\n",
3744 np->dev->name, (unsigned long long) rp->tx_cs);
3745}
3746
3747static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
3748{
3749 struct niu_parent *parent = np->parent;
3750 u32 rx_vec, tx_vec;
3751 int i;
3752
3753 tx_vec = (v0 >> 32);
3754 rx_vec = (v0 & 0xffffffff);
3755
3756 for (i = 0; i < np->num_rx_rings; i++) {
3757 struct rx_ring_info *rp = &np->rx_rings[i];
3758 int ldn = LDN_RXDMA(rp->rx_channel);
3759
3760 if (parent->ldg_map[ldn] != ldg)
3761 continue;
3762
3763 nw64(LD_IM0(ldn), LD_IM0_MASK);
3764 if (rx_vec & (1 << rp->rx_channel))
3765 niu_rxchan_intr(np, rp, ldn);
3766 }
3767
3768 for (i = 0; i < np->num_tx_rings; i++) {
3769 struct tx_ring_info *rp = &np->tx_rings[i];
3770 int ldn = LDN_TXDMA(rp->tx_channel);
3771
3772 if (parent->ldg_map[ldn] != ldg)
3773 continue;
3774
3775 nw64(LD_IM0(ldn), LD_IM0_MASK);
3776 if (tx_vec & (1 << rp->tx_channel))
3777 niu_txchan_intr(np, rp, ldn);
3778 }
3779}
3780
3781static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
3782 u64 v0, u64 v1, u64 v2)
3783{
3784 if (likely(netif_rx_schedule_prep(np->dev, &lp->napi))) {
3785 lp->v0 = v0;
3786 lp->v1 = v1;
3787 lp->v2 = v2;
3788 __niu_fastpath_interrupt(np, lp->ldg_num, v0);
3789 __netif_rx_schedule(np->dev, &lp->napi);
3790 }
3791}
3792
3793static irqreturn_t niu_interrupt(int irq, void *dev_id)
3794{
3795 struct niu_ldg *lp = dev_id;
3796 struct niu *np = lp->np;
3797 int ldg = lp->ldg_num;
3798 unsigned long flags;
3799 u64 v0, v1, v2;
3800
3801 if (netif_msg_intr(np))
3802 printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ",
3803 lp, ldg);
3804
3805 spin_lock_irqsave(&np->lock, flags);
3806
3807 v0 = nr64(LDSV0(ldg));
3808 v1 = nr64(LDSV1(ldg));
3809 v2 = nr64(LDSV2(ldg));
3810
3811 if (netif_msg_intr(np))
3812 printk("v0[%llx] v1[%llx] v2[%llx]\n",
3813 (unsigned long long) v0,
3814 (unsigned long long) v1,
3815 (unsigned long long) v2);
3816
3817 if (unlikely(!v0 && !v1 && !v2)) {
3818 spin_unlock_irqrestore(&np->lock, flags);
3819 return IRQ_NONE;
3820 }
3821
3822 if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
Matheos Worku406f3532008-01-04 23:48:26 -08003823 int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
David S. Millera3138df2007-10-09 01:54:01 -07003824 if (err)
3825 goto out;
3826 }
3827 if (likely(v0 & ~((u64)1 << LDN_MIF)))
3828 niu_schedule_napi(np, lp, v0, v1, v2);
3829 else
3830 niu_ldg_rearm(np, lp, 1);
3831out:
3832 spin_unlock_irqrestore(&np->lock, flags);
3833
3834 return IRQ_HANDLED;
3835}
3836
3837static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
3838{
3839 if (rp->mbox) {
3840 np->ops->free_coherent(np->device,
3841 sizeof(struct rxdma_mailbox),
3842 rp->mbox, rp->mbox_dma);
3843 rp->mbox = NULL;
3844 }
3845 if (rp->rcr) {
3846 np->ops->free_coherent(np->device,
3847 MAX_RCR_RING_SIZE * sizeof(__le64),
3848 rp->rcr, rp->rcr_dma);
3849 rp->rcr = NULL;
3850 rp->rcr_table_size = 0;
3851 rp->rcr_index = 0;
3852 }
3853 if (rp->rbr) {
3854 niu_rbr_free(np, rp);
3855
3856 np->ops->free_coherent(np->device,
3857 MAX_RBR_RING_SIZE * sizeof(__le32),
3858 rp->rbr, rp->rbr_dma);
3859 rp->rbr = NULL;
3860 rp->rbr_table_size = 0;
3861 rp->rbr_index = 0;
3862 }
3863 kfree(rp->rxhash);
3864 rp->rxhash = NULL;
3865}
3866
3867static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
3868{
3869 if (rp->mbox) {
3870 np->ops->free_coherent(np->device,
3871 sizeof(struct txdma_mailbox),
3872 rp->mbox, rp->mbox_dma);
3873 rp->mbox = NULL;
3874 }
3875 if (rp->descr) {
3876 int i;
3877
3878 for (i = 0; i < MAX_TX_RING_SIZE; i++) {
3879 if (rp->tx_buffs[i].skb)
3880 (void) release_tx_packet(np, rp, i);
3881 }
3882
3883 np->ops->free_coherent(np->device,
3884 MAX_TX_RING_SIZE * sizeof(__le64),
3885 rp->descr, rp->descr_dma);
3886 rp->descr = NULL;
3887 rp->pending = 0;
3888 rp->prod = 0;
3889 rp->cons = 0;
3890 rp->wrap_bit = 0;
3891 }
3892}
3893
3894static void niu_free_channels(struct niu *np)
3895{
3896 int i;
3897
3898 if (np->rx_rings) {
3899 for (i = 0; i < np->num_rx_rings; i++) {
3900 struct rx_ring_info *rp = &np->rx_rings[i];
3901
3902 niu_free_rx_ring_info(np, rp);
3903 }
3904 kfree(np->rx_rings);
3905 np->rx_rings = NULL;
3906 np->num_rx_rings = 0;
3907 }
3908
3909 if (np->tx_rings) {
3910 for (i = 0; i < np->num_tx_rings; i++) {
3911 struct tx_ring_info *rp = &np->tx_rings[i];
3912
3913 niu_free_tx_ring_info(np, rp);
3914 }
3915 kfree(np->tx_rings);
3916 np->tx_rings = NULL;
3917 np->num_tx_rings = 0;
3918 }
3919}
3920
3921static int niu_alloc_rx_ring_info(struct niu *np,
3922 struct rx_ring_info *rp)
3923{
3924 BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
3925
3926 rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *),
3927 GFP_KERNEL);
3928 if (!rp->rxhash)
3929 return -ENOMEM;
3930
3931 rp->mbox = np->ops->alloc_coherent(np->device,
3932 sizeof(struct rxdma_mailbox),
3933 &rp->mbox_dma, GFP_KERNEL);
3934 if (!rp->mbox)
3935 return -ENOMEM;
3936 if ((unsigned long)rp->mbox & (64UL - 1)) {
3937 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
3938 "RXDMA mailbox %p\n", np->dev->name, rp->mbox);
3939 return -EINVAL;
3940 }
3941
3942 rp->rcr = np->ops->alloc_coherent(np->device,
3943 MAX_RCR_RING_SIZE * sizeof(__le64),
3944 &rp->rcr_dma, GFP_KERNEL);
3945 if (!rp->rcr)
3946 return -ENOMEM;
3947 if ((unsigned long)rp->rcr & (64UL - 1)) {
3948 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
3949 "RXDMA RCR table %p\n", np->dev->name, rp->rcr);
3950 return -EINVAL;
3951 }
3952 rp->rcr_table_size = MAX_RCR_RING_SIZE;
3953 rp->rcr_index = 0;
3954
3955 rp->rbr = np->ops->alloc_coherent(np->device,
3956 MAX_RBR_RING_SIZE * sizeof(__le32),
3957 &rp->rbr_dma, GFP_KERNEL);
3958 if (!rp->rbr)
3959 return -ENOMEM;
3960 if ((unsigned long)rp->rbr & (64UL - 1)) {
3961 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
3962 "RXDMA RBR table %p\n", np->dev->name, rp->rbr);
3963 return -EINVAL;
3964 }
3965 rp->rbr_table_size = MAX_RBR_RING_SIZE;
3966 rp->rbr_index = 0;
3967 rp->rbr_pending = 0;
3968
3969 return 0;
3970}
3971
3972static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
3973{
3974 int mtu = np->dev->mtu;
3975
3976 /* These values are recommended by the HW designers for fair
3977 * utilization of DRR amongst the rings.
3978 */
3979 rp->max_burst = mtu + 32;
3980 if (rp->max_burst > 4096)
3981 rp->max_burst = 4096;
3982}
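
/* For the default 1500-byte MTU the burst limit works out to 1532
 * bytes; jumbo MTUs hit the 4096 clamp.  Quick check (illustration):
 */
#if 0
#include <stdio.h>

static int max_burst(int mtu)
{
	int b = mtu + 32;

	return b > 4096 ? 4096 : b;
}

int main(void)
{
	printf("%d %d\n", max_burst(1500), max_burst(9000));
	/* -> 1532 4096 */
	return 0;
}
#endif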
3983
3984static int niu_alloc_tx_ring_info(struct niu *np,
3985 struct tx_ring_info *rp)
3986{
3987 BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
3988
3989 rp->mbox = np->ops->alloc_coherent(np->device,
3990 sizeof(struct txdma_mailbox),
3991 &rp->mbox_dma, GFP_KERNEL);
3992 if (!rp->mbox)
3993 return -ENOMEM;
3994 if ((unsigned long)rp->mbox & (64UL - 1)) {
3995 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
3996 "TXDMA mailbox %p\n", np->dev->name, rp->mbox);
3997 return -EINVAL;
3998 }
3999
4000 rp->descr = np->ops->alloc_coherent(np->device,
4001 MAX_TX_RING_SIZE * sizeof(__le64),
4002 &rp->descr_dma, GFP_KERNEL);
4003 if (!rp->descr)
4004 return -ENOMEM;
4005 if ((unsigned long)rp->descr & (64UL - 1)) {
4006 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
4007 "TXDMA descr table %p\n", np->dev->name, rp->descr);
4008 return -EINVAL;
4009 }
4010
4011 rp->pending = MAX_TX_RING_SIZE;
4012 rp->prod = 0;
4013 rp->cons = 0;
4014 rp->wrap_bit = 0;
4015
4016 /* XXX make these configurable... XXX */
4017 rp->mark_freq = rp->pending / 4;
4018
4019 niu_set_max_burst(np, rp);
4020
4021 return 0;
4022}
4023
4024static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
4025{
Olof Johansson81429972007-10-21 16:32:58 -07004026 u16 bss;
David S. Millera3138df2007-10-09 01:54:01 -07004027
Olof Johansson81429972007-10-21 16:32:58 -07004028 bss = min(PAGE_SHIFT, 15);
David S. Millera3138df2007-10-09 01:54:01 -07004029
Olof Johansson81429972007-10-21 16:32:58 -07004030 rp->rbr_block_size = 1 << bss;
4031 rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
David S. Millera3138df2007-10-09 01:54:01 -07004032
4033 rp->rbr_sizes[0] = 256;
4034 rp->rbr_sizes[1] = 1024;
4035 if (np->dev->mtu > ETH_DATA_LEN) {
4036 switch (PAGE_SIZE) {
4037 case 4 * 1024:
4038 rp->rbr_sizes[2] = 4096;
4039 break;
4040
4041 default:
4042 rp->rbr_sizes[2] = 8192;
4043 break;
4044 }
4045 } else {
4046 rp->rbr_sizes[2] = 2048;
4047 }
4048 rp->rbr_sizes[3] = rp->rbr_block_size;
4049}
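
/* The block-size math above caps an RBR block at 32 KB (bss <= 15), so
 * a page always holds a whole number of blocks.  Worked example for
 * 4 KB and 64 KB page systems (illustration only):
 */
#if 0
#include <stdio.h>

static void size_rbr(int page_shift)
{
	int bss = page_shift < 15 ? page_shift : 15;

	printf("page %d KB: block %d KB, %d blocks/page\n",
	       (1 << page_shift) >> 10, (1 << bss) >> 10,
	       1 << (page_shift - bss));
}

int main(void)
{
	size_rbr(12);	/* 4 KB pages: one 4 KB block per page */
	size_rbr(16);	/* 64 KB pages: two 32 KB blocks per page */
	return 0;
}
#endif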
4050
4051static int niu_alloc_channels(struct niu *np)
4052{
4053 struct niu_parent *parent = np->parent;
4054 int first_rx_channel, first_tx_channel;
4055 int i, port, err;
4056
4057 port = np->port;
4058 first_rx_channel = first_tx_channel = 0;
4059 for (i = 0; i < port; i++) {
4060 first_rx_channel += parent->rxchan_per_port[i];
4061 first_tx_channel += parent->txchan_per_port[i];
4062 }
4063
4064 np->num_rx_rings = parent->rxchan_per_port[port];
4065 np->num_tx_rings = parent->txchan_per_port[port];
4066
David S. Millerb4c21632008-07-15 03:48:19 -07004067 np->dev->real_num_tx_queues = np->num_tx_rings;
4068
David S. Millera3138df2007-10-09 01:54:01 -07004069 np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info),
4070 GFP_KERNEL);
4071 err = -ENOMEM;
4072 if (!np->rx_rings)
4073 goto out_err;
4074
4075 for (i = 0; i < np->num_rx_rings; i++) {
4076 struct rx_ring_info *rp = &np->rx_rings[i];
4077
4078 rp->np = np;
4079 rp->rx_channel = first_rx_channel + i;
4080
4081 err = niu_alloc_rx_ring_info(np, rp);
4082 if (err)
4083 goto out_err;
4084
4085 niu_size_rbr(np, rp);
4086
4087 /* XXX better defaults, configurable, etc... XXX */
4088 rp->nonsyn_window = 64;
4089 rp->nonsyn_threshold = rp->rcr_table_size - 64;
4090 rp->syn_window = 64;
4091 rp->syn_threshold = rp->rcr_table_size - 64;
4092 rp->rcr_pkt_threshold = 16;
4093 rp->rcr_timeout = 8;
4094 rp->rbr_kick_thresh = RBR_REFILL_MIN;
4095 if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
4096 rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
4097
4098 err = niu_rbr_fill(np, rp, GFP_KERNEL);
4099 if (err)
4100 goto out_err;
4101 }
4102
4103 np->tx_rings = kzalloc(np->num_tx_rings * sizeof(struct tx_ring_info),
4104 GFP_KERNEL);
4105 err = -ENOMEM;
4106 if (!np->tx_rings)
4107 goto out_err;
4108
4109 for (i = 0; i < np->num_tx_rings; i++) {
4110 struct tx_ring_info *rp = &np->tx_rings[i];
4111
4112 rp->np = np;
4113 rp->tx_channel = first_tx_channel + i;
4114
4115 err = niu_alloc_tx_ring_info(np, rp);
4116 if (err)
4117 goto out_err;
4118 }
4119
4120 return 0;
4121
4122out_err:
4123 niu_free_channels(np);
4124 return err;
4125}
4126
4127static int niu_tx_cs_sng_poll(struct niu *np, int channel)
4128{
4129 int limit = 1000;
4130
4131 while (--limit > 0) {
4132 u64 val = nr64(TX_CS(channel));
4133 if (val & TX_CS_SNG_STATE)
4134 return 0;
4135 }
4136 return -ENODEV;
4137}
4138
4139static int niu_tx_channel_stop(struct niu *np, int channel)
4140{
4141 u64 val = nr64(TX_CS(channel));
4142
4143 val |= TX_CS_STOP_N_GO;
4144 nw64(TX_CS(channel), val);
4145
4146 return niu_tx_cs_sng_poll(np, channel);
4147}
4148
4149static int niu_tx_cs_reset_poll(struct niu *np, int channel)
4150{
4151 int limit = 1000;
4152
4153 while (--limit > 0) {
4154 u64 val = nr64(TX_CS(channel));
4155 if (!(val & TX_CS_RST))
4156 return 0;
4157 }
4158 return -ENODEV;
4159}
4160
4161static int niu_tx_channel_reset(struct niu *np, int channel)
4162{
4163 u64 val = nr64(TX_CS(channel));
4164 int err;
4165
4166 val |= TX_CS_RST;
4167 nw64(TX_CS(channel), val);
4168
4169 err = niu_tx_cs_reset_poll(np, channel);
4170 if (!err)
4171 nw64(TX_RING_KICK(channel), 0);
4172
4173 return err;
4174}
4175
4176static int niu_tx_channel_lpage_init(struct niu *np, int channel)
4177{
4178 u64 val;
4179
4180 nw64(TX_LOG_MASK1(channel), 0);
4181 nw64(TX_LOG_VAL1(channel), 0);
4182 nw64(TX_LOG_MASK2(channel), 0);
4183 nw64(TX_LOG_VAL2(channel), 0);
4184 nw64(TX_LOG_PAGE_RELO1(channel), 0);
4185 nw64(TX_LOG_PAGE_RELO2(channel), 0);
4186 nw64(TX_LOG_PAGE_HDL(channel), 0);
4187
4188 val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
4189 val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
4190 nw64(TX_LOG_PAGE_VLD(channel), val);
4191
4192 /* XXX TXDMA 32bit mode? XXX */
4193
4194 return 0;
4195}
4196
4197static void niu_txc_enable_port(struct niu *np, int on)
4198{
4199 unsigned long flags;
4200 u64 val, mask;
4201
4202 niu_lock_parent(np, flags);
4203 val = nr64(TXC_CONTROL);
4204 mask = (u64)1 << np->port;
4205 if (on) {
4206 val |= TXC_CONTROL_ENABLE | mask;
4207 } else {
4208 val &= ~mask;
4209 if ((val & ~TXC_CONTROL_ENABLE) == 0)
4210 val &= ~TXC_CONTROL_ENABLE;
4211 }
4212 nw64(TXC_CONTROL, val);
4213 niu_unlock_parent(np, flags);
4214}
4215
4216static void niu_txc_set_imask(struct niu *np, u64 imask)
4217{
4218 unsigned long flags;
4219 u64 val;
4220
4221 niu_lock_parent(np, flags);
4222 val = nr64(TXC_INT_MASK);
4223 val &= ~TXC_INT_MASK_VAL(np->port);
4224 val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
 nw64(TXC_INT_MASK, val);
4225 niu_unlock_parent(np, flags);
4226}
4227
4228static void niu_txc_port_dma_enable(struct niu *np, int on)
4229{
4230 u64 val = 0;
4231
4232 if (on) {
4233 int i;
4234
4235 for (i = 0; i < np->num_tx_rings; i++)
4236 val |= (1 << np->tx_rings[i].tx_channel);
4237 }
4238 nw64(TXC_PORT_DMA(np->port), val);
4239}
4240
4241static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
4242{
4243 int err, channel = rp->tx_channel;
4244 u64 val, ring_len;
4245
4246 err = niu_tx_channel_stop(np, channel);
4247 if (err)
4248 return err;
4249
4250 err = niu_tx_channel_reset(np, channel);
4251 if (err)
4252 return err;
4253
4254 err = niu_tx_channel_lpage_init(np, channel);
4255 if (err)
4256 return err;
4257
4258 nw64(TXC_DMA_MAX(channel), rp->max_burst);
4259 nw64(TX_ENT_MSK(channel), 0);
4260
4261 if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
4262 TX_RNG_CFIG_STADDR)) {
4263 dev_err(np->device, PFX "%s: TX ring channel %d "
4264 "DMA addr (%llx) is not aligned.\n",
4265 np->dev->name, channel,
4266 (unsigned long long) rp->descr_dma);
4267 return -EINVAL;
4268 }
4269
4270 /* The length field in TX_RNG_CFIG is measured in 64-byte
4271 * blocks. rp->pending counts TX descriptors, each 8 bytes,
4272 * so dividing the descriptor count by 8 converts it into
4273 * the 64-byte block count the chip expects.
4274 */
4275 ring_len = (rp->pending / 8);
4276
4277 val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
4278 rp->descr_dma);
4279 nw64(TX_RNG_CFIG(channel), val);
4280
4281 if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
4282 ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
4283 dev_err(np->device, PFX "%s: TX ring channel %d "
4284 "MBOX addr (%llx) is has illegal bits.\n",
4285 np->dev->name, channel,
4286 (unsigned long long) rp->mbox_dma);
4287 return -EINVAL;
4288 }
4289 nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
4290 nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
4291
4292 nw64(TX_CS(channel), 0);
4293
4294 rp->last_pkt_cnt = 0;
4295
4296 return 0;
4297}
4298
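/* Program this port's RDC group tables, which map classifier results
 * to RX DMA channels, and set the port's default RDC.
 */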
4299static void niu_init_rdc_groups(struct niu *np)
4300{
4301 struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
4302 int i, first_table_num = tp->first_table_num;
4303
4304 for (i = 0; i < tp->num_tables; i++) {
4305 struct rdc_table *tbl = &tp->tables[i];
4306 int this_table = first_table_num + i;
4307 int slot;
4308
4309 for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
4310 nw64(RDC_TBL(this_table, slot),
4311 tbl->rxdma_channel[slot]);
4312 }
4313
4314 nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
4315}
4316
4317static void niu_init_drr_weight(struct niu *np)
4318{
4319 int type = phy_decode(np->parent->port_phy, np->port);
4320 u64 val;
4321
4322 switch (type) {
4323 case PORT_TYPE_10G:
4324 val = PT_DRR_WEIGHT_DEFAULT_10G;
4325 break;
4326
4327 case PORT_TYPE_1G:
4328 default:
4329 val = PT_DRR_WEIGHT_DEFAULT_1G;
4330 break;
4331 }
4332 nw64(PT_DRR_WT(np->port), val);
4333}
4334
4335static int niu_init_hostinfo(struct niu *np)
4336{
4337 struct niu_parent *parent = np->parent;
4338 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
4339 int i, err, num_alt = niu_num_alt_addr(np);
4340 int first_rdc_table = tp->first_table_num;
4341
4342 err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
4343 if (err)
4344 return err;
4345
4346 err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
4347 if (err)
4348 return err;
4349
4350 for (i = 0; i < num_alt; i++) {
4351 err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
4352 if (err)
4353 return err;
4354 }
4355
4356 return 0;
4357}
4358
4359static int niu_rx_channel_reset(struct niu *np, int channel)
4360{
4361 return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
4362 RXDMA_CFIG1_RST, 1000, 10,
4363 "RXDMA_CFIG1");
4364}
4365
4366static int niu_rx_channel_lpage_init(struct niu *np, int channel)
4367{
4368 u64 val;
4369
4370 nw64(RX_LOG_MASK1(channel), 0);
4371 nw64(RX_LOG_VAL1(channel), 0);
4372 nw64(RX_LOG_MASK2(channel), 0);
4373 nw64(RX_LOG_VAL2(channel), 0);
4374 nw64(RX_LOG_PAGE_RELO1(channel), 0);
4375 nw64(RX_LOG_PAGE_RELO2(channel), 0);
4376 nw64(RX_LOG_PAGE_HDL(channel), 0);
4377
4378 val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
4379 val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
4380 nw64(RX_LOG_PAGE_VLD(channel), val);
4381
4382 return 0;
4383}
4384
4385static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
4386{
4387 u64 val;
4388
4389 val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
4390 ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
4391 ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
4392 ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
4393 nw64(RDC_RED_PARA(rp->rx_channel), val);
4394}
4395
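/* Encode the RBR block size and the three per-packet buffer sizes
 * into an RBR_CFIG_B value; sizes outside the supported set are
 * rejected with -EINVAL.
 */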
4396static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
4397{
4398 u64 val = 0;
4399
4400 switch (rp->rbr_block_size) {
4401 case 4 * 1024:
4402 val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
4403 break;
4404 case 8 * 1024:
4405 val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
4406 break;
4407 case 16 * 1024:
4408 val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
4409 break;
4410 case 32 * 1024:
4411 val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
4412 break;
4413 default:
4414 return -EINVAL;
4415 }
4416 val |= RBR_CFIG_B_VLD2;
4417 switch (rp->rbr_sizes[2]) {
4418 case 2 * 1024:
4419 val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
4420 break;
4421 case 4 * 1024:
4422 val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
4423 break;
4424 case 8 * 1024:
4425 val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
4426 break;
4427 case 16 * 1024:
4428 val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
4429 break;
4430
4431 default:
4432 return -EINVAL;
4433 }
4434 val |= RBR_CFIG_B_VLD1;
4435 switch (rp->rbr_sizes[1]) {
4436 case 1 * 1024:
4437 val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
4438 break;
4439 case 2 * 1024:
4440 val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
4441 break;
4442 case 4 * 1024:
4443 val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
4444 break;
4445 case 8 * 1024:
4446 val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
4447 break;
4448
4449 default:
4450 return -EINVAL;
4451 }
4452 val |= RBR_CFIG_B_VLD0;
4453 switch (rp->rbr_sizes[0]) {
4454 case 256:
4455 val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
4456 break;
4457 case 512:
4458 val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
4459 break;
4460 case 1 * 1024:
4461 val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
4462 break;
4463 case 2 * 1024:
4464 val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
4465 break;
4466
4467 default:
4468 return -EINVAL;
4469 }
4470
4471 *ret = val;
4472 return 0;
4473}
4474
4475static int niu_enable_rx_channel(struct niu *np, int channel, int on)
4476{
4477 u64 val = nr64(RXDMA_CFIG1(channel));
4478 int limit;
4479
4480 if (on)
4481 val |= RXDMA_CFIG1_EN;
4482 else
4483 val &= ~RXDMA_CFIG1_EN;
4484 nw64(RXDMA_CFIG1(channel), val);
4485
4486 limit = 1000;
4487 while (--limit > 0) {
4488 if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
4489 break;
4490 udelay(10);
4491 }
4492 if (limit <= 0)
4493 return -ENODEV;
4494 return 0;
4495}
4496
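/* Bring up one RX DMA channel: reset it, program logical pages and
 * WRED, point the hardware at the mailbox, RBR, and RCR rings, then
 * enable the channel and kick the initial RBR entries.
 */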
4497static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
4498{
4499 int err, channel = rp->rx_channel;
4500 u64 val;
4501
4502 err = niu_rx_channel_reset(np, channel);
4503 if (err)
4504 return err;
4505
4506 err = niu_rx_channel_lpage_init(np, channel);
4507 if (err)
4508 return err;
4509
4510 niu_rx_channel_wred_init(np, rp);
4511
4512 nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
4513 nw64(RX_DMA_CTL_STAT(channel),
4514 (RX_DMA_CTL_STAT_MEX |
4515 RX_DMA_CTL_STAT_RCRTHRES |
4516 RX_DMA_CTL_STAT_RCRTO |
4517 RX_DMA_CTL_STAT_RBR_EMPTY));
4518 nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
4519 nw64(RXDMA_CFIG2(channel), (rp->mbox_dma & 0x00000000ffffffc0));
4520 nw64(RBR_CFIG_A(channel),
4521 ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
4522 (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
4523 err = niu_compute_rbr_cfig_b(rp, &val);
4524 if (err)
4525 return err;
4526 nw64(RBR_CFIG_B(channel), val);
4527 nw64(RCRCFIG_A(channel),
4528 ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
4529 (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
4530 nw64(RCRCFIG_B(channel),
4531 ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
4532 RCRCFIG_B_ENTOUT |
4533 ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
4534
4535 err = niu_enable_rx_channel(np, channel, 1);
4536 if (err)
4537 return err;
4538
4539 nw64(RBR_KICK(channel), rp->rbr_index);
4540
4541 val = nr64(RX_DMA_CTL_STAT(channel));
4542 val |= RX_DMA_CTL_STAT_RBR_EMPTY;
4543 nw64(RX_DMA_CTL_STAT(channel), val);
4544
4545 return 0;
4546}
4547
4548static int niu_init_rx_channels(struct niu *np)
4549{
4550 unsigned long flags;
4551 u64 seed = jiffies_64;
4552 int err, i;
4553
4554 niu_lock_parent(np, flags);
4555 nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
4556 nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
4557 niu_unlock_parent(np, flags);
4558
4559 /* XXX RXDMA 32bit mode? XXX */
4560
4561 niu_init_rdc_groups(np);
4562 niu_init_drr_weight(np);
4563
4564 err = niu_init_hostinfo(np);
4565 if (err)
4566 return err;
4567
4568 for (i = 0; i < np->num_rx_rings; i++) {
4569 struct rx_ring_info *rp = &np->rx_rings[i];
4570
4571 err = niu_init_one_rx_channel(np, rp);
4572 if (err)
4573 return err;
4574 }
4575
4576 return 0;
4577}
4578
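/* Install a TCAM rule that matches IP packets carrying no port
 * information (i.e. fragments) and steers them via the offset-0
 * flow-key result.
 */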
4579static int niu_set_ip_frag_rule(struct niu *np)
4580{
4581 struct niu_parent *parent = np->parent;
4582 struct niu_classifier *cp = &np->clas;
4583 struct niu_tcam_entry *tp;
4584 int index, err;
4585
4586 /* XXX fix this allocation scheme XXX */
4587 index = cp->tcam_index;
4588 tp = &parent->tcam[index];
4589
4590 /* Note that the noport bit is the same in both ipv4 and
4591 * ipv6 format TCAM entries.
4592 */
4593 memset(tp, 0, sizeof(*tp));
4594 tp->key[1] = TCAM_V4KEY1_NOPORT;
4595 tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
4596 tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
4597 ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
4598 err = tcam_write(np, index, tp->key, tp->key_mask);
4599 if (err)
4600 return err;
4601 err = tcam_assoc_write(np, index, tp->assoc_data);
4602 if (err)
4603 return err;
4604
4605 return 0;
4606}
4607
4608static int niu_init_classifier_hw(struct niu *np)
4609{
4610 struct niu_parent *parent = np->parent;
4611 struct niu_classifier *cp = &np->clas;
4612 int i, err;
4613
4614 nw64(H1POLY, cp->h1_init);
4615 nw64(H2POLY, cp->h2_init);
4616
4617 err = niu_init_hostinfo(np);
4618 if (err)
4619 return err;
4620
4621 for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
4622 struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
4623
4624 vlan_tbl_write(np, i, np->port,
4625 vp->vlan_pref, vp->rdc_num);
4626 }
4627
4628 for (i = 0; i < cp->num_alt_mac_mappings; i++) {
4629 struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
4630
4631 err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
4632 ap->rdc_num, ap->mac_pref);
4633 if (err)
4634 return err;
4635 }
4636
4637 for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
4638 int index = i - CLASS_CODE_USER_PROG1;
4639
4640 err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
4641 if (err)
4642 return err;
4643 err = niu_set_flow_key(np, i, parent->flow_key[index]);
4644 if (err)
4645 return err;
4646 }
4647
4648 err = niu_set_ip_frag_rule(np);
4649 if (err)
4650 return err;
4651
4652 tcam_enable(np, 1);
4653
4654 return 0;
4655}
4656
4657static int niu_zcp_write(struct niu *np, int index, u64 *data)
4658{
4659 nw64(ZCP_RAM_DATA0, data[0]);
4660 nw64(ZCP_RAM_DATA1, data[1]);
4661 nw64(ZCP_RAM_DATA2, data[2]);
4662 nw64(ZCP_RAM_DATA3, data[3]);
4663 nw64(ZCP_RAM_DATA4, data[4]);
4664 nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
4665 nw64(ZCP_RAM_ACC,
4666 (ZCP_RAM_ACC_WRITE |
4667 (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
4668 (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
4669
4670 return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
4671 1000, 100);
4672}
4673
4674static int niu_zcp_read(struct niu *np, int index, u64 *data)
4675{
4676 int err;
4677
4678 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
4679 1000, 100);
4680 if (err) {
4681 dev_err(np->device, PFX "%s: ZCP read busy won't clear, "
4682 "ZCP_RAM_ACC[%llx]\n", np->dev->name,
4683 (unsigned long long) nr64(ZCP_RAM_ACC));
4684 return err;
4685 }
4686
4687 nw64(ZCP_RAM_ACC,
4688 (ZCP_RAM_ACC_READ |
4689 (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
4690 (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
4691
4692 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
4693 1000, 100);
4694 if (err) {
4695 dev_err(np->device, PFX "%s: ZCP read busy2 won't clear, "
4696 "ZCP_RAM_ACC[%llx]\n", np->dev->name,
4697 (unsigned long long) nr64(ZCP_RAM_ACC));
4698 return err;
4699 }
4700
4701 data[0] = nr64(ZCP_RAM_DATA0);
4702 data[1] = nr64(ZCP_RAM_DATA1);
4703 data[2] = nr64(ZCP_RAM_DATA2);
4704 data[3] = nr64(ZCP_RAM_DATA3);
4705 data[4] = nr64(ZCP_RAM_DATA4);
4706
4707 return 0;
4708}
4709
4710static void niu_zcp_cfifo_reset(struct niu *np)
4711{
4712 u64 val = nr64(RESET_CFIFO);
4713
4714 val |= RESET_CFIFO_RST(np->port);
4715 nw64(RESET_CFIFO, val);
4716 udelay(10);
4717
4718 val &= ~RESET_CFIFO_RST(np->port);
4719 nw64(RESET_CFIFO, val);
4720}
4721
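/* Zero every CFIFO entry for this port, reading each entry back,
 * then reset the CFIFO and clear the ZCP ECC and interrupt state.
 */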
4722static int niu_init_zcp(struct niu *np)
4723{
4724 u64 data[5], rbuf[5];
4725 int i, max, err;
4726
4727 if (np->parent->plat_type != PLAT_TYPE_NIU) {
4728 if (np->port == 0 || np->port == 1)
4729 max = ATLAS_P0_P1_CFIFO_ENTRIES;
4730 else
4731 max = ATLAS_P2_P3_CFIFO_ENTRIES;
4732 } else
4733 max = NIU_CFIFO_ENTRIES;
4734
4735 data[0] = 0;
4736 data[1] = 0;
4737 data[2] = 0;
4738 data[3] = 0;
4739 data[4] = 0;
4740
4741 for (i = 0; i < max; i++) {
4742 err = niu_zcp_write(np, i, data);
4743 if (err)
4744 return err;
4745 err = niu_zcp_read(np, i, rbuf);
4746 if (err)
4747 return err;
4748 }
4749
4750 niu_zcp_cfifo_reset(np);
4751 nw64(CFIFO_ECC(np->port), 0);
4752 nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
4753 (void) nr64(ZCP_INT_STAT);
4754 nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
4755
4756 return 0;
4757}
4758
4759static void niu_ipp_write(struct niu *np, int index, u64 *data)
4760{
4761 u64 val = nr64_ipp(IPP_CFIG);
4762
4763 nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
4764 nw64_ipp(IPP_DFIFO_WR_PTR, index);
4765 nw64_ipp(IPP_DFIFO_WR0, data[0]);
4766 nw64_ipp(IPP_DFIFO_WR1, data[1]);
4767 nw64_ipp(IPP_DFIFO_WR2, data[2]);
4768 nw64_ipp(IPP_DFIFO_WR3, data[3]);
4769 nw64_ipp(IPP_DFIFO_WR4, data[4]);
4770 nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
4771}
4772
4773static void niu_ipp_read(struct niu *np, int index, u64 *data)
4774{
4775 nw64_ipp(IPP_DFIFO_RD_PTR, index);
4776 data[0] = nr64_ipp(IPP_DFIFO_RD0);
4777 data[1] = nr64_ipp(IPP_DFIFO_RD1);
4778 data[2] = nr64_ipp(IPP_DFIFO_RD2);
4779 data[3] = nr64_ipp(IPP_DFIFO_RD3);
4780 data[4] = nr64_ipp(IPP_DFIFO_RD4);
4781}
4782
4783static int niu_ipp_reset(struct niu *np)
4784{
4785 return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
4786 1000, 100, "IPP_CFIG");
4787}
4788
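/* Zero the IPP DFIFO, soft-reset the IPP, then enable it with ECC,
 * bad-CRC drop, and checksumming turned on and the maximum packet
 * size programmed.
 */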
4789static int niu_init_ipp(struct niu *np)
4790{
4791 u64 data[5], rbuf[5], val;
4792 int i, max, err;
4793
4794 if (np->parent->plat_type != PLAT_TYPE_NIU) {
4795 if (np->port == 0 || np->port == 1)
4796 max = ATLAS_P0_P1_DFIFO_ENTRIES;
4797 else
4798 max = ATLAS_P2_P3_DFIFO_ENTRIES;
4799 } else
4800 max = NIU_DFIFO_ENTRIES;
4801
4802 data[0] = 0;
4803 data[1] = 0;
4804 data[2] = 0;
4805 data[3] = 0;
4806 data[4] = 0;
4807
4808 for (i = 0; i < max; i++) {
4809 niu_ipp_write(np, i, data);
4810 niu_ipp_read(np, i, rbuf);
4811 }
4812
4813 (void) nr64_ipp(IPP_INT_STAT);
4814 (void) nr64_ipp(IPP_INT_STAT);
4815
4816 err = niu_ipp_reset(np);
4817 if (err)
4818 return err;
4819
4820 (void) nr64_ipp(IPP_PKT_DIS);
4821 (void) nr64_ipp(IPP_BAD_CS_CNT);
4822 (void) nr64_ipp(IPP_ECC);
4823
4824 (void) nr64_ipp(IPP_INT_STAT);
4825
4826 nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
4827
4828 val = nr64_ipp(IPP_CFIG);
4829 val &= ~IPP_CFIG_IP_MAX_PKT;
4830 val |= (IPP_CFIG_IPP_ENABLE |
4831 IPP_CFIG_DFIFO_ECC_EN |
4832 IPP_CFIG_DROP_BAD_CRC |
4833 IPP_CFIG_CKSUM_EN |
4834 (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
4835 nw64_ipp(IPP_CFIG, val);
4836
4837 return 0;
4838}
4839
4840static void niu_handle_led(struct niu *np, int status)
4841{
4842 u64 val;
4843 val = nr64_mac(XMAC_CONFIG);
4844
4845 if ((np->flags & NIU_FLAGS_10G) != 0 &&
4846 (np->flags & NIU_FLAGS_FIBER) != 0) {
4847 if (status) {
4848 val |= XMAC_CONFIG_LED_POLARITY;
4849 val &= ~XMAC_CONFIG_FORCE_LED_ON;
4850 } else {
4851 val |= XMAC_CONFIG_FORCE_LED_ON;
4852 val &= ~XMAC_CONFIG_LED_POLARITY;
4853 }
4854 }
4855
4856 nw64_mac(XMAC_CONFIG, val);
4857}
4858
4859static void niu_init_xif_xmac(struct niu *np)
4860{
4861 struct niu_link_config *lp = &np->link_config;
4862 u64 val;
4863
4864 if (np->flags & NIU_FLAGS_XCVR_SERDES) {
4865 val = nr64(MIF_CONFIG);
4866 val |= MIF_CONFIG_ATCA_GE;
4867 nw64(MIF_CONFIG, val);
4868 }
4869
4870 val = nr64_mac(XMAC_CONFIG);
4871 val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
4872
4873 val |= XMAC_CONFIG_TX_OUTPUT_EN;
4874
4875 if (lp->loopback_mode == LOOPBACK_MAC) {
4876 val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
4877 val |= XMAC_CONFIG_LOOPBACK;
4878 } else {
4879 val &= ~XMAC_CONFIG_LOOPBACK;
4880 }
4881
4882 if (np->flags & NIU_FLAGS_10G) {
4883 val &= ~XMAC_CONFIG_LFS_DISABLE;
4884 } else {
4885 val |= XMAC_CONFIG_LFS_DISABLE;
4886 if (!(np->flags & NIU_FLAGS_FIBER) &&
4887 !(np->flags & NIU_FLAGS_XCVR_SERDES))
4888 val |= XMAC_CONFIG_1G_PCS_BYPASS;
4889 else
4890 val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
4891 }
4892
4893 val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
4894
4895 if (lp->active_speed == SPEED_100)
4896 val |= XMAC_CONFIG_SEL_CLK_25MHZ;
4897 else
4898 val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
4899
4900 nw64_mac(XMAC_CONFIG, val);
4901
4902 val = nr64_mac(XMAC_CONFIG);
4903 val &= ~XMAC_CONFIG_MODE_MASK;
4904 if (np->flags & NIU_FLAGS_10G) {
4905 val |= XMAC_CONFIG_MODE_XGMII;
4906 } else {
4907 if (lp->active_speed == SPEED_100)
4908 val |= XMAC_CONFIG_MODE_MII;
4909 else
4910 val |= XMAC_CONFIG_MODE_GMII;
4911 }
4912
4913 nw64_mac(XMAC_CONFIG, val);
4914}
4915
4916static void niu_init_xif_bmac(struct niu *np)
4917{
4918 struct niu_link_config *lp = &np->link_config;
4919 u64 val;
4920
4921 val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
4922
4923 if (lp->loopback_mode == LOOPBACK_MAC)
4924 val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
4925 else
4926 val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
4927
4928 if (lp->active_speed == SPEED_1000)
4929 val |= BMAC_XIF_CONFIG_GMII_MODE;
4930 else
4931 val &= ~BMAC_XIF_CONFIG_GMII_MODE;
4932
4933 val &= ~(BMAC_XIF_CONFIG_LINK_LED |
4934 BMAC_XIF_CONFIG_LED_POLARITY);
4935
4936 if (!(np->flags & NIU_FLAGS_10G) &&
4937 !(np->flags & NIU_FLAGS_FIBER) &&
4938 lp->active_speed == SPEED_100)
4939 val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
4940 else
4941 val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
4942
4943 nw64_mac(BMAC_XIF_CONFIG, val);
4944}
4945
4946static void niu_init_xif(struct niu *np)
4947{
4948 if (np->flags & NIU_FLAGS_XMAC)
4949 niu_init_xif_xmac(np);
4950 else
4951 niu_init_xif_bmac(np);
4952}
4953
4954static void niu_pcs_mii_reset(struct niu *np)
4955{
4956 int limit = 1000;
4957 u64 val = nr64_pcs(PCS_MII_CTL);
4958 val |= PCS_MII_CTL_RST;
4959 nw64_pcs(PCS_MII_CTL, val);
4960 while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
4961 udelay(100);
4962 val = nr64_pcs(PCS_MII_CTL);
4963 }
4964}
4965
4966static void niu_xpcs_reset(struct niu *np)
4967{
4968 int limit = 1000;
4969 u64 val = nr64_xpcs(XPCS_CONTROL1);
4970 val |= XPCS_CONTROL1_RESET;
4971 nw64_xpcs(XPCS_CONTROL1, val);
4972 while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
4973 udelay(100);
4974 val = nr64_xpcs(XPCS_CONTROL1);
4975 }
4976}
4977
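/* Select and initialize the PCS datapath based on the port type:
 * PCS for 1G fiber and 1G SERDES, XPCS for 10G, and the MII datapath
 * for 1G copper and 1G RGMII fiber.
 */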
4978static int niu_init_pcs(struct niu *np)
4979{
4980 struct niu_link_config *lp = &np->link_config;
4981 u64 val;
4982
4983 switch (np->flags & (NIU_FLAGS_10G |
4984 NIU_FLAGS_FIBER |
4985 NIU_FLAGS_XCVR_SERDES)) {
4986 case NIU_FLAGS_FIBER:
4987 /* 1G fiber */
4988 nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
4989 nw64_pcs(PCS_DPATH_MODE, 0);
4990 niu_pcs_mii_reset(np);
4991 break;
4992
4993 case NIU_FLAGS_10G:
4994 case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
4995 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
4996 /* 10G SERDES */
4997 if (!(np->flags & NIU_FLAGS_XMAC))
4998 return -EINVAL;
4999
5000 /* 10G copper or fiber */
5001 val = nr64_mac(XMAC_CONFIG);
5002 val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
5003 nw64_mac(XMAC_CONFIG, val);
5004
5005 niu_xpcs_reset(np);
5006
5007 val = nr64_xpcs(XPCS_CONTROL1);
5008 if (lp->loopback_mode == LOOPBACK_PHY)
5009 val |= XPCS_CONTROL1_LOOPBACK;
5010 else
5011 val &= ~XPCS_CONTROL1_LOOPBACK;
5012 nw64_xpcs(XPCS_CONTROL1, val);
5013
5014 nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
5015 (void) nr64_xpcs(XPCS_SYMERR_CNT01);
5016 (void) nr64_xpcs(XPCS_SYMERR_CNT23);
5017 break;
5018
5019
5020 case NIU_FLAGS_XCVR_SERDES:
5021 /* 1G SERDES */
5022 niu_pcs_mii_reset(np);
5023 nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
5024 nw64_pcs(PCS_DPATH_MODE, 0);
5025 break;
5026
5027 case 0:
5028 /* 1G copper */
5029 case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
5030 /* 1G RGMII FIBER */
5031 nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
5032 niu_pcs_mii_reset(np);
5033 break;
5034
5035 default:
5036 return -EINVAL;
5037 }
5038
5039 return 0;
5040}
5041
5042static int niu_reset_tx_xmac(struct niu *np)
5043{
5044 return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
5045 (XTXMAC_SW_RST_REG_RS |
5046 XTXMAC_SW_RST_SOFT_RST),
5047 1000, 100, "XTXMAC_SW_RST");
5048}
5049
5050static int niu_reset_tx_bmac(struct niu *np)
5051{
5052 int limit;
5053
5054 nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
5055 limit = 1000;
5056 while (--limit >= 0) {
5057 if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
5058 break;
5059 udelay(100);
5060 }
5061 if (limit < 0) {
5062 dev_err(np->device, PFX "Port %u TX BMAC would not reset, "
5063 "BTXMAC_SW_RST[%llx]\n",
5064 np->port,
5065 (unsigned long long) nr64_mac(BTXMAC_SW_RST));
5066 return -ENODEV;
5067 }
5068
5069 return 0;
5070}
5071
5072static int niu_reset_tx_mac(struct niu *np)
5073{
5074 if (np->flags & NIU_FLAGS_XMAC)
5075 return niu_reset_tx_xmac(np);
5076 else
5077 return niu_reset_tx_bmac(np);
5078}
5079
5080static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
5081{
5082 u64 val;
5083
5084 val = nr64_mac(XMAC_MIN);
5085 val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
5086 XMAC_MIN_RX_MIN_PKT_SIZE);
5087 val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
5088 val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
5089 nw64_mac(XMAC_MIN, val);
5090
5091 nw64_mac(XMAC_MAX, max);
5092
5093 nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);
5094
5095 val = nr64_mac(XMAC_IPG);
5096 if (np->flags & NIU_FLAGS_10G) {
5097 val &= ~XMAC_IPG_IPG_XGMII;
5098 val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
5099 } else {
5100 val &= ~XMAC_IPG_IPG_MII_GMII;
5101 val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
5102 }
5103 nw64_mac(XMAC_IPG, val);
5104
5105 val = nr64_mac(XMAC_CONFIG);
5106 val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
5107 XMAC_CONFIG_STRETCH_MODE |
5108 XMAC_CONFIG_VAR_MIN_IPG_EN |
5109 XMAC_CONFIG_TX_ENABLE);
5110 nw64_mac(XMAC_CONFIG, val);
5111
5112 nw64_mac(TXMAC_FRM_CNT, 0);
5113 nw64_mac(TXMAC_BYTE_CNT, 0);
5114}
5115
5116static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
5117{
5118 u64 val;
5119
5120 nw64_mac(BMAC_MIN_FRAME, min);
5121 nw64_mac(BMAC_MAX_FRAME, max);
5122
5123 nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
5124 nw64_mac(BMAC_CTRL_TYPE, 0x8808);
5125 nw64_mac(BMAC_PREAMBLE_SIZE, 7);
5126
5127 val = nr64_mac(BTXMAC_CONFIG);
5128 val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
5129 BTXMAC_CONFIG_ENABLE);
5130 nw64_mac(BTXMAC_CONFIG, val);
5131}
5132
5133static void niu_init_tx_mac(struct niu *np)
5134{
5135 u64 min, max;
5136
5137 min = 64;
5138 if (np->dev->mtu > ETH_DATA_LEN)
5139 max = 9216;
5140 else
5141 max = 1522;
5142
5143 /* The XMAC_MIN register only accepts values for TX min which
5144 * have the low 3 bits cleared.
5145 */
5146 BUILD_BUG_ON(min & 0x7);
5147
5148 if (np->flags & NIU_FLAGS_XMAC)
5149 niu_init_tx_xmac(np, min, max);
5150 else
5151 niu_init_tx_bmac(np, min, max);
5152}
5153
5154static int niu_reset_rx_xmac(struct niu *np)
5155{
5156 int limit;
5157
5158 nw64_mac(XRXMAC_SW_RST,
5159 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
5160 limit = 1000;
5161 while (--limit >= 0) {
5162 if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
5163 XRXMAC_SW_RST_SOFT_RST)))
5164 break;
5165 udelay(100);
5166 }
5167 if (limit < 0) {
5168 dev_err(np->device, PFX "Port %u RX XMAC would not reset, "
5169 "XRXMAC_SW_RST[%llx]\n",
5170 np->port,
5171 (unsigned long long) nr64_mac(XRXMAC_SW_RST));
5172 return -ENODEV;
5173 }
5174
5175 return 0;
5176}
5177
5178static int niu_reset_rx_bmac(struct niu *np)
5179{
5180 int limit;
5181
5182 nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
5183 limit = 1000;
5184 while (--limit >= 0) {
5185 if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
5186 break;
5187 udelay(100);
5188 }
5189 if (limit < 0) {
5190 dev_err(np->device, PFX "Port %u RX BMAC would not reset, "
5191 "BRXMAC_SW_RST[%llx]\n",
5192 np->port,
5193 (unsigned long long) nr64_mac(BRXMAC_SW_RST));
5194 return -ENODEV;
5195 }
5196
5197 return 0;
5198}
5199
5200static int niu_reset_rx_mac(struct niu *np)
5201{
5202 if (np->flags & NIU_FLAGS_XMAC)
5203 return niu_reset_rx_xmac(np);
5204 else
5205 return niu_reset_rx_bmac(np);
5206}
5207
5208static void niu_init_rx_xmac(struct niu *np)
5209{
5210 struct niu_parent *parent = np->parent;
5211 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
5212 int first_rdc_table = tp->first_table_num;
5213 unsigned long i;
5214 u64 val;
5215
5216 nw64_mac(XMAC_ADD_FILT0, 0);
5217 nw64_mac(XMAC_ADD_FILT1, 0);
5218 nw64_mac(XMAC_ADD_FILT2, 0);
5219 nw64_mac(XMAC_ADD_FILT12_MASK, 0);
5220 nw64_mac(XMAC_ADD_FILT00_MASK, 0);
5221 for (i = 0; i < MAC_NUM_HASH; i++)
5222 nw64_mac(XMAC_HASH_TBL(i), 0);
5223 nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
5224 niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
5225 niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5226
5227 val = nr64_mac(XMAC_CONFIG);
5228 val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
5229 XMAC_CONFIG_PROMISCUOUS |
5230 XMAC_CONFIG_PROMISC_GROUP |
5231 XMAC_CONFIG_ERR_CHK_DIS |
5232 XMAC_CONFIG_RX_CRC_CHK_DIS |
5233 XMAC_CONFIG_RESERVED_MULTICAST |
5234 XMAC_CONFIG_RX_CODEV_CHK_DIS |
5235 XMAC_CONFIG_ADDR_FILTER_EN |
5236 XMAC_CONFIG_RCV_PAUSE_ENABLE |
5237 XMAC_CONFIG_STRIP_CRC |
5238 XMAC_CONFIG_PASS_FLOW_CTRL |
5239 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
5240 val |= (XMAC_CONFIG_HASH_FILTER_EN);
5241 nw64_mac(XMAC_CONFIG, val);
5242
5243 nw64_mac(RXMAC_BT_CNT, 0);
5244 nw64_mac(RXMAC_BC_FRM_CNT, 0);
5245 nw64_mac(RXMAC_MC_FRM_CNT, 0);
5246 nw64_mac(RXMAC_FRAG_CNT, 0);
5247 nw64_mac(RXMAC_HIST_CNT1, 0);
5248 nw64_mac(RXMAC_HIST_CNT2, 0);
5249 nw64_mac(RXMAC_HIST_CNT3, 0);
5250 nw64_mac(RXMAC_HIST_CNT4, 0);
5251 nw64_mac(RXMAC_HIST_CNT5, 0);
5252 nw64_mac(RXMAC_HIST_CNT6, 0);
5253 nw64_mac(RXMAC_HIST_CNT7, 0);
5254 nw64_mac(RXMAC_MPSZER_CNT, 0);
5255 nw64_mac(RXMAC_CRC_ER_CNT, 0);
5256 nw64_mac(RXMAC_CD_VIO_CNT, 0);
5257 nw64_mac(LINK_FAULT_CNT, 0);
5258}
5259
5260static void niu_init_rx_bmac(struct niu *np)
5261{
5262 struct niu_parent *parent = np->parent;
5263 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
5264 int first_rdc_table = tp->first_table_num;
5265 unsigned long i;
5266 u64 val;
5267
5268 nw64_mac(BMAC_ADD_FILT0, 0);
5269 nw64_mac(BMAC_ADD_FILT1, 0);
5270 nw64_mac(BMAC_ADD_FILT2, 0);
5271 nw64_mac(BMAC_ADD_FILT12_MASK, 0);
5272 nw64_mac(BMAC_ADD_FILT00_MASK, 0);
5273 for (i = 0; i < MAC_NUM_HASH; i++)
5274 nw64_mac(BMAC_HASH_TBL(i), 0);
5275 niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
5276 niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5277 nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);
5278
5279 val = nr64_mac(BRXMAC_CONFIG);
5280 val &= ~(BRXMAC_CONFIG_ENABLE |
5281 BRXMAC_CONFIG_STRIP_PAD |
5282 BRXMAC_CONFIG_STRIP_FCS |
5283 BRXMAC_CONFIG_PROMISC |
5284 BRXMAC_CONFIG_PROMISC_GRP |
5285 BRXMAC_CONFIG_ADDR_FILT_EN |
5286 BRXMAC_CONFIG_DISCARD_DIS);
5287 val |= (BRXMAC_CONFIG_HASH_FILT_EN);
5288 nw64_mac(BRXMAC_CONFIG, val);
5289
5290 val = nr64_mac(BMAC_ADDR_CMPEN);
5291 val |= BMAC_ADDR_CMPEN_EN0;
5292 nw64_mac(BMAC_ADDR_CMPEN, val);
5293}
5294
5295static void niu_init_rx_mac(struct niu *np)
5296{
5297 niu_set_primary_mac(np, np->dev->dev_addr);
5298
5299 if (np->flags & NIU_FLAGS_XMAC)
5300 niu_init_rx_xmac(np);
5301 else
5302 niu_init_rx_bmac(np);
5303}
5304
5305static void niu_enable_tx_xmac(struct niu *np, int on)
5306{
5307 u64 val = nr64_mac(XMAC_CONFIG);
5308
5309 if (on)
5310 val |= XMAC_CONFIG_TX_ENABLE;
5311 else
5312 val &= ~XMAC_CONFIG_TX_ENABLE;
5313 nw64_mac(XMAC_CONFIG, val);
5314}
5315
5316static void niu_enable_tx_bmac(struct niu *np, int on)
5317{
5318 u64 val = nr64_mac(BTXMAC_CONFIG);
5319
5320 if (on)
5321 val |= BTXMAC_CONFIG_ENABLE;
5322 else
5323 val &= ~BTXMAC_CONFIG_ENABLE;
5324 nw64_mac(BTXMAC_CONFIG, val);
5325}
5326
5327static void niu_enable_tx_mac(struct niu *np, int on)
5328{
5329 if (np->flags & NIU_FLAGS_XMAC)
5330 niu_enable_tx_xmac(np, on);
5331 else
5332 niu_enable_tx_bmac(np, on);
5333}
5334
5335static void niu_enable_rx_xmac(struct niu *np, int on)
5336{
5337 u64 val = nr64_mac(XMAC_CONFIG);
5338
5339 val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
5340 XMAC_CONFIG_PROMISCUOUS);
5341
5342 if (np->flags & NIU_FLAGS_MCAST)
5343 val |= XMAC_CONFIG_HASH_FILTER_EN;
5344 if (np->flags & NIU_FLAGS_PROMISC)
5345 val |= XMAC_CONFIG_PROMISCUOUS;
5346
5347 if (on)
5348 val |= XMAC_CONFIG_RX_MAC_ENABLE;
5349 else
5350 val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
5351 nw64_mac(XMAC_CONFIG, val);
5352}
5353
5354static void niu_enable_rx_bmac(struct niu *np, int on)
5355{
5356 u64 val = nr64_mac(BRXMAC_CONFIG);
5357
5358 val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
5359 BRXMAC_CONFIG_PROMISC);
5360
5361 if (np->flags & NIU_FLAGS_MCAST)
5362 val |= BRXMAC_CONFIG_HASH_FILT_EN;
5363 if (np->flags & NIU_FLAGS_PROMISC)
5364 val |= BRXMAC_CONFIG_PROMISC;
5365
5366 if (on)
5367 val |= BRXMAC_CONFIG_ENABLE;
5368 else
5369 val &= ~BRXMAC_CONFIG_ENABLE;
5370 nw64_mac(BRXMAC_CONFIG, val);
5371}
5372
5373static void niu_enable_rx_mac(struct niu *np, int on)
5374{
5375 if (np->flags & NIU_FLAGS_XMAC)
5376 niu_enable_rx_xmac(np, on);
5377 else
5378 niu_enable_rx_bmac(np, on);
5379}
5380
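/* Full MAC bring-up: XIF and PCS first, then reset and initialize
 * the TX and RX MACs before enabling them.
 */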
5381static int niu_init_mac(struct niu *np)
5382{
5383 int err;
5384
5385 niu_init_xif(np);
5386 err = niu_init_pcs(np);
5387 if (err)
5388 return err;
5389
5390 err = niu_reset_tx_mac(np);
5391 if (err)
5392 return err;
5393 niu_init_tx_mac(np);
5394 err = niu_reset_rx_mac(np);
5395 if (err)
5396 return err;
5397 niu_init_rx_mac(np);
5398
5399 /* This looks hokey, but the RX MAC reset we just did will
5400 * undo some of the state we set up in niu_init_tx_mac() so we
5401 * have to call it again. In particular, the RX MAC reset will
5402 * set the XMAC_MAX register back to its default value.
5403 */
5404 niu_init_tx_mac(np);
5405 niu_enable_tx_mac(np, 1);
5406
5407 niu_enable_rx_mac(np, 1);
5408
5409 return 0;
5410}
5411
5412static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
5413{
5414 (void) niu_tx_channel_stop(np, rp->tx_channel);
5415}
5416
5417static void niu_stop_tx_channels(struct niu *np)
5418{
5419 int i;
5420
5421 for (i = 0; i < np->num_tx_rings; i++) {
5422 struct tx_ring_info *rp = &np->tx_rings[i];
5423
5424 niu_stop_one_tx_channel(np, rp);
5425 }
5426}
5427
5428static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
5429{
5430 (void) niu_tx_channel_reset(np, rp->tx_channel);
5431}
5432
5433static void niu_reset_tx_channels(struct niu *np)
5434{
5435 int i;
5436
5437 for (i = 0; i < np->num_tx_rings; i++) {
5438 struct tx_ring_info *rp = &np->tx_rings[i];
5439
5440 niu_reset_one_tx_channel(np, rp);
5441 }
5442}
5443
5444static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5445{
5446 (void) niu_enable_rx_channel(np, rp->rx_channel, 0);
5447}
5448
5449static void niu_stop_rx_channels(struct niu *np)
5450{
5451 int i;
5452
5453 for (i = 0; i < np->num_rx_rings; i++) {
5454 struct rx_ring_info *rp = &np->rx_rings[i];
5455
5456 niu_stop_one_rx_channel(np, rp);
5457 }
5458}
5459
5460static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5461{
5462 int channel = rp->rx_channel;
5463
5464 (void) niu_rx_channel_reset(np, channel);
5465 nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
5466 nw64(RX_DMA_CTL_STAT(channel), 0);
5467 (void) niu_enable_rx_channel(np, channel, 0);
5468}
5469
5470static void niu_reset_rx_channels(struct niu *np)
5471{
5472 int i;
5473
5474 for (i = 0; i < np->num_rx_rings; i++) {
5475 struct rx_ring_info *rp = &np->rx_rings[i];
5476
5477 niu_reset_one_rx_channel(np, rp);
5478 }
5479}
5480
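/* Wait for the IPP DFIFO read and write pointers to meet (the FIFO
 * has drained), then disable the IPP and soft-reset it.
 */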
5481static void niu_disable_ipp(struct niu *np)
5482{
5483 u64 rd, wr, val;
5484 int limit;
5485
5486 rd = nr64_ipp(IPP_DFIFO_RD_PTR);
5487 wr = nr64_ipp(IPP_DFIFO_WR_PTR);
5488 limit = 100;
5489 while (--limit >= 0 && (rd != wr)) {
5490 rd = nr64_ipp(IPP_DFIFO_RD_PTR);
5491 wr = nr64_ipp(IPP_DFIFO_WR_PTR);
5492 }
5493 if (limit < 0 &&
5494 (rd != 0 && wr != 1)) {
5495 dev_err(np->device, PFX "%s: IPP would not quiesce, "
5496 "rd_ptr[%llx] wr_ptr[%llx]\n",
5497 np->dev->name,
5498 (unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR),
5499 (unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR));
5500 }
5501
5502 val = nr64_ipp(IPP_CFIG);
5503 val &= ~(IPP_CFIG_IPP_ENABLE |
5504 IPP_CFIG_DFIFO_ECC_EN |
5505 IPP_CFIG_DROP_BAD_CRC |
5506 IPP_CFIG_CKSUM_EN);
5507 nw64_ipp(IPP_CFIG, val);
5508
5509 (void) niu_ipp_reset(np);
5510}
5511
5512static int niu_init_hw(struct niu *np)
5513{
5514 int i, err;
5515
5516 niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name);
5517 niu_txc_enable_port(np, 1);
5518 niu_txc_port_dma_enable(np, 1);
5519 niu_txc_set_imask(np, 0);
5520
5521 niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name);
5522 for (i = 0; i < np->num_tx_rings; i++) {
5523 struct tx_ring_info *rp = &np->tx_rings[i];
5524
5525 err = niu_init_one_tx_channel(np, rp);
5526 if (err)
5527 return err;
5528 }
5529
5530 niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name);
5531 err = niu_init_rx_channels(np);
5532 if (err)
5533 goto out_uninit_tx_channels;
5534
5535 niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name);
5536 err = niu_init_classifier_hw(np);
5537 if (err)
5538 goto out_uninit_rx_channels;
5539
5540 niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name);
5541 err = niu_init_zcp(np);
5542 if (err)
5543 goto out_uninit_rx_channels;
5544
5545 niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name);
5546 err = niu_init_ipp(np);
5547 if (err)
5548 goto out_uninit_rx_channels;
5549
5550 niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name);
5551 err = niu_init_mac(np);
5552 if (err)
5553 goto out_uninit_ipp;
5554
5555 return 0;
5556
5557out_uninit_ipp:
5558 niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name);
5559 niu_disable_ipp(np);
5560
5561out_uninit_rx_channels:
5562 niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name);
5563 niu_stop_rx_channels(np);
5564 niu_reset_rx_channels(np);
5565
5566out_uninit_tx_channels:
5567 niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name);
5568 niu_stop_tx_channels(np);
5569 niu_reset_tx_channels(np);
5570
5571 return err;
5572}
5573
5574static void niu_stop_hw(struct niu *np)
5575{
5576 niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name);
5577 niu_enable_interrupts(np, 0);
5578
5579 niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name);
5580 niu_enable_rx_mac(np, 0);
5581
5582 niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name);
5583 niu_disable_ipp(np);
5584
5585 niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name);
5586 niu_stop_tx_channels(np);
5587
5588 niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name);
5589 niu_stop_rx_channels(np);
5590
5591 niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name);
5592 niu_reset_tx_channels(np);
5593
5594 niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name);
5595 niu_reset_rx_channels(np);
5596}
5597
5598static int niu_request_irq(struct niu *np)
5599{
5600 int i, j, err;
5601
5602 err = 0;
5603 for (i = 0; i < np->num_ldg; i++) {
5604 struct niu_ldg *lp = &np->ldg[i];
5605
5606 err = request_irq(lp->irq, niu_interrupt,
5607 IRQF_SHARED | IRQF_SAMPLE_RANDOM,
5608 np->dev->name, lp);
5609 if (err)
5610 goto out_free_irqs;
5611
5612 }
5613
5614 return 0;
5615
5616out_free_irqs:
5617 for (j = 0; j < i; j++) {
5618 struct niu_ldg *lp = &np->ldg[j];
5619
5620 free_irq(lp->irq, lp);
5621 }
5622 return err;
5623}
5624
5625static void niu_free_irq(struct niu *np)
5626{
5627 int i;
5628
5629 for (i = 0; i < np->num_ldg; i++) {
5630 struct niu_ldg *lp = &np->ldg[i];
5631
5632 free_irq(lp->irq, lp);
5633 }
5634}
5635
5636static void niu_enable_napi(struct niu *np)
5637{
5638 int i;
5639
5640 for (i = 0; i < np->num_ldg; i++)
5641 napi_enable(&np->ldg[i].napi);
5642}
5643
5644static void niu_disable_napi(struct niu *np)
5645{
5646 int i;
5647
5648 for (i = 0; i < np->num_ldg; i++)
5649 napi_disable(&np->ldg[i].napi);
5650}
5651
5652static int niu_open(struct net_device *dev)
5653{
5654 struct niu *np = netdev_priv(dev);
5655 int err;
5656
5657 netif_carrier_off(dev);
5658
5659 err = niu_alloc_channels(np);
5660 if (err)
5661 goto out_err;
5662
5663 err = niu_enable_interrupts(np, 0);
5664 if (err)
5665 goto out_free_channels;
5666
5667 err = niu_request_irq(np);
5668 if (err)
5669 goto out_free_channels;
5670
5671 niu_enable_napi(np);
5672
5673 spin_lock_irq(&np->lock);
5674
5675 err = niu_init_hw(np);
5676 if (!err) {
5677 init_timer(&np->timer);
5678 np->timer.expires = jiffies + HZ;
5679 np->timer.data = (unsigned long) np;
5680 np->timer.function = niu_timer;
5681
5682 err = niu_enable_interrupts(np, 1);
5683 if (err)
5684 niu_stop_hw(np);
5685 }
5686
5687 spin_unlock_irq(&np->lock);
5688
5689 if (err) {
5690 niu_disable_napi(np);
5691 goto out_free_irq;
5692 }
5693
5694 netif_tx_start_all_queues(dev);
5695
5696 if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
5697 netif_carrier_on(dev);
5698
5699 add_timer(&np->timer);
5700
5701 return 0;
5702
5703out_free_irq:
5704 niu_free_irq(np);
5705
5706out_free_channels:
5707 niu_free_channels(np);
5708
5709out_err:
5710 return err;
5711}
5712
5713static void niu_full_shutdown(struct niu *np, struct net_device *dev)
5714{
5715 cancel_work_sync(&np->reset_task);
5716
5717 niu_disable_napi(np);
5718 netif_tx_stop_all_queues(dev);
5719
5720 del_timer_sync(&np->timer);
5721
5722 spin_lock_irq(&np->lock);
5723
5724 niu_stop_hw(np);
5725
5726 spin_unlock_irq(&np->lock);
5727}
5728
5729static int niu_close(struct net_device *dev)
5730{
5731 struct niu *np = netdev_priv(dev);
5732
5733 niu_full_shutdown(np, dev);
5734
5735 niu_free_irq(np);
5736
5737 niu_free_channels(np);
5738
5739 niu_handle_led(np, 0);
5740
5741 return 0;
5742}
5743
5744static void niu_sync_xmac_stats(struct niu *np)
5745{
5746 struct niu_xmac_stats *mp = &np->mac_stats.xmac;
5747
5748 mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
5749 mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);
5750
5751 mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
5752 mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
5753 mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
5754 mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
5755 mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
5756 mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
5757 mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
5758 mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
5759 mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
5760 mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
5761 mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
5762 mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
5763 mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
5764 mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
5765 mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
5766 mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
5767}
5768
5769static void niu_sync_bmac_stats(struct niu *np)
5770{
5771 struct niu_bmac_stats *mp = &np->mac_stats.bmac;
5772
5773 mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
5774 mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
5775
5776 mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
5777 mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
5778 mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
5779 mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
5780}
5781
5782static void niu_sync_mac_stats(struct niu *np)
5783{
5784 if (np->flags & NIU_FLAGS_XMAC)
5785 niu_sync_xmac_stats(np);
5786 else
5787 niu_sync_bmac_stats(np);
5788}
5789
5790static void niu_get_rx_stats(struct niu *np)
5791{
5792 unsigned long pkts, dropped, errors, bytes;
5793 int i;
5794
5795 pkts = dropped = errors = bytes = 0;
5796 for (i = 0; i < np->num_rx_rings; i++) {
5797 struct rx_ring_info *rp = &np->rx_rings[i];
5798
5799 pkts += rp->rx_packets;
5800 bytes += rp->rx_bytes;
5801 dropped += rp->rx_dropped;
5802 errors += rp->rx_errors;
5803 }
5804 np->net_stats.rx_packets = pkts;
5805 np->net_stats.rx_bytes = bytes;
5806 np->net_stats.rx_dropped = dropped;
5807 np->net_stats.rx_errors = errors;
5808}
5809
5810static void niu_get_tx_stats(struct niu *np)
5811{
5812 unsigned long pkts, errors, bytes;
5813 int i;
5814
5815 pkts = errors = bytes = 0;
5816 for (i = 0; i < np->num_tx_rings; i++) {
5817 struct tx_ring_info *rp = &np->tx_rings[i];
5818
5819 pkts += rp->tx_packets;
5820 bytes += rp->tx_bytes;
5821 errors += rp->tx_errors;
5822 }
5823 np->net_stats.tx_packets = pkts;
5824 np->net_stats.tx_bytes = bytes;
5825 np->net_stats.tx_errors = errors;
5826}
5827
5828static struct net_device_stats *niu_get_stats(struct net_device *dev)
5829{
5830 struct niu *np = netdev_priv(dev);
5831
5832 niu_get_rx_stats(np);
5833 niu_get_tx_stats(np);
5834
5835 return &np->net_stats;
5836}
5837
5838static void niu_load_hash_xmac(struct niu *np, u16 *hash)
5839{
5840 int i;
5841
5842 for (i = 0; i < 16; i++)
5843 nw64_mac(XMAC_HASH_TBL(i), hash[i]);
5844}
5845
5846static void niu_load_hash_bmac(struct niu *np, u16 *hash)
5847{
5848 int i;
5849
5850 for (i = 0; i < 16; i++)
5851 nw64_mac(BMAC_HASH_TBL(i), hash[i]);
5852}
5853
5854static void niu_load_hash(struct niu *np, u16 *hash)
5855{
5856 if (np->flags & NIU_FLAGS_XMAC)
5857 niu_load_hash_xmac(np, hash);
5858 else
5859 niu_load_hash_bmac(np, hash);
5860}
5861
5862static void niu_set_rx_mode(struct net_device *dev)
5863{
5864 struct niu *np = netdev_priv(dev);
5865 int i, alt_cnt, err;
5866 struct dev_addr_list *addr;
5867 unsigned long flags;
5868 u16 hash[16] = { 0, };
5869
5870 spin_lock_irqsave(&np->lock, flags);
5871 niu_enable_rx_mac(np, 0);
5872
5873 np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
5874 if (dev->flags & IFF_PROMISC)
5875 np->flags |= NIU_FLAGS_PROMISC;
5876 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
5877 np->flags |= NIU_FLAGS_MCAST;
5878
5879 alt_cnt = dev->uc_count;
5880 if (alt_cnt > niu_num_alt_addr(np)) {
5881 alt_cnt = 0;
5882 np->flags |= NIU_FLAGS_PROMISC;
5883 }
5884
5885 if (alt_cnt) {
5886 int index = 0;
5887
5888 for (addr = dev->uc_list; addr; addr = addr->next) {
5889 err = niu_set_alt_mac(np, index,
5890 addr->da_addr);
5891 if (err)
5892 printk(KERN_WARNING PFX "%s: Error %d "
5893 "adding alt mac %d\n",
5894 dev->name, err, index);
5895 err = niu_enable_alt_mac(np, index, 1);
5896 if (err)
5897 printk(KERN_WARNING PFX "%s: Error %d "
5898 "enabling alt mac %d\n",
5899 dev->name, err, index);
5900
5901 index++;
5902 }
5903 } else {
5904 int alt_start;
5905 if (np->flags & NIU_FLAGS_XMAC)
5906 alt_start = 0;
5907 else
5908 alt_start = 1;
5909 for (i = alt_start; i < niu_num_alt_addr(np); i++) {
5910 err = niu_enable_alt_mac(np, i, 0);
5911 if (err)
5912 printk(KERN_WARNING PFX "%s: Error %d "
5913 "disabling alt mac %d\n",
5914 dev->name, err, i);
5915 }
5916 }
5917 if (dev->flags & IFF_ALLMULTI) {
5918 for (i = 0; i < 16; i++)
5919 hash[i] = 0xffff;
5920 } else if (dev->mc_count > 0) {
5921 for (addr = dev->mc_list; addr; addr = addr->next) {
5922 u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);
5923
5924 crc >>= 24;
5925 hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
5926 }
5927 }
5928
5929 if (np->flags & NIU_FLAGS_MCAST)
5930 niu_load_hash(np, hash);
5931
5932 niu_enable_rx_mac(np, 1);
5933 spin_unlock_irqrestore(&np->lock, flags);
5934}
5935
5936static int niu_set_mac_addr(struct net_device *dev, void *p)
5937{
5938 struct niu *np = netdev_priv(dev);
5939 struct sockaddr *addr = p;
5940 unsigned long flags;
5941
5942 if (!is_valid_ether_addr(addr->sa_data))
5943 return -EINVAL;
5944
5945 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
5946
5947 if (!netif_running(dev))
5948 return 0;
5949
5950 spin_lock_irqsave(&np->lock, flags);
5951 niu_enable_rx_mac(np, 0);
5952 niu_set_primary_mac(np, dev->dev_addr);
5953 niu_enable_rx_mac(np, 1);
5954 spin_unlock_irqrestore(&np->lock, flags);
5955
5956 return 0;
5957}
5958
5959static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5960{
5961 return -EOPNOTSUPP;
5962}
5963
5964static void niu_netif_stop(struct niu *np)
5965{
5966 np->dev->trans_start = jiffies; /* prevent tx timeout */
5967
5968 niu_disable_napi(np);
5969
5970 netif_tx_disable(np->dev);
5971}
5972
5973static void niu_netif_start(struct niu *np)
5974{
5975 /* NOTE: unconditionally waking all TX queues is only
5976 * appropriate so long as all callers are assured to have
5977 * free tx slots (such as after niu_init_hw).
5978 */
5979 netif_tx_wake_all_queues(np->dev);
5980
5981 niu_enable_napi(np);
5982
5983 niu_enable_interrupts(np, 1);
5984}
5985
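/* Rebuild the RX and TX ring state after a reset: re-link the pages
 * still held in each RX ring's hash table back into the RBR, refill
 * any remaining slots, and release every pending TX packet.
 */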
5986static void niu_reset_buffers(struct niu *np)
5987{
5988 int i, j, k, err;
5989
5990 if (np->rx_rings) {
5991 for (i = 0; i < np->num_rx_rings; i++) {
5992 struct rx_ring_info *rp = &np->rx_rings[i];
5993
5994 for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
5995 struct page *page;
5996
5997 page = rp->rxhash[j];
5998 while (page) {
5999 struct page *next =
6000 (struct page *) page->mapping;
6001 u64 base = page->index;
6002 base = base >> RBR_DESCR_ADDR_SHIFT;
6003 rp->rbr[k++] = cpu_to_le32(base);
6004 page = next;
6005 }
6006 }
6007 for (; k < MAX_RBR_RING_SIZE; k++) {
6008 err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
6009 if (unlikely(err))
6010 break;
6011 }
6012
6013 rp->rbr_index = rp->rbr_table_size - 1;
6014 rp->rcr_index = 0;
6015 rp->rbr_pending = 0;
6016 rp->rbr_refill_pending = 0;
6017 }
6018 }
6019 if (np->tx_rings) {
6020 for (i = 0; i < np->num_tx_rings; i++) {
6021 struct tx_ring_info *rp = &np->tx_rings[i];
6022
6023 for (j = 0; j < MAX_TX_RING_SIZE; j++) {
6024 if (rp->tx_buffs[j].skb)
6025 (void) release_tx_packet(np, rp, j);
6026 }
6027
6028 rp->pending = MAX_TX_RING_SIZE;
6029 rp->prod = 0;
6030 rp->cons = 0;
6031 rp->wrap_bit = 0;
6032 }
6033 }
6034}
6035
6036static void niu_reset_task(struct work_struct *work)
6037{
6038 struct niu *np = container_of(work, struct niu, reset_task);
6039 unsigned long flags;
6040 int err;
6041
6042 spin_lock_irqsave(&np->lock, flags);
6043 if (!netif_running(np->dev)) {
6044 spin_unlock_irqrestore(&np->lock, flags);
6045 return;
6046 }
6047
6048 spin_unlock_irqrestore(&np->lock, flags);
6049
6050 del_timer_sync(&np->timer);
6051
6052 niu_netif_stop(np);
6053
6054 spin_lock_irqsave(&np->lock, flags);
6055
6056 niu_stop_hw(np);
6057
6058 spin_unlock_irqrestore(&np->lock, flags);
6059
6060 niu_reset_buffers(np);
6061
6062 spin_lock_irqsave(&np->lock, flags);
6063
6064 err = niu_init_hw(np);
6065 if (!err) {
6066 np->timer.expires = jiffies + HZ;
6067 add_timer(&np->timer);
6068 niu_netif_start(np);
6069 }
6070
6071 spin_unlock_irqrestore(&np->lock, flags);
6072}
6073
6074static void niu_tx_timeout(struct net_device *dev)
6075{
6076 struct niu *np = netdev_priv(dev);
6077
6078 dev_err(np->device, PFX "%s: Transmit timed out, resetting\n",
6079 dev->name);
6080
6081 schedule_work(&np->reset_task);
6082}
6083
6084static void niu_set_txd(struct tx_ring_info *rp, int index,
6085 u64 mapping, u64 len, u64 mark,
6086 u64 n_frags)
6087{
6088 __le64 *desc = &rp->descr[index];
6089
6090 *desc = cpu_to_le64(mark |
6091 (n_frags << TX_DESC_NUM_PTR_SHIFT) |
6092 (len << TX_DESC_TR_LEN_SHIFT) |
6093 (mapping & TX_DESC_SAD));
6094}
6095
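/* Build the 64-bit TX packet header flags word: packet length, pad,
 * L3/L4 header offsets (in 2-byte units), IHL, checksum type, and
 * VLAN/LLC/IP-version markers.
 */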
6096static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
6097 u64 pad_bytes, u64 len)
6098{
6099 u16 eth_proto, eth_proto_inner;
6100 u64 csum_bits, l3off, ihl, ret;
6101 u8 ip_proto;
6102 int ipv6;
6103
6104 eth_proto = be16_to_cpu(ehdr->h_proto);
6105 eth_proto_inner = eth_proto;
6106 if (eth_proto == ETH_P_8021Q) {
6107 struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
6108 __be16 val = vp->h_vlan_encapsulated_proto;
6109
6110 eth_proto_inner = be16_to_cpu(val);
6111 }
6112
6113 ipv6 = ihl = 0;
6114 switch (skb->protocol) {
6115 case __constant_htons(ETH_P_IP):
6116 ip_proto = ip_hdr(skb)->protocol;
6117 ihl = ip_hdr(skb)->ihl;
6118 break;
6119 case __constant_htons(ETH_P_IPV6):
6120 ip_proto = ipv6_hdr(skb)->nexthdr;
6121 ihl = (40 >> 2);
6122 ipv6 = 1;
6123 break;
6124 default:
6125 ip_proto = ihl = 0;
6126 break;
6127 }
6128
6129 csum_bits = TXHDR_CSUM_NONE;
6130 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6131 u64 start, stuff;
6132
6133 csum_bits = (ip_proto == IPPROTO_TCP ?
6134 TXHDR_CSUM_TCP :
6135 (ip_proto == IPPROTO_UDP ?
6136 TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
6137
6138 start = skb_transport_offset(skb) -
6139 (pad_bytes + sizeof(struct tx_pkt_hdr));
6140 stuff = start + skb->csum_offset;
6141
6142 csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
6143 csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
6144 }
6145
6146 l3off = skb_network_offset(skb) -
6147 (pad_bytes + sizeof(struct tx_pkt_hdr));
6148
6149 ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
6150 (len << TXHDR_LEN_SHIFT) |
6151 ((l3off / 2) << TXHDR_L3START_SHIFT) |
6152 (ihl << TXHDR_IHL_SHIFT) |
6153 ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) |
6154 ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
6155 (ipv6 ? TXHDR_IP_VER : 0) |
6156 csum_bits);
6157
6158 return ret;
6159}
6160
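/* Hard-start transmit: select the TX ring from the skb's queue
 * mapping, pad short frames to ETH_ZLEN, prepend the tx_pkt_hdr in
 * the headroom, then chop the packet into MAX_TX_DESC_LEN-sized
 * descriptors and kick the channel.
 */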
6161static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev)
6162{
6163 struct niu *np = netdev_priv(dev);
6164 unsigned long align, headroom;
6165 struct netdev_queue *txq;
6166 struct tx_ring_info *rp;
6167 struct tx_pkt_hdr *tp;
6168 unsigned int len, nfg;
6169 struct ethhdr *ehdr;
6170 int prod, i, tlen;
6171 u64 mapping, mrk;
6172
6173 i = skb_get_queue_mapping(skb);
6174 rp = &np->tx_rings[i];
6175 txq = netdev_get_tx_queue(dev, i);
6176
6177 if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
6178 netif_tx_stop_queue(txq);
6179 dev_err(np->device, PFX "%s: BUG! Tx ring full when "
6180 "queue awake!\n", dev->name);
6181 rp->tx_errors++;
6182 return NETDEV_TX_BUSY;
6183 }
6184
6185 if (skb->len < ETH_ZLEN) {
6186 unsigned int pad_bytes = ETH_ZLEN - skb->len;
6187
6188 if (skb_pad(skb, pad_bytes))
6189 goto out;
6190 skb_put(skb, pad_bytes);
6191 }
6192
6193 len = sizeof(struct tx_pkt_hdr) + 15;
6194 if (skb_headroom(skb) < len) {
6195 struct sk_buff *skb_new;
6196
6197 skb_new = skb_realloc_headroom(skb, len);
6198 if (!skb_new) {
6199 rp->tx_errors++;
6200 goto out_drop;
6201 }
6202 kfree_skb(skb);
6203 skb = skb_new;
6204 } else
6205 skb_orphan(skb);
6206
6207 align = ((unsigned long) skb->data & (16 - 1));
6208 headroom = align + sizeof(struct tx_pkt_hdr);
6209
6210 ehdr = (struct ethhdr *) skb->data;
6211 tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);
6212
6213 len = skb->len - sizeof(struct tx_pkt_hdr);
6214 tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
6215 tp->resv = 0;
6216
6217 len = skb_headlen(skb);
6218 mapping = np->ops->map_single(np->device, skb->data,
6219 len, DMA_TO_DEVICE);
6220
6221 prod = rp->prod;
6222
6223 rp->tx_buffs[prod].skb = skb;
6224 rp->tx_buffs[prod].mapping = mapping;
6225
6226 mrk = TX_DESC_SOP;
6227 if (++rp->mark_counter == rp->mark_freq) {
6228 rp->mark_counter = 0;
6229 mrk |= TX_DESC_MARK;
6230 rp->mark_pending++;
6231 }
6232
6233 tlen = len;
6234 nfg = skb_shinfo(skb)->nr_frags;
6235 while (tlen > 0) {
6236 tlen -= MAX_TX_DESC_LEN;
6237 nfg++;
6238 }
6239
6240 while (len > 0) {
6241 unsigned int this_len = len;
6242
6243 if (this_len > MAX_TX_DESC_LEN)
6244 this_len = MAX_TX_DESC_LEN;
6245
6246 niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
6247 mrk = nfg = 0;
6248
6249 prod = NEXT_TX(rp, prod);
6250 mapping += this_len;
6251 len -= this_len;
6252 }
6253
6254 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6255 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6256
6257 len = frag->size;
6258 mapping = np->ops->map_page(np->device, frag->page,
6259 frag->page_offset, len,
6260 DMA_TO_DEVICE);
6261
6262 rp->tx_buffs[prod].skb = NULL;
6263 rp->tx_buffs[prod].mapping = mapping;
6264
6265 niu_set_txd(rp, prod, mapping, len, 0, 0);
6266
6267 prod = NEXT_TX(rp, prod);
6268 }
6269
6270 if (prod < rp->prod)
6271 rp->wrap_bit ^= TX_RING_KICK_WRAP;
6272 rp->prod = prod;
6273
6274 nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
6275
6276 if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
6277 netif_tx_stop_queue(txq);
6278 if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
6279 netif_tx_wake_queue(txq);
6280 }
6281
6282 dev->trans_start = jiffies;
6283
6284out:
6285 return NETDEV_TX_OK;
6286
6287out_drop:
6288 rp->tx_errors++;
6289 kfree_skb(skb);
6290 goto out;
6291}
6292
6293static int niu_change_mtu(struct net_device *dev, int new_mtu)
6294{
6295 struct niu *np = netdev_priv(dev);
6296 int err, orig_jumbo, new_jumbo;
6297
6298 if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
6299 return -EINVAL;
6300
6301 orig_jumbo = (dev->mtu > ETH_DATA_LEN);
6302 new_jumbo = (new_mtu > ETH_DATA_LEN);
6303
6304 dev->mtu = new_mtu;
6305
6306 if (!netif_running(dev) ||
6307 (orig_jumbo == new_jumbo))
6308 return 0;
6309
6310 niu_full_shutdown(np, dev);
6311
6312 niu_free_channels(np);
6313
6314 niu_enable_napi(np);
6315
6316 err = niu_alloc_channels(np);
6317 if (err)
6318 return err;
6319
6320 spin_lock_irq(&np->lock);
6321
6322 err = niu_init_hw(np);
6323 if (!err) {
6324 init_timer(&np->timer);
6325 np->timer.expires = jiffies + HZ;
6326 np->timer.data = (unsigned long) np;
6327 np->timer.function = niu_timer;
6328
6329 err = niu_enable_interrupts(np, 1);
6330 if (err)
6331 niu_stop_hw(np);
6332 }
6333
6334 spin_unlock_irq(&np->lock);
6335
6336 if (!err) {
6337 netif_tx_start_all_queues(dev);
6338 if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
6339 netif_carrier_on(dev);
6340
6341 add_timer(&np->timer);
6342 }
6343
6344 return err;
6345}
6346
6347static void niu_get_drvinfo(struct net_device *dev,
6348 struct ethtool_drvinfo *info)
6349{
6350 struct niu *np = netdev_priv(dev);
6351 struct niu_vpd *vpd = &np->vpd;
6352
6353 strcpy(info->driver, DRV_MODULE_NAME);
6354 strcpy(info->version, DRV_MODULE_VERSION);
6355 sprintf(info->fw_version, "%d.%d",
6356 vpd->fcode_major, vpd->fcode_minor);
6357 if (np->parent->plat_type != PLAT_TYPE_NIU)
6358 strcpy(info->bus_info, pci_name(np->pdev));
6359}
6360
6361static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6362{
6363 struct niu *np = netdev_priv(dev);
6364 struct niu_link_config *lp;
6365
6366 lp = &np->link_config;
6367
6368 memset(cmd, 0, sizeof(*cmd));
6369 cmd->phy_address = np->phy_addr;
6370 cmd->supported = lp->supported;
6371 cmd->advertising = lp->advertising;
6372 cmd->autoneg = lp->autoneg;
6373 cmd->speed = lp->active_speed;
6374 cmd->duplex = lp->active_duplex;
6375
6376 return 0;
6377}
6378
6379static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6380{
6381 return -EINVAL;
6382}
6383
6384static u32 niu_get_msglevel(struct net_device *dev)
6385{
6386 struct niu *np = netdev_priv(dev);
6387 return np->msg_enable;
6388}
6389
6390static void niu_set_msglevel(struct net_device *dev, u32 value)
6391{
6392 struct niu *np = netdev_priv(dev);
6393 np->msg_enable = value;
6394}
6395
6396static int niu_get_eeprom_len(struct net_device *dev)
6397{
6398 struct niu *np = netdev_priv(dev);
6399
6400 return np->eeprom_len;
6401}
6402
6403static int niu_get_eeprom(struct net_device *dev,
6404 struct ethtool_eeprom *eeprom, u8 *data)
6405{
6406 struct niu *np = netdev_priv(dev);
6407 u32 offset, len, val;
6408
6409 offset = eeprom->offset;
6410 len = eeprom->len;
6411
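	/* The first test catches u32 overflow of offset + len. */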
6412 if (offset + len < offset)
6413 return -EINVAL;
6414 if (offset >= np->eeprom_len)
6415 return -EINVAL;
6416 if (offset + len > np->eeprom_len)
6417 len = eeprom->len = np->eeprom_len - offset;
6418
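	/* The EEPROM is read one aligned 32-bit word at a time, so copy
	 * in three phases: an unaligned head, whole words, then the tail.
	 */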
6419 if (offset & 3) {
6420 u32 b_offset, b_count;
6421
6422 b_offset = offset & 3;
6423 b_count = 4 - b_offset;
6424 if (b_count > len)
6425 b_count = len;
6426
6427 val = nr64(ESPC_NCR((offset - b_offset) / 4));
6428 memcpy(data, ((char *)&val) + b_offset, b_count);
6429 data += b_count;
6430 len -= b_count;
6431 offset += b_count;
6432 }
6433 while (len >= 4) {
6434 val = nr64(ESPC_NCR(offset / 4));
6435 memcpy(data, &val, 4);
6436 data += 4;
6437 len -= 4;
6438 offset += 4;
6439 }
6440 if (len) {
6441 val = nr64(ESPC_NCR(offset / 4));
6442 memcpy(data, &val, len);
6443 }
6444 return 0;
6445}
6446
Santwona Beherab4653e92008-07-02 03:49:11 -07006447static int niu_ethflow_to_class(int flow_type, u64 *class)
6448{
6449 switch (flow_type) {
6450 case TCP_V4_FLOW:
6451 *class = CLASS_CODE_TCP_IPV4;
6452 break;
6453 case UDP_V4_FLOW:
6454 *class = CLASS_CODE_UDP_IPV4;
6455 break;
6456 case AH_ESP_V4_FLOW:
6457 *class = CLASS_CODE_AH_ESP_IPV4;
6458 break;
6459 case SCTP_V4_FLOW:
6460 *class = CLASS_CODE_SCTP_IPV4;
6461 break;
6462 case TCP_V6_FLOW:
6463 *class = CLASS_CODE_TCP_IPV6;
6464 break;
6465 case UDP_V6_FLOW:
6466 *class = CLASS_CODE_UDP_IPV6;
6467 break;
6468 case AH_ESP_V6_FLOW:
6469 *class = CLASS_CODE_AH_ESP_IPV6;
6470 break;
6471 case SCTP_V6_FLOW:
6472 *class = CLASS_CODE_SCTP_IPV6;
6473 break;
6474 default:
Andreas Schwab38c080f2008-07-29 23:59:20 -07006475 return 0;
Santwona Beherab4653e92008-07-02 03:49:11 -07006476 }
6477
6478 return 1;
6479}
6480
6481static u64 niu_flowkey_to_ethflow(u64 flow_key)
6482{
6483 u64 ethflow = 0;
6484
6485 if (flow_key & FLOW_KEY_PORT)
6486 ethflow |= RXH_DEV_PORT;
6487 if (flow_key & FLOW_KEY_L2DA)
6488 ethflow |= RXH_L2DA;
6489 if (flow_key & FLOW_KEY_VLAN)
6490 ethflow |= RXH_VLAN;
6491 if (flow_key & FLOW_KEY_IPSA)
6492 ethflow |= RXH_IP_SRC;
6493 if (flow_key & FLOW_KEY_IPDA)
6494 ethflow |= RXH_IP_DST;
6495 if (flow_key & FLOW_KEY_PROTO)
6496 ethflow |= RXH_L3_PROTO;
6497 if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
6498 ethflow |= RXH_L4_B_0_1;
6499 if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
6500 ethflow |= RXH_L4_B_2_3;
6501
6502 return ethflow;
6503
6505
6506static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
6507{
6508 u64 key = 0;
6509
6510 if (ethflow & RXH_DEV_PORT)
6511 key |= FLOW_KEY_PORT;
6512 if (ethflow & RXH_L2DA)
6513 key |= FLOW_KEY_L2DA;
6514 if (ethflow & RXH_VLAN)
6515 key |= FLOW_KEY_VLAN;
6516 if (ethflow & RXH_IP_SRC)
6517 key |= FLOW_KEY_IPSA;
6518 if (ethflow & RXH_IP_DST)
6519 key |= FLOW_KEY_IPDA;
6520 if (ethflow & RXH_L3_PROTO)
6521 key |= FLOW_KEY_PROTO;
6522 if (ethflow & RXH_L4_B_0_1)
6523 key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
6524 if (ethflow & RXH_L4_B_2_3)
6525 key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);
6526
6527 *flow_key = key;
6528
6529 return 1;
6531}
6532
6533static int niu_get_hash_opts(struct net_device *dev, struct ethtool_rxnfc *cmd)
6534{
6535 struct niu *np = netdev_priv(dev);
6536 u64 class;
6537
6538 cmd->data = 0;
6539
6540 if (!niu_ethflow_to_class(cmd->flow_type, &class))
6541 return -EINVAL;
6542
6543 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
6544 TCAM_KEY_DISC)
6545 cmd->data = RXH_DISCARD;
6546 else

6548 cmd->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
6549 CLASS_CODE_USER_PROG1]);
6550 return 0;
6551}
6552
6553static int niu_set_hash_opts(struct net_device *dev, struct ethtool_rxnfc *cmd)
6554{
6555 struct niu *np = netdev_priv(dev);
6556 u64 class;
6557 u64 flow_key = 0;
6558 unsigned long flags;
6559
6560 if (!niu_ethflow_to_class(cmd->flow_type, &class))
6561 return -EINVAL;
6562
6563 if (class < CLASS_CODE_USER_PROG1 ||
6564 class > CLASS_CODE_SCTP_IPV6)
6565 return -EINVAL;
6566
6567 if (cmd->data & RXH_DISCARD) {
6568 niu_lock_parent(np, flags);
6569 flow_key = np->parent->tcam_key[class -
6570 CLASS_CODE_USER_PROG1];
6571 flow_key |= TCAM_KEY_DISC;
6572 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
6573 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
6574 niu_unlock_parent(np, flags);
6575 return 0;
6576 } else {
6577 /* Discard was set before, but is not set now */
6578 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
6579 TCAM_KEY_DISC) {
6580 niu_lock_parent(np, flags);
6581 flow_key = np->parent->tcam_key[class -
6582 CLASS_CODE_USER_PROG1];
6583 flow_key &= ~TCAM_KEY_DISC;
6584 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
6585 flow_key);
6586 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
6587 flow_key;
6588 niu_unlock_parent(np, flags);
6589 }
6590 }
6591
6592 if (!niu_ethflow_to_flowkey(cmd->data, &flow_key))
6593 return -EINVAL;
6594
6595 niu_lock_parent(np, flags);
6596 nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
6597 np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
6598 niu_unlock_parent(np, flags);
6599
6600 return 0;
6601}
6602
David S. Millera3138df2007-10-09 01:54:01 -07006603static const struct {
6604 const char string[ETH_GSTRING_LEN];
6605} niu_xmac_stat_keys[] = {
6606 { "tx_frames" },
6607 { "tx_bytes" },
6608 { "tx_fifo_errors" },
6609 { "tx_overflow_errors" },
6610 { "tx_max_pkt_size_errors" },
6611 { "tx_underflow_errors" },
6612 { "rx_local_faults" },
6613 { "rx_remote_faults" },
6614 { "rx_link_faults" },
6615 { "rx_align_errors" },
6616 { "rx_frags" },
6617 { "rx_mcasts" },
6618 { "rx_bcasts" },
6619 { "rx_hist_cnt1" },
6620 { "rx_hist_cnt2" },
6621 { "rx_hist_cnt3" },
6622 { "rx_hist_cnt4" },
6623 { "rx_hist_cnt5" },
6624 { "rx_hist_cnt6" },
6625 { "rx_hist_cnt7" },
6626 { "rx_octets" },
6627 { "rx_code_violations" },
6628 { "rx_len_errors" },
6629 { "rx_crc_errors" },
6630 { "rx_underflows" },
6631 { "rx_overflows" },
6632 { "pause_off_state" },
6633 { "pause_on_state" },
6634 { "pause_received" },
6635};
6636
6637#define NUM_XMAC_STAT_KEYS ARRAY_SIZE(niu_xmac_stat_keys)
6638
6639static const struct {
6640 const char string[ETH_GSTRING_LEN];
6641} niu_bmac_stat_keys[] = {
6642 { "tx_underflow_errors" },
6643 { "tx_max_pkt_size_errors" },
6644 { "tx_bytes" },
6645 { "tx_frames" },
6646 { "rx_overflows" },
6647 { "rx_frames" },
6648 { "rx_align_errors" },
6649 { "rx_crc_errors" },
6650 { "rx_len_errors" },
6651 { "pause_off_state" },
6652 { "pause_on_state" },
6653 { "pause_received" },
6654};
6655
6656#define NUM_BMAC_STAT_KEYS ARRAY_SIZE(niu_bmac_stat_keys)
6657
6658static const struct {
6659 const char string[ETH_GSTRING_LEN];
6660} niu_rxchan_stat_keys[] = {
6661 { "rx_channel" },
6662 { "rx_packets" },
6663 { "rx_bytes" },
6664 { "rx_dropped" },
6665 { "rx_errors" },
6666};
6667
6668#define NUM_RXCHAN_STAT_KEYS ARRAY_SIZE(niu_rxchan_stat_keys)
6669
6670static const struct {
6671 const char string[ETH_GSTRING_LEN];
6672} niu_txchan_stat_keys[] = {
6673 { "tx_channel" },
6674 { "tx_packets" },
6675 { "tx_bytes" },
6676 { "tx_errors" },
6677};
6678
6679#define NUM_TXCHAN_STAT_KEYS ARRAY_SIZE(niu_txchan_stat_keys)
6680
6681static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
6682{
6683 struct niu *np = netdev_priv(dev);
6684 int i;
6685
6686 if (stringset != ETH_SS_STATS)
6687 return;
6688
6689 if (np->flags & NIU_FLAGS_XMAC) {
6690 memcpy(data, niu_xmac_stat_keys,
6691 sizeof(niu_xmac_stat_keys));
6692 data += sizeof(niu_xmac_stat_keys);
6693 } else {
6694 memcpy(data, niu_bmac_stat_keys,
6695 sizeof(niu_bmac_stat_keys));
6696 data += sizeof(niu_bmac_stat_keys);
6697 }
6698 for (i = 0; i < np->num_rx_rings; i++) {
6699 memcpy(data, niu_rxchan_stat_keys,
6700 sizeof(niu_rxchan_stat_keys));
6701 data += sizeof(niu_rxchan_stat_keys);
6702 }
6703 for (i = 0; i < np->num_tx_rings; i++) {
6704 memcpy(data, niu_txchan_stat_keys,
6705 sizeof(niu_txchan_stat_keys));
6706 data += sizeof(niu_txchan_stat_keys);
6707 }
6708}
6709
6710static int niu_get_stats_count(struct net_device *dev)
6711{
6712 struct niu *np = netdev_priv(dev);
6713
6714 return ((np->flags & NIU_FLAGS_XMAC ?
6715 NUM_XMAC_STAT_KEYS :
6716 NUM_BMAC_STAT_KEYS) +
6717 (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
6718 (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS));
6719}
6720
6721static void niu_get_ethtool_stats(struct net_device *dev,
6722 struct ethtool_stats *stats, u64 *data)
6723{
6724 struct niu *np = netdev_priv(dev);
6725 int i;
6726
6727 niu_sync_mac_stats(np);
6728 if (np->flags & NIU_FLAGS_XMAC) {
6729 memcpy(data, &np->mac_stats.xmac,
6730 sizeof(struct niu_xmac_stats));
6731 data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
6732 } else {
6733 memcpy(data, &np->mac_stats.bmac,
6734 sizeof(struct niu_bmac_stats));
6735 data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
6736 }
6737 for (i = 0; i < np->num_rx_rings; i++) {
6738 struct rx_ring_info *rp = &np->rx_rings[i];
6739
6740 data[0] = rp->rx_channel;
6741 data[1] = rp->rx_packets;
6742 data[2] = rp->rx_bytes;
6743 data[3] = rp->rx_dropped;
6744 data[4] = rp->rx_errors;
6745 data += 5;
6746 }
6747 for (i = 0; i < np->num_tx_rings; i++) {
6748 struct tx_ring_info *rp = &np->tx_rings[i];
6749
6750 data[0] = rp->tx_channel;
6751 data[1] = rp->tx_packets;
6752 data[2] = rp->tx_bytes;
6753 data[3] = rp->tx_errors;
6754 data += 4;
6755 }
6756}
6757
6758static u64 niu_led_state_save(struct niu *np)
6759{
6760 if (np->flags & NIU_FLAGS_XMAC)
6761 return nr64_mac(XMAC_CONFIG);
6762 else
6763 return nr64_mac(BMAC_XIF_CONFIG);
6764}
6765
6766static void niu_led_state_restore(struct niu *np, u64 val)
6767{
6768 if (np->flags & NIU_FLAGS_XMAC)
6769 nw64_mac(XMAC_CONFIG, val);
6770 else
6771 nw64_mac(BMAC_XIF_CONFIG, val);
6772}
6773
6774static void niu_force_led(struct niu *np, int on)
6775{
6776 u64 val, reg, bit;
6777
6778 if (np->flags & NIU_FLAGS_XMAC) {
6779 reg = XMAC_CONFIG;
6780 bit = XMAC_CONFIG_FORCE_LED_ON;
6781 } else {
6782 reg = BMAC_XIF_CONFIG;
6783 bit = BMAC_XIF_CONFIG_LINK_LED;
6784 }
6785
6786 val = nr64_mac(reg);
6787 if (on)
6788 val |= bit;
6789 else
6790 val &= ~bit;
6791 nw64_mac(reg, val);
6792}
6793
6794static int niu_phys_id(struct net_device *dev, u32 data)
6795{
6796 struct niu *np = netdev_priv(dev);
6797 u64 orig_led_state;
6798 int i;
6799
6800 if (!netif_running(dev))
6801 return -EAGAIN;
6802
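	/* ethtool passes the blink duration in seconds, 0 meaning a
	 * default; each loop iteration toggles the LED for 500ms.
	 */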
6803 if (data == 0)
6804 data = 2;
6805
6806 orig_led_state = niu_led_state_save(np);
6807 for (i = 0; i < (data * 2); i++) {
6808 int on = ((i % 2) == 0);
6809
6810 niu_force_led(np, on);
6811
6812 if (msleep_interruptible(500))
6813 break;
6814 }
6815 niu_led_state_restore(np, orig_led_state);
6816
6817 return 0;
6818}
6819
6820static const struct ethtool_ops niu_ethtool_ops = {
6821 .get_drvinfo = niu_get_drvinfo,
6822 .get_link = ethtool_op_get_link,
6823 .get_msglevel = niu_get_msglevel,
6824 .set_msglevel = niu_set_msglevel,
6825 .get_eeprom_len = niu_get_eeprom_len,
6826 .get_eeprom = niu_get_eeprom,
6827 .get_settings = niu_get_settings,
6828 .set_settings = niu_set_settings,
6829 .get_strings = niu_get_strings,
6830 .get_stats_count = niu_get_stats_count,
6831 .get_ethtool_stats = niu_get_ethtool_stats,
6832 .phys_id = niu_phys_id,
Santwona Beherab4653e92008-07-02 03:49:11 -07006833 .get_rxhash = niu_get_hash_opts,
6834 .set_rxhash = niu_set_hash_opts,
David S. Millera3138df2007-10-09 01:54:01 -07006835};
6836
6837static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
6838 int ldg, int ldn)
6839{
6840 if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
6841 return -EINVAL;
6842 if (ldn < 0 || ldn > LDN_MAX)
6843 return -EINVAL;
6844
6845 parent->ldg_map[ldn] = ldg;
6846
6847 if (np->parent->plat_type == PLAT_TYPE_NIU) {
6848 /* On N2 NIU, the ldn-->ldg assignments are setup and fixed by
6849 * the firmware, and we're not supposed to change them.
6850 * Validate the mapping, because if it's wrong we probably
6851 * won't get any interrupts and that's painful to debug.
6852 */
6853 if (nr64(LDG_NUM(ldn)) != ldg) {
6854 			dev_err(np->device, PFX "Port %u, mismatched "
6855 				"LDG assignment "
6856 				"for ldn %d, should be %d but is %llu\n",
6857 np->port, ldn, ldg,
6858 (unsigned long long) nr64(LDG_NUM(ldn)));
6859 return -EINVAL;
6860 }
6861 } else
6862 nw64(LDG_NUM(ldn), ldg);
6863
6864 return 0;
6865}
6866
6867static int niu_set_ldg_timer_res(struct niu *np, int res)
6868{
6869 if (res < 0 || res > LDG_TIMER_RES_VAL)
6870 return -EINVAL;
6871
6873 nw64(LDG_TIMER_RES, res);
6874
6875 return 0;
6876}
6877
6878static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
6879{
6880 if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
6881 (func < 0 || func > 3) ||
6882 (vector < 0 || vector > 0x1f))
6883 return -EINVAL;
6884
6885 nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);
6886
6887 return 0;
6888}
6889
6890static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
6891{
6892 u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
6893 (addr << ESPC_PIO_STAT_ADDR_SHIFT));
6894 int limit;
6895
6896 if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
6897 return -EINVAL;
6898
6899 frame = frame_base;
6900 nw64(ESPC_PIO_STAT, frame);
6901 limit = 64;
6902 do {
6903 udelay(5);
6904 frame = nr64(ESPC_PIO_STAT);
6905 if (frame & ESPC_PIO_STAT_READ_END)
6906 break;
6907 } while (limit--);
6908 if (!(frame & ESPC_PIO_STAT_READ_END)) {
6909 dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n",
6910 (unsigned long long) frame);
6911 return -ENODEV;
6912 }
6913
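	/* Run the identical read sequence a second time before latching
	 * the data; the ESPC apparently needs two passes before the
	 * data register is valid.
	 */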
6914 frame = frame_base;
6915 nw64(ESPC_PIO_STAT, frame);
6916 limit = 64;
6917 do {
6918 udelay(5);
6919 frame = nr64(ESPC_PIO_STAT);
6920 if (frame & ESPC_PIO_STAT_READ_END)
6921 break;
6922 } while (limit--);
6923 if (!(frame & ESPC_PIO_STAT_READ_END)) {
6924 dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n",
6925 (unsigned long long) frame);
6926 return -ENODEV;
6927 }
6928
6929 frame = nr64(ESPC_PIO_STAT);
6930 return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
6931}
6932
6933static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off)
6934{
6935 int err = niu_pci_eeprom_read(np, off);
6936 u16 val;
6937
6938 if (err < 0)
6939 return err;
6940 val = (err << 8);
6941 err = niu_pci_eeprom_read(np, off + 1);
6942 if (err < 0)
6943 return err;
6944 val |= (err & 0xff);
6945
6946 return val;
6947}
6948
6949static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
6950{
6951 int err = niu_pci_eeprom_read(np, off);
6952 u16 val;
6953
6954 if (err < 0)
6955 return err;
6956
6957 val = (err & 0xff);
6958 err = niu_pci_eeprom_read(np, off + 1);
6959 if (err < 0)
6960 return err;
6961
6962 val |= (err & 0xff) << 8;
6963
6964 return val;
6965}
6966
6967static int __devinit niu_pci_vpd_get_propname(struct niu *np,
6968 u32 off,
6969 char *namebuf,
6970 int namebuf_len)
6971{
6972 int i;
6973
6974 for (i = 0; i < namebuf_len; i++) {
6975 int err = niu_pci_eeprom_read(np, off + i);
6976 if (err < 0)
6977 return err;
6978 *namebuf++ = err;
6979 if (!err)
6980 break;
6981 }
6982 if (i >= namebuf_len)
6983 return -EINVAL;
6984
6985 return i + 1;
6986}
6987
6988static void __devinit niu_vpd_parse_version(struct niu *np)
6989{
6990 struct niu_vpd *vpd = &np->vpd;
6991 int len = strlen(vpd->version) + 1;
6992 const char *s = vpd->version;
6993 int i;
6994
6995 for (i = 0; i < len - 5; i++) {
6996 		if (!strncmp(s + i, "FCode ", 6))
6997 break;
6998 }
6999 if (i >= len - 5)
7000 return;
7001
7002 s += i + 5;
7003 sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
7004
7005 niudbg(PROBE, "VPD_SCAN: FCODE major(%d) minor(%d)\n",
7006 vpd->fcode_major, vpd->fcode_minor);
7007 if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
7008 (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
7009 vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
7010 np->flags |= NIU_FLAGS_VPD_VALID;
7011}
7012
7013/* ESPC_PIO_EN_ENABLE must be set */
7014static int __devinit niu_pci_vpd_scan_props(struct niu *np,
7015 u32 start, u32 end)
7016{
7017 unsigned int found_mask = 0;
7018#define FOUND_MASK_MODEL 0x00000001
7019#define FOUND_MASK_BMODEL 0x00000002
7020#define FOUND_MASK_VERS 0x00000004
7021#define FOUND_MASK_MAC 0x00000008
7022#define FOUND_MASK_NMAC 0x00000010
7023#define FOUND_MASK_PHY 0x00000020
7024#define FOUND_MASK_ALL 0x0000003f
7025
7026 niudbg(PROBE, "VPD_SCAN: start[%x] end[%x]\n",
7027 start, end);
7028 while (start < end) {
7029 int len, err, instance, type, prop_len;
7030 char namebuf[64];
7031 u8 *prop_buf;
7032 int max_len;
7033
7034 if (found_mask == FOUND_MASK_ALL) {
7035 niu_vpd_parse_version(np);
7036 return 1;
7037 }
7038
7039 err = niu_pci_eeprom_read(np, start + 2);
7040 if (err < 0)
7041 return err;
7042 len = err;
7043 start += 3;
7044
7045 instance = niu_pci_eeprom_read(np, start);
7046 type = niu_pci_eeprom_read(np, start + 3);
7047 prop_len = niu_pci_eeprom_read(np, start + 4);
7048 err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
7049 if (err < 0)
7050 return err;
7051
7052 prop_buf = NULL;
7053 max_len = 0;
7054 if (!strcmp(namebuf, "model")) {
7055 prop_buf = np->vpd.model;
7056 max_len = NIU_VPD_MODEL_MAX;
7057 found_mask |= FOUND_MASK_MODEL;
7058 } else if (!strcmp(namebuf, "board-model")) {
7059 prop_buf = np->vpd.board_model;
7060 max_len = NIU_VPD_BD_MODEL_MAX;
7061 found_mask |= FOUND_MASK_BMODEL;
7062 } else if (!strcmp(namebuf, "version")) {
7063 prop_buf = np->vpd.version;
7064 max_len = NIU_VPD_VERSION_MAX;
7065 found_mask |= FOUND_MASK_VERS;
7066 } else if (!strcmp(namebuf, "local-mac-address")) {
7067 prop_buf = np->vpd.local_mac;
7068 max_len = ETH_ALEN;
7069 found_mask |= FOUND_MASK_MAC;
7070 } else if (!strcmp(namebuf, "num-mac-addresses")) {
7071 prop_buf = &np->vpd.mac_num;
7072 max_len = 1;
7073 found_mask |= FOUND_MASK_NMAC;
7074 } else if (!strcmp(namebuf, "phy-type")) {
7075 prop_buf = np->vpd.phy_type;
7076 max_len = NIU_VPD_PHY_TYPE_MAX;
7077 found_mask |= FOUND_MASK_PHY;
7078 }
7079
7080 if (max_len && prop_len > max_len) {
7081 dev_err(np->device, PFX "Property '%s' length (%d) is "
7082 "too long.\n", namebuf, prop_len);
7083 return -EINVAL;
7084 }
7085
7086 if (prop_buf) {
7087 u32 off = start + 5 + err;
7088 int i;
7089
7090 niudbg(PROBE, "VPD_SCAN: Reading in property [%s] "
7091 "len[%d]\n", namebuf, prop_len);
7092 for (i = 0; i < prop_len; i++)
7093 *prop_buf++ = niu_pci_eeprom_read(np, off + i);
7094 }
7095
7096 start += len;
7097 }
7098
7099 return 0;
7100}
7101
7102/* ESPC_PIO_EN_ENABLE must be set */
7103static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start)
7104{
7105 u32 offset;
7106 int err;
7107
7108 err = niu_pci_eeprom_read16_swp(np, start + 1);
7109 if (err < 0)
7110 return;
7111
7112 offset = err + 3;
7113
7114 while (start + offset < ESPC_EEPROM_SIZE) {
7115 u32 here = start + offset;
7116 u32 end;
7117
7118 err = niu_pci_eeprom_read(np, here);
7119 if (err != 0x90)
7120 return;
7121
7122 err = niu_pci_eeprom_read16_swp(np, here + 1);
7123 if (err < 0)
7124 return;
7125
7126 here = start + offset + 3;
7127 end = start + offset + err;
7128
7129 offset += err;
7130
7131 err = niu_pci_vpd_scan_props(np, here, end);
7132 if (err < 0 || err == 1)
7133 return;
7134 }
7135}
7136
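/* Walk the expansion ROM images in the EEPROM.  Each image starts with
 * the 0x55aa ROM signature and points at a "PCIR" data structure; we
 * skip images whose code type is not 0x01 (OBP).  The PCIR's VPD
 * pointer then locates the VPD area, which must begin with the
 * identifier-string tag (0x82).
 */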
7137/* ESPC_PIO_EN_ENABLE must be set */
7138static u32 __devinit niu_pci_vpd_offset(struct niu *np)
7139{
7140 u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
7141 int err;
7142
7143 while (start < end) {
7144 ret = start;
7145
7146 /* ROM header signature? */
7147 err = niu_pci_eeprom_read16(np, start + 0);
7148 if (err != 0x55aa)
7149 return 0;
7150
7151 /* Apply offset to PCI data structure. */
7152 err = niu_pci_eeprom_read16(np, start + 23);
7153 if (err < 0)
7154 return 0;
7155 start += err;
7156
7157 /* Check for "PCIR" signature. */
7158 err = niu_pci_eeprom_read16(np, start + 0);
7159 if (err != 0x5043)
7160 return 0;
7161 err = niu_pci_eeprom_read16(np, start + 2);
7162 if (err != 0x4952)
7163 return 0;
7164
7165 /* Check for OBP image type. */
7166 err = niu_pci_eeprom_read(np, start + 20);
7167 if (err < 0)
7168 return 0;
7169 if (err != 0x01) {
7170 err = niu_pci_eeprom_read(np, ret + 2);
7171 if (err < 0)
7172 return 0;
7173
7174 start = ret + (err * 512);
7175 continue;
7176 }
7177
7178 err = niu_pci_eeprom_read16_swp(np, start + 8);
7179 if (err < 0)
7180 return err;
7181 ret += err;
7182
7183 err = niu_pci_eeprom_read(np, ret + 0);
7184 if (err != 0x82)
7185 return 0;
7186
7187 return ret;
7188 }
7189
7190 return 0;
7191}
7192
7193static int __devinit niu_phy_type_prop_decode(struct niu *np,
7194 const char *phy_prop)
7195{
7196 if (!strcmp(phy_prop, "mif")) {
7197 /* 1G copper, MII */
7198 np->flags &= ~(NIU_FLAGS_FIBER |
7199 NIU_FLAGS_10G);
7200 np->mac_xcvr = MAC_XCVR_MII;
7201 } else if (!strcmp(phy_prop, "xgf")) {
7202 /* 10G fiber, XPCS */
7203 np->flags |= (NIU_FLAGS_10G |
7204 NIU_FLAGS_FIBER);
7205 np->mac_xcvr = MAC_XCVR_XPCS;
7206 } else if (!strcmp(phy_prop, "pcs")) {
7207 /* 1G fiber, PCS */
7208 np->flags &= ~NIU_FLAGS_10G;
7209 np->flags |= NIU_FLAGS_FIBER;
7210 np->mac_xcvr = MAC_XCVR_PCS;
7211 } else if (!strcmp(phy_prop, "xgc")) {
7212 /* 10G copper, XPCS */
7213 np->flags |= NIU_FLAGS_10G;
7214 np->flags &= ~NIU_FLAGS_FIBER;
7215 np->mac_xcvr = MAC_XCVR_XPCS;
7216 } else {
7217 return -EINVAL;
7218 }
7219 return 0;
7220}
7221
Matheos Worku7f7c4072008-04-24 21:02:37 -07007222static int niu_pci_vpd_get_nports(struct niu *np)
7223{
7224 int ports = 0;
7225
Matheos Workuf9af8572008-05-12 03:10:59 -07007226 if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
7227 (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
7228 (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
7229 (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
7230 (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
Matheos Worku7f7c4072008-04-24 21:02:37 -07007231 ports = 4;
Matheos Workuf9af8572008-05-12 03:10:59 -07007232 } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
7233 (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
7234 (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
7235 (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
Matheos Worku7f7c4072008-04-24 21:02:37 -07007236 ports = 2;
7237 }
7238
7239 return ports;
7240}
7241
David S. Millera3138df2007-10-09 01:54:01 -07007242static void __devinit niu_pci_vpd_validate(struct niu *np)
7243{
7244 struct net_device *dev = np->dev;
7245 struct niu_vpd *vpd = &np->vpd;
7246 u8 val8;
7247
7248 if (!is_valid_ether_addr(&vpd->local_mac[0])) {
7249 dev_err(np->device, PFX "VPD MAC invalid, "
7250 "falling back to SPROM.\n");
7251
7252 np->flags &= ~NIU_FLAGS_VPD_VALID;
7253 return;
7254 }
7255
Matheos Workuf9af8572008-05-12 03:10:59 -07007256 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
7257 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
Matheos Worku5fbd7e22008-02-28 21:25:43 -08007258 np->flags |= NIU_FLAGS_10G;
7259 np->flags &= ~NIU_FLAGS_FIBER;
7260 np->flags |= NIU_FLAGS_XCVR_SERDES;
7261 np->mac_xcvr = MAC_XCVR_PCS;
7262 if (np->port > 1) {
7263 np->flags |= NIU_FLAGS_FIBER;
7264 np->flags &= ~NIU_FLAGS_10G;
7265 }
7266 if (np->flags & NIU_FLAGS_10G)
7267 np->mac_xcvr = MAC_XCVR_XPCS;
Matheos Workuf9af8572008-05-12 03:10:59 -07007268 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
Matheos Workua5d6ab52008-04-24 21:09:20 -07007269 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
7270 NIU_FLAGS_HOTPLUG_PHY);
Matheos Worku5fbd7e22008-02-28 21:25:43 -08007271 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
David S. Millera3138df2007-10-09 01:54:01 -07007272 dev_err(np->device, PFX "Illegal phy string [%s].\n",
7273 np->vpd.phy_type);
7274 dev_err(np->device, PFX "Falling back to SPROM.\n");
7275 np->flags &= ~NIU_FLAGS_VPD_VALID;
7276 return;
7277 }
7278
7279 memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN);
7280
7281 val8 = dev->perm_addr[5];
7282 dev->perm_addr[5] += np->port;
7283 if (dev->perm_addr[5] < val8)
7284 dev->perm_addr[4]++;
7285
7286 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
7287}
7288
7289static int __devinit niu_pci_probe_sprom(struct niu *np)
7290{
7291 struct net_device *dev = np->dev;
7292 int len, i;
7293 u64 val, sum;
7294 u8 val8;
7295
7296 val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
7297 val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
7298 len = val / 4;
7299
7300 np->eeprom_len = len;
7301
7302 niudbg(PROBE, "SPROM: Image size %llu\n", (unsigned long long) val);
7303
7304 sum = 0;
7305 for (i = 0; i < len; i++) {
7306 val = nr64(ESPC_NCR(i));
7307 sum += (val >> 0) & 0xff;
7308 sum += (val >> 8) & 0xff;
7309 sum += (val >> 16) & 0xff;
7310 sum += (val >> 24) & 0xff;
7311 }
7312 niudbg(PROBE, "SPROM: Checksum %x\n", (int)(sum & 0xff));
7313 if ((sum & 0xff) != 0xab) {
7314 dev_err(np->device, PFX "Bad SPROM checksum "
7315 "(%x, should be 0xab)\n", (int) (sum & 0xff));
7316 return -EINVAL;
7317 }
7318
7319 val = nr64(ESPC_PHY_TYPE);
7320 switch (np->port) {
7321 case 0:
Al Viroa9d41192007-10-15 01:42:31 -07007322 val8 = (val & ESPC_PHY_TYPE_PORT0) >>
David S. Millera3138df2007-10-09 01:54:01 -07007323 ESPC_PHY_TYPE_PORT0_SHIFT;
7324 break;
7325 case 1:
Al Viroa9d41192007-10-15 01:42:31 -07007326 val8 = (val & ESPC_PHY_TYPE_PORT1) >>
David S. Millera3138df2007-10-09 01:54:01 -07007327 ESPC_PHY_TYPE_PORT1_SHIFT;
7328 break;
7329 case 2:
Al Viroa9d41192007-10-15 01:42:31 -07007330 val8 = (val & ESPC_PHY_TYPE_PORT2) >>
David S. Millera3138df2007-10-09 01:54:01 -07007331 ESPC_PHY_TYPE_PORT2_SHIFT;
7332 break;
7333 case 3:
Al Viroa9d41192007-10-15 01:42:31 -07007334 val8 = (val & ESPC_PHY_TYPE_PORT3) >>
David S. Millera3138df2007-10-09 01:54:01 -07007335 ESPC_PHY_TYPE_PORT3_SHIFT;
7336 break;
7337 default:
7338 dev_err(np->device, PFX "Bogus port number %u\n",
7339 np->port);
7340 return -EINVAL;
7341 }
Al Viroa9d41192007-10-15 01:42:31 -07007342 niudbg(PROBE, "SPROM: PHY type %x\n", val8);
David S. Millera3138df2007-10-09 01:54:01 -07007343
Al Viroa9d41192007-10-15 01:42:31 -07007344 switch (val8) {
David S. Millera3138df2007-10-09 01:54:01 -07007345 case ESPC_PHY_TYPE_1G_COPPER:
7346 /* 1G copper, MII */
7347 np->flags &= ~(NIU_FLAGS_FIBER |
7348 NIU_FLAGS_10G);
7349 np->mac_xcvr = MAC_XCVR_MII;
7350 break;
7351
7352 case ESPC_PHY_TYPE_1G_FIBER:
7353 /* 1G fiber, PCS */
7354 np->flags &= ~NIU_FLAGS_10G;
7355 np->flags |= NIU_FLAGS_FIBER;
7356 np->mac_xcvr = MAC_XCVR_PCS;
7357 break;
7358
7359 case ESPC_PHY_TYPE_10G_COPPER:
7360 /* 10G copper, XPCS */
7361 np->flags |= NIU_FLAGS_10G;
7362 np->flags &= ~NIU_FLAGS_FIBER;
7363 np->mac_xcvr = MAC_XCVR_XPCS;
7364 break;
7365
7366 case ESPC_PHY_TYPE_10G_FIBER:
7367 /* 10G fiber, XPCS */
7368 np->flags |= (NIU_FLAGS_10G |
7369 NIU_FLAGS_FIBER);
7370 np->mac_xcvr = MAC_XCVR_XPCS;
7371 break;
7372
7373 default:
Al Viroa9d41192007-10-15 01:42:31 -07007374 dev_err(np->device, PFX "Bogus SPROM phy type %u\n", val8);
David S. Millera3138df2007-10-09 01:54:01 -07007375 return -EINVAL;
7376 }
7377
7378 val = nr64(ESPC_MAC_ADDR0);
7379 niudbg(PROBE, "SPROM: MAC_ADDR0[%08llx]\n",
7380 (unsigned long long) val);
7381 dev->perm_addr[0] = (val >> 0) & 0xff;
7382 dev->perm_addr[1] = (val >> 8) & 0xff;
7383 dev->perm_addr[2] = (val >> 16) & 0xff;
7384 dev->perm_addr[3] = (val >> 24) & 0xff;
7385
7386 val = nr64(ESPC_MAC_ADDR1);
7387 niudbg(PROBE, "SPROM: MAC_ADDR1[%08llx]\n",
7388 (unsigned long long) val);
7389 dev->perm_addr[4] = (val >> 0) & 0xff;
7390 dev->perm_addr[5] = (val >> 8) & 0xff;
7391
7392 if (!is_valid_ether_addr(&dev->perm_addr[0])) {
7393 dev_err(np->device, PFX "SPROM MAC address invalid\n");
7394 dev_err(np->device, PFX "[ \n");
7395 for (i = 0; i < 6; i++)
7396 printk("%02x ", dev->perm_addr[i]);
7397 printk("]\n");
7398 return -EINVAL;
7399 }
7400
7401 val8 = dev->perm_addr[5];
7402 dev->perm_addr[5] += np->port;
7403 if (dev->perm_addr[5] < val8)
7404 dev->perm_addr[4]++;
7405
7406 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
7407
7408 val = nr64(ESPC_MOD_STR_LEN);
7409 niudbg(PROBE, "SPROM: MOD_STR_LEN[%llu]\n",
7410 (unsigned long long) val);
David S. Millere6a5fdf2007-10-15 01:36:24 -07007411 if (val >= 8 * 4)
David S. Millera3138df2007-10-09 01:54:01 -07007412 return -EINVAL;
7413
7414 for (i = 0; i < val; i += 4) {
7415 u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));
7416
7417 np->vpd.model[i + 3] = (tmp >> 0) & 0xff;
7418 np->vpd.model[i + 2] = (tmp >> 8) & 0xff;
7419 np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
7420 np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
7421 }
7422 np->vpd.model[val] = '\0';
7423
7424 val = nr64(ESPC_BD_MOD_STR_LEN);
7425 niudbg(PROBE, "SPROM: BD_MOD_STR_LEN[%llu]\n",
7426 (unsigned long long) val);
David S. Millere6a5fdf2007-10-15 01:36:24 -07007427 if (val >= 4 * 4)
David S. Millera3138df2007-10-09 01:54:01 -07007428 return -EINVAL;
7429
7430 for (i = 0; i < val; i += 4) {
7431 u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));
7432
7433 np->vpd.board_model[i + 3] = (tmp >> 0) & 0xff;
7434 np->vpd.board_model[i + 2] = (tmp >> 8) & 0xff;
7435 np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
7436 np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
7437 }
7438 np->vpd.board_model[val] = '\0';
7439
7440 np->vpd.mac_num =
7441 nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
7442 niudbg(PROBE, "SPROM: NUM_PORTS_MACS[%d]\n",
7443 np->vpd.mac_num);
7444
7445 return 0;
7446}
7447
7448static int __devinit niu_get_and_validate_port(struct niu *np)
7449{
7450 struct niu_parent *parent = np->parent;
7451
7452 if (np->port <= 1)
7453 np->flags |= NIU_FLAGS_XMAC;
7454
7455 if (!parent->num_ports) {
7456 if (parent->plat_type == PLAT_TYPE_NIU) {
7457 parent->num_ports = 2;
7458 } else {
Matheos Worku7f7c4072008-04-24 21:02:37 -07007459 parent->num_ports = niu_pci_vpd_get_nports(np);
7460 if (!parent->num_ports) {
7461 /* Fall back to SPROM as last resort.
7462 * This will fail on most cards.
7463 */
7464 parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
7465 ESPC_NUM_PORTS_MACS_VAL;
David S. Millera3138df2007-10-09 01:54:01 -07007466
David S. Millerbe0c0072008-05-04 01:34:31 -07007467 /* All of the current probing methods fail on
7468 * Maramba on-board parts.
7469 */
Matheos Worku7f7c4072008-04-24 21:02:37 -07007470 if (!parent->num_ports)
David S. Millerbe0c0072008-05-04 01:34:31 -07007471 parent->num_ports = 4;
Matheos Worku7f7c4072008-04-24 21:02:37 -07007472 }
David S. Millera3138df2007-10-09 01:54:01 -07007473 }
7474 }
7475
7476 niudbg(PROBE, "niu_get_and_validate_port: port[%d] num_ports[%d]\n",
7477 np->port, parent->num_ports);
7478 if (np->port >= parent->num_ports)
7479 return -ENODEV;
7480
7481 return 0;
7482}
7483
7484static int __devinit phy_record(struct niu_parent *parent,
7485 struct phy_probe_info *p,
7486 int dev_id_1, int dev_id_2, u8 phy_port,
7487 int type)
7488{
7489 u32 id = (dev_id_1 << 16) | dev_id_2;
7490 u8 idx;
7491
7492 if (dev_id_1 < 0 || dev_id_2 < 0)
7493 return 0;
7494 if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
Mirko Lindnerb0de8e42008-01-10 02:12:44 -08007495 if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
Matheos Workua5d6ab52008-04-24 21:09:20 -07007496 ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
7497 ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
David S. Millera3138df2007-10-09 01:54:01 -07007498 return 0;
7499 } else {
7500 if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
7501 return 0;
7502 }
7503
7504 pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
7505 parent->index, id,
7506 (type == PHY_TYPE_PMA_PMD ?
7507 "PMA/PMD" :
7508 (type == PHY_TYPE_PCS ?
7509 "PCS" : "MII")),
7510 phy_port);
7511
7512 if (p->cur[type] >= NIU_MAX_PORTS) {
7513 printk(KERN_ERR PFX "Too many PHY ports.\n");
7514 return -EINVAL;
7515 }
7516 idx = p->cur[type];
7517 p->phy_id[type][idx] = id;
7518 p->phy_port[type][idx] = phy_port;
7519 p->cur[type] = idx + 1;
7520 return 0;
7521}
7522
7523static int __devinit port_has_10g(struct phy_probe_info *p, int port)
7524{
7525 int i;
7526
7527 for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
7528 if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
7529 return 1;
7530 }
7531 for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
7532 if (p->phy_port[PHY_TYPE_PCS][i] == port)
7533 return 1;
7534 }
7535
7536 return 0;
7537}
7538
7539static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest)
7540{
7541 int port, cnt;
7542
7543 cnt = 0;
7544 *lowest = 32;
7545 for (port = 8; port < 32; port++) {
7546 if (port_has_10g(p, port)) {
7547 if (!cnt)
7548 *lowest = port;
7549 cnt++;
7550 }
7551 }
7552
7553 return cnt;
7554}
7555
7556static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest)
7557{
7558 *lowest = 32;
7559 if (p->cur[PHY_TYPE_MII])
7560 *lowest = p->phy_port[PHY_TYPE_MII][0];
7561
7562 return p->cur[PHY_TYPE_MII];
7563}
7564
7565static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
7566{
7567 int num_ports = parent->num_ports;
7568 int i;
7569
7570 for (i = 0; i < num_ports; i++) {
7571 parent->rxchan_per_port[i] = (16 / num_ports);
7572 parent->txchan_per_port[i] = (16 / num_ports);
7573
7574 pr_info(PFX "niu%d: Port %u [%u RX chans] "
7575 "[%u TX chans]\n",
7576 parent->index, i,
7577 parent->rxchan_per_port[i],
7578 parent->txchan_per_port[i]);
7579 }
7580}
7581
7582static void __devinit niu_divide_channels(struct niu_parent *parent,
7583 int num_10g, int num_1g)
7584{
7585 int num_ports = parent->num_ports;
7586 int rx_chans_per_10g, rx_chans_per_1g;
7587 int tx_chans_per_10g, tx_chans_per_1g;
7588 int i, tot_rx, tot_tx;
7589
7590 if (!num_10g || !num_1g) {
7591 rx_chans_per_10g = rx_chans_per_1g =
7592 (NIU_NUM_RXCHAN / num_ports);
7593 tx_chans_per_10g = tx_chans_per_1g =
7594 (NIU_NUM_TXCHAN / num_ports);
7595 } else {
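		/* Mixed 10G/1G: give each 1G port a fixed share
		 * (NIU_NUM_RXCHAN / 8 RX and NIU_NUM_TXCHAN / 6 TX
		 * channels) and split the remainder evenly among the
		 * 10G ports.
		 */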
7596 rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
7597 rx_chans_per_10g = (NIU_NUM_RXCHAN -
7598 (rx_chans_per_1g * num_1g)) /
7599 num_10g;
7600
7601 tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
7602 tx_chans_per_10g = (NIU_NUM_TXCHAN -
7603 (tx_chans_per_1g * num_1g)) /
7604 num_10g;
7605 }
7606
7607 tot_rx = tot_tx = 0;
7608 for (i = 0; i < num_ports; i++) {
7609 int type = phy_decode(parent->port_phy, i);
7610
7611 if (type == PORT_TYPE_10G) {
7612 parent->rxchan_per_port[i] = rx_chans_per_10g;
7613 parent->txchan_per_port[i] = tx_chans_per_10g;
7614 } else {
7615 parent->rxchan_per_port[i] = rx_chans_per_1g;
7616 parent->txchan_per_port[i] = tx_chans_per_1g;
7617 }
7618 pr_info(PFX "niu%d: Port %u [%u RX chans] "
7619 "[%u TX chans]\n",
7620 parent->index, i,
7621 parent->rxchan_per_port[i],
7622 parent->txchan_per_port[i]);
7623 tot_rx += parent->rxchan_per_port[i];
7624 tot_tx += parent->txchan_per_port[i];
7625 }
7626
7627 if (tot_rx > NIU_NUM_RXCHAN) {
7628 printk(KERN_ERR PFX "niu%d: Too many RX channels (%d), "
7629 "resetting to one per port.\n",
7630 parent->index, tot_rx);
7631 for (i = 0; i < num_ports; i++)
7632 parent->rxchan_per_port[i] = 1;
7633 }
7634 if (tot_tx > NIU_NUM_TXCHAN) {
7635 printk(KERN_ERR PFX "niu%d: Too many TX channels (%d), "
7636 "resetting to one per port.\n",
7637 parent->index, tot_tx);
7638 for (i = 0; i < num_ports; i++)
7639 parent->txchan_per_port[i] = 1;
7640 }
7641 if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
7642 printk(KERN_WARNING PFX "niu%d: Driver bug, wasted channels, "
7643 "RX[%d] TX[%d]\n",
7644 parent->index, tot_rx, tot_tx);
7645 }
7646}
7647
7648static void __devinit niu_divide_rdc_groups(struct niu_parent *parent,
7649 int num_10g, int num_1g)
7650{
7651 int i, num_ports = parent->num_ports;
7652 int rdc_group, rdc_groups_per_port;
7653 int rdc_channel_base;
7654
7655 rdc_group = 0;
7656 rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;
7657
7658 rdc_channel_base = 0;
7659
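	/* Give each port an equal share of the RDC tables, and fill
	 * every slot of each table by cycling through that port's RX
	 * channels.
	 */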
7660 for (i = 0; i < num_ports; i++) {
7661 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
7662 int grp, num_channels = parent->rxchan_per_port[i];
7663 int this_channel_offset;
7664
7665 tp->first_table_num = rdc_group;
7666 tp->num_tables = rdc_groups_per_port;
7667 this_channel_offset = 0;
7668 for (grp = 0; grp < tp->num_tables; grp++) {
7669 struct rdc_table *rt = &tp->tables[grp];
7670 int slot;
7671
7672 pr_info(PFX "niu%d: Port %d RDC tbl(%d) [ ",
7673 parent->index, i, tp->first_table_num + grp);
7674 for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
7675 rt->rxdma_channel[slot] =
7676 rdc_channel_base + this_channel_offset;
7677
7678 printk("%d ", rt->rxdma_channel[slot]);
7679
7680 if (++this_channel_offset == num_channels)
7681 this_channel_offset = 0;
7682 }
7683 printk("]\n");
7684 }
7685
7686 parent->rdc_default[i] = rdc_channel_base;
7687
7688 rdc_channel_base += num_channels;
7689 rdc_group += rdc_groups_per_port;
7690 }
7691}
7692
7693static int __devinit fill_phy_probe_info(struct niu *np,
7694 struct niu_parent *parent,
7695 struct phy_probe_info *info)
7696{
7697 unsigned long flags;
7698 int port, err;
7699
7700 memset(info, 0, sizeof(*info));
7701
7702 /* Port 0 to 7 are reserved for onboard Serdes, probe the rest. */
7703 niu_lock_parent(np, flags);
7704 err = 0;
7705 for (port = 8; port < 32; port++) {
7706 int dev_id_1, dev_id_2;
7707
7708 dev_id_1 = mdio_read(np, port,
7709 NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
7710 dev_id_2 = mdio_read(np, port,
7711 NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
7712 err = phy_record(parent, info, dev_id_1, dev_id_2, port,
7713 PHY_TYPE_PMA_PMD);
7714 if (err)
7715 break;
7716 dev_id_1 = mdio_read(np, port,
7717 NIU_PCS_DEV_ADDR, MII_PHYSID1);
7718 dev_id_2 = mdio_read(np, port,
7719 NIU_PCS_DEV_ADDR, MII_PHYSID2);
7720 err = phy_record(parent, info, dev_id_1, dev_id_2, port,
7721 PHY_TYPE_PCS);
7722 if (err)
7723 break;
7724 dev_id_1 = mii_read(np, port, MII_PHYSID1);
7725 dev_id_2 = mii_read(np, port, MII_PHYSID2);
7726 err = phy_record(parent, info, dev_id_1, dev_id_2, port,
7727 PHY_TYPE_MII);
7728 if (err)
7729 break;
7730 }
7731 niu_unlock_parent(np, flags);
7732
7733 return err;
7734}
7735
7736static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
7737{
7738 struct phy_probe_info *info = &parent->phy_probe_info;
7739 int lowest_10g, lowest_1g;
7740 int num_10g, num_1g;
7741 u32 val;
7742 int err;
7743
Matheos Workuf9af8572008-05-12 03:10:59 -07007744 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
7745 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
Matheos Worku5fbd7e22008-02-28 21:25:43 -08007746 num_10g = 0;
7747 num_1g = 2;
7748 parent->plat_type = PLAT_TYPE_ATCA_CP3220;
7749 parent->num_ports = 4;
David S. Millera3138df2007-10-09 01:54:01 -07007750 val = (phy_encode(PORT_TYPE_1G, 0) |
7751 phy_encode(PORT_TYPE_1G, 1) |
7752 phy_encode(PORT_TYPE_1G, 2) |
7753 phy_encode(PORT_TYPE_1G, 3));
Matheos Workuf9af8572008-05-12 03:10:59 -07007754 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
Matheos Workua5d6ab52008-04-24 21:09:20 -07007755 num_10g = 2;
7756 num_1g = 0;
7757 parent->num_ports = 2;
7758 val = (phy_encode(PORT_TYPE_10G, 0) |
7759 phy_encode(PORT_TYPE_10G, 1));
Matheos Worku5fbd7e22008-02-28 21:25:43 -08007760 } else {
7761 err = fill_phy_probe_info(np, parent, info);
7762 if (err)
7763 return err;
David S. Millera3138df2007-10-09 01:54:01 -07007764
Matheos Worku5fbd7e22008-02-28 21:25:43 -08007765 num_10g = count_10g_ports(info, &lowest_10g);
7766 num_1g = count_1g_ports(info, &lowest_1g);
7767
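		/* Encode the probe results as (num_10g << 4) | num_1g so
		 * each supported combination is a single switch case.
		 */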
7768 switch ((num_10g << 4) | num_1g) {
7769 case 0x24:
7770 if (lowest_1g == 10)
7771 parent->plat_type = PLAT_TYPE_VF_P0;
7772 else if (lowest_1g == 26)
7773 parent->plat_type = PLAT_TYPE_VF_P1;
7774 else
7775 goto unknown_vg_1g_port;
7776
7777 /* fallthru */
7778 case 0x22:
7779 val = (phy_encode(PORT_TYPE_10G, 0) |
7780 phy_encode(PORT_TYPE_10G, 1) |
7781 phy_encode(PORT_TYPE_1G, 2) |
7782 phy_encode(PORT_TYPE_1G, 3));
7783 break;
7784
7785 case 0x20:
7786 val = (phy_encode(PORT_TYPE_10G, 0) |
7787 phy_encode(PORT_TYPE_10G, 1));
7788 break;
7789
7790 case 0x10:
7791 val = phy_encode(PORT_TYPE_10G, np->port);
7792 break;
7793
7794 case 0x14:
7795 if (lowest_1g == 10)
7796 parent->plat_type = PLAT_TYPE_VF_P0;
7797 else if (lowest_1g == 26)
7798 parent->plat_type = PLAT_TYPE_VF_P1;
7799 else
7800 goto unknown_vg_1g_port;
7801
7802 /* fallthru */
7803 case 0x13:
7804 if ((lowest_10g & 0x7) == 0)
7805 val = (phy_encode(PORT_TYPE_10G, 0) |
7806 phy_encode(PORT_TYPE_1G, 1) |
7807 phy_encode(PORT_TYPE_1G, 2) |
7808 phy_encode(PORT_TYPE_1G, 3));
7809 else
7810 val = (phy_encode(PORT_TYPE_1G, 0) |
7811 phy_encode(PORT_TYPE_10G, 1) |
7812 phy_encode(PORT_TYPE_1G, 2) |
7813 phy_encode(PORT_TYPE_1G, 3));
7814 break;
7815
7816 case 0x04:
7817 if (lowest_1g == 10)
7818 parent->plat_type = PLAT_TYPE_VF_P0;
7819 else if (lowest_1g == 26)
7820 parent->plat_type = PLAT_TYPE_VF_P1;
7821 else
7822 goto unknown_vg_1g_port;
7823
7824 val = (phy_encode(PORT_TYPE_1G, 0) |
7825 phy_encode(PORT_TYPE_1G, 1) |
7826 phy_encode(PORT_TYPE_1G, 2) |
7827 phy_encode(PORT_TYPE_1G, 3));
7828 break;
7829
7830 default:
7831 printk(KERN_ERR PFX "Unsupported port config "
7832 "10G[%d] 1G[%d]\n",
7833 num_10g, num_1g);
7834 return -EINVAL;
7835 }
David S. Millera3138df2007-10-09 01:54:01 -07007836 }
7837
7838 parent->port_phy = val;
7839
7840 if (parent->plat_type == PLAT_TYPE_NIU)
7841 niu_n2_divide_channels(parent);
7842 else
7843 niu_divide_channels(parent, num_10g, num_1g);
7844
7845 niu_divide_rdc_groups(parent, num_10g, num_1g);
7846
7847 return 0;
7848
7849unknown_vg_1g_port:
7850 printk(KERN_ERR PFX "Cannot identify platform type, 1gport=%d\n",
7851 lowest_1g);
7852 return -EINVAL;
7853}
7854
7855static int __devinit niu_probe_ports(struct niu *np)
7856{
7857 struct niu_parent *parent = np->parent;
7858 int err, i;
7859
7860 niudbg(PROBE, "niu_probe_ports(): port_phy[%08x]\n",
7861 parent->port_phy);
7862
7863 if (parent->port_phy == PORT_PHY_UNKNOWN) {
7864 err = walk_phys(np, parent);
7865 if (err)
7866 return err;
7867
7868 niu_set_ldg_timer_res(np, 2);
7869 for (i = 0; i <= LDN_MAX; i++)
7870 niu_ldn_irq_enable(np, i, 0);
7871 }
7872
7873 if (parent->port_phy == PORT_PHY_INVALID)
7874 return -EINVAL;
7875
7876 return 0;
7877}
7878
7879static int __devinit niu_classifier_swstate_init(struct niu *np)
7880{
7881 struct niu_classifier *cp = &np->clas;
7882
7883 niudbg(PROBE, "niu_classifier_swstate_init: num_tcam(%d)\n",
7884 np->parent->tcam_num_entries);
7885
7886 cp->tcam_index = (u16) np->port;
7887 cp->h1_init = 0xffffffff;
7888 cp->h2_init = 0xffff;
7889
7890 return fflp_early_init(np);
7891}
7892
7893static void __devinit niu_link_config_init(struct niu *np)
7894{
7895 struct niu_link_config *lp = &np->link_config;
7896
7897 lp->advertising = (ADVERTISED_10baseT_Half |
7898 ADVERTISED_10baseT_Full |
7899 ADVERTISED_100baseT_Half |
7900 ADVERTISED_100baseT_Full |
7901 ADVERTISED_1000baseT_Half |
7902 ADVERTISED_1000baseT_Full |
7903 ADVERTISED_10000baseT_Full |
7904 ADVERTISED_Autoneg);
7905 lp->speed = lp->active_speed = SPEED_INVALID;
7906 lp->duplex = lp->active_duplex = DUPLEX_INVALID;
7907#if 0
7908 lp->loopback_mode = LOOPBACK_MAC;
7909 lp->active_speed = SPEED_10000;
7910 lp->active_duplex = DUPLEX_FULL;
7911#else
7912 lp->loopback_mode = LOOPBACK_DISABLED;
7913#endif
7914}
7915
7916static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
7917{
7918 switch (np->port) {
7919 case 0:
7920 np->mac_regs = np->regs + XMAC_PORT0_OFF;
7921 np->ipp_off = 0x00000;
7922 np->pcs_off = 0x04000;
7923 np->xpcs_off = 0x02000;
7924 break;
7925
7926 case 1:
7927 np->mac_regs = np->regs + XMAC_PORT1_OFF;
7928 np->ipp_off = 0x08000;
7929 np->pcs_off = 0x0a000;
7930 np->xpcs_off = 0x08000;
7931 break;
7932
7933 case 2:
7934 np->mac_regs = np->regs + BMAC_PORT2_OFF;
7935 np->ipp_off = 0x04000;
7936 np->pcs_off = 0x0e000;
7937 np->xpcs_off = ~0UL;
7938 break;
7939
7940 case 3:
7941 np->mac_regs = np->regs + BMAC_PORT3_OFF;
7942 np->ipp_off = 0x0c000;
7943 np->pcs_off = 0x12000;
7944 np->xpcs_off = ~0UL;
7945 break;
7946
7947 default:
7948 dev_err(np->device, PFX "Port %u is invalid, cannot "
7949 "compute MAC block offset.\n", np->port);
7950 return -EINVAL;
7951 }
7952
7953 return 0;
7954}
7955
7956static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
7957{
7958 struct msix_entry msi_vec[NIU_NUM_LDG];
7959 struct niu_parent *parent = np->parent;
7960 struct pci_dev *pdev = np->pdev;
7961 int i, num_irqs, err;
7962 u8 first_ldg;
7963
7964 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
7965 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
7966 ldg_num_map[i] = first_ldg + i;
7967
7968 num_irqs = (parent->rxchan_per_port[np->port] +
7969 parent->txchan_per_port[np->port] +
7970 (np->port == 0 ? 3 : 1));
7971 BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
7972
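	/* A positive return from pci_enable_msix() is the number of
	 * vectors actually available; retry with that smaller count.
	 */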
7973retry:
7974 for (i = 0; i < num_irqs; i++) {
7975 msi_vec[i].vector = 0;
7976 msi_vec[i].entry = i;
7977 }
7978
7979 err = pci_enable_msix(pdev, msi_vec, num_irqs);
7980 if (err < 0) {
7981 np->flags &= ~NIU_FLAGS_MSIX;
7982 return;
7983 }
7984 if (err > 0) {
7985 num_irqs = err;
7986 goto retry;
7987 }
7988
7989 np->flags |= NIU_FLAGS_MSIX;
7990 for (i = 0; i < num_irqs; i++)
7991 np->ldg[i].irq = msi_vec[i].vector;
7992 np->num_ldg = num_irqs;
7993}
7994
7995static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
7996{
7997#ifdef CONFIG_SPARC64
7998 struct of_device *op = np->op;
7999 const u32 *int_prop;
8000 int i;
8001
8002 int_prop = of_get_property(op->node, "interrupts", NULL);
8003 if (!int_prop)
8004 return -ENODEV;
8005
8006 for (i = 0; i < op->num_irqs; i++) {
8007 ldg_num_map[i] = int_prop[i];
8008 np->ldg[i].irq = op->irqs[i];
8009 }
8010
8011 np->num_ldg = op->num_irqs;
8012
8013 return 0;
8014#else
8015 return -EINVAL;
8016#endif
8017}
8018
8019static int __devinit niu_ldg_init(struct niu *np)
8020{
8021 struct niu_parent *parent = np->parent;
8022 u8 ldg_num_map[NIU_NUM_LDG];
8023 int first_chan, num_chan;
8024 int i, err, ldg_rotor;
8025 u8 port;
8026
8027 np->num_ldg = 1;
8028 np->ldg[0].irq = np->dev->irq;
8029 if (parent->plat_type == PLAT_TYPE_NIU) {
8030 err = niu_n2_irq_init(np, ldg_num_map);
8031 if (err)
8032 return err;
8033 } else
8034 niu_try_msix(np, ldg_num_map);
8035
8036 port = np->port;
8037 for (i = 0; i < np->num_ldg; i++) {
8038 struct niu_ldg *lp = &np->ldg[i];
8039
8040 netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
8041
8042 lp->np = np;
8043 lp->ldg_num = ldg_num_map[i];
8044 lp->timer = 2; /* XXX */
8045
8046 /* On N2 NIU the firmware has setup the SID mappings so they go
8047 * to the correct values that will route the LDG to the proper
8048 * interrupt in the NCU interrupt table.
8049 */
8050 if (np->parent->plat_type != PLAT_TYPE_NIU) {
8051 err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
8052 if (err)
8053 return err;
8054 }
8055 }
8056
8057 /* We adopt the LDG assignment ordering used by the N2 NIU
8058 * 'interrupt' properties because that simplifies a lot of
8059 * things. This ordering is:
8060 *
8061 * MAC
8062 * MIF (if port zero)
8063 * SYSERR (if port zero)
8064 * RX channels
8065 * TX channels
8066 */
8067
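	/* ldg_rotor walks the available LDGs round-robin so the LDNs
	 * are spread as evenly as possible across them.
	 */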
8068 ldg_rotor = 0;
8069
8070 err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
8071 LDN_MAC(port));
8072 if (err)
8073 return err;
8074
8075 ldg_rotor++;
8076 if (ldg_rotor == np->num_ldg)
8077 ldg_rotor = 0;
8078
8079 if (port == 0) {
8080 err = niu_ldg_assign_ldn(np, parent,
8081 ldg_num_map[ldg_rotor],
8082 LDN_MIF);
8083 if (err)
8084 return err;
8085
8086 ldg_rotor++;
8087 if (ldg_rotor == np->num_ldg)
8088 ldg_rotor = 0;
8089
8090 err = niu_ldg_assign_ldn(np, parent,
8091 ldg_num_map[ldg_rotor],
8092 LDN_DEVICE_ERROR);
8093 if (err)
8094 return err;
8095
8096 ldg_rotor++;
8097 if (ldg_rotor == np->num_ldg)
8098 ldg_rotor = 0;
8099
8100 }
8101
8102 first_chan = 0;
8103 for (i = 0; i < port; i++)
8104 		first_chan += parent->rxchan_per_port[i];
8105 num_chan = parent->rxchan_per_port[port];
8106
8107 for (i = first_chan; i < (first_chan + num_chan); i++) {
8108 err = niu_ldg_assign_ldn(np, parent,
8109 ldg_num_map[ldg_rotor],
8110 LDN_RXDMA(i));
8111 if (err)
8112 return err;
8113 ldg_rotor++;
8114 if (ldg_rotor == np->num_ldg)
8115 ldg_rotor = 0;
8116 }
8117
8118 first_chan = 0;
8119 for (i = 0; i < port; i++)
8120 		first_chan += parent->txchan_per_port[i];
8121 num_chan = parent->txchan_per_port[port];
8122 for (i = first_chan; i < (first_chan + num_chan); i++) {
8123 err = niu_ldg_assign_ldn(np, parent,
8124 ldg_num_map[ldg_rotor],
8125 LDN_TXDMA(i));
8126 if (err)
8127 return err;
8128 ldg_rotor++;
8129 if (ldg_rotor == np->num_ldg)
8130 ldg_rotor = 0;
8131 }
8132
8133 return 0;
8134}
8135
8136static void __devexit niu_ldg_free(struct niu *np)
8137{
8138 if (np->flags & NIU_FLAGS_MSIX)
8139 pci_disable_msix(np->pdev);
8140}
8141
8142static int __devinit niu_get_of_props(struct niu *np)
8143{
8144#ifdef CONFIG_SPARC64
8145 struct net_device *dev = np->dev;
8146 struct device_node *dp;
8147 const char *phy_type;
8148 const u8 *mac_addr;
Matheos Workuf9af8572008-05-12 03:10:59 -07008149 const char *model;
David S. Millera3138df2007-10-09 01:54:01 -07008150 int prop_len;
8151
8152 if (np->parent->plat_type == PLAT_TYPE_NIU)
8153 dp = np->op->node;
8154 else
8155 dp = pci_device_to_OF_node(np->pdev);
8156
8157 phy_type = of_get_property(dp, "phy-type", &prop_len);
8158 if (!phy_type) {
8159 dev_err(np->device, PFX "%s: OF node lacks "
8160 "phy-type property\n",
8161 dp->full_name);
8162 return -EINVAL;
8163 }
8164
8165 if (!strcmp(phy_type, "none"))
8166 return -ENODEV;
8167
8168 strcpy(np->vpd.phy_type, phy_type);
8169
8170 if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
8171 dev_err(np->device, PFX "%s: Illegal phy string [%s].\n",
8172 dp->full_name, np->vpd.phy_type);
8173 return -EINVAL;
8174 }
8175
8176 mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
8177 if (!mac_addr) {
8178 dev_err(np->device, PFX "%s: OF node lacks "
8179 "local-mac-address property\n",
8180 dp->full_name);
8181 return -EINVAL;
8182 }
8183 if (prop_len != dev->addr_len) {
8184 dev_err(np->device, PFX "%s: OF MAC address prop len (%d) "
8185 "is wrong.\n",
8186 dp->full_name, prop_len);
8187 }
8188 memcpy(dev->perm_addr, mac_addr, dev->addr_len);
8189 if (!is_valid_ether_addr(&dev->perm_addr[0])) {
8190 int i;
8191
8192 dev_err(np->device, PFX "%s: OF MAC address is invalid\n",
8193 dp->full_name);
8194 dev_err(np->device, PFX "%s: [ \n",
8195 dp->full_name);
8196 for (i = 0; i < 6; i++)
8197 printk("%02x ", dev->perm_addr[i]);
8198 printk("]\n");
8199 return -EINVAL;
8200 }
8201
8202 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
8203
Matheos Workuf9af8572008-05-12 03:10:59 -07008204 model = of_get_property(dp, "model", &prop_len);
8205
8206 if (model)
8207 strcpy(np->vpd.model, model);
8208
David S. Millera3138df2007-10-09 01:54:01 -07008209 return 0;
8210#else
8211 return -EINVAL;
8212#endif
8213}
8214
8215static int __devinit niu_get_invariants(struct niu *np)
8216{
8217 int err, have_props;
8218 u32 offset;
8219
8220 err = niu_get_of_props(np);
8221 if (err == -ENODEV)
8222 return err;
8223
8224 have_props = !err;
8225
David S. Millera3138df2007-10-09 01:54:01 -07008226 err = niu_init_mac_ipp_pcs_base(np);
8227 if (err)
8228 return err;
8229
Matheos Worku7f7c4072008-04-24 21:02:37 -07008230 if (have_props) {
8231 err = niu_get_and_validate_port(np);
8232 if (err)
8233 return err;
8234
8235 } else {
David S. Millera3138df2007-10-09 01:54:01 -07008236 if (np->parent->plat_type == PLAT_TYPE_NIU)
8237 return -EINVAL;
8238
8239 nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
8240 offset = niu_pci_vpd_offset(np);
8241 niudbg(PROBE, "niu_get_invariants: VPD offset [%08x]\n",
8242 offset);
8243 if (offset)
8244 niu_pci_vpd_fetch(np, offset);
8245 nw64(ESPC_PIO_EN, 0);
8246
Matheos Worku7f7c4072008-04-24 21:02:37 -07008247 if (np->flags & NIU_FLAGS_VPD_VALID) {
David S. Millera3138df2007-10-09 01:54:01 -07008248 niu_pci_vpd_validate(np);
Matheos Worku7f7c4072008-04-24 21:02:37 -07008249 err = niu_get_and_validate_port(np);
8250 if (err)
8251 return err;
8252 }
David S. Millera3138df2007-10-09 01:54:01 -07008253
8254 if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
Matheos Worku7f7c4072008-04-24 21:02:37 -07008255 err = niu_get_and_validate_port(np);
8256 if (err)
8257 return err;
David S. Millera3138df2007-10-09 01:54:01 -07008258 err = niu_pci_probe_sprom(np);
8259 if (err)
8260 return err;
8261 }
8262 }
8263
8264 err = niu_probe_ports(np);
8265 if (err)
8266 return err;
8267
8268 niu_ldg_init(np);
8269
8270 niu_classifier_swstate_init(np);
8271 niu_link_config_init(np);
8272
8273 err = niu_determine_phy_disposition(np);
8274 if (!err)
8275 err = niu_init_link(np);
8276
8277 return err;
8278}
8279
8280static LIST_HEAD(niu_parent_list);
8281static DEFINE_MUTEX(niu_parent_lock);
8282static int niu_parent_index;
8283
8284static ssize_t show_port_phy(struct device *dev,
8285 struct device_attribute *attr, char *buf)
8286{
8287 struct platform_device *plat_dev = to_platform_device(dev);
8288 struct niu_parent *p = plat_dev->dev.platform_data;
8289 u32 port_phy = p->port_phy;
8290 char *orig_buf = buf;
8291 int i;
8292
8293 if (port_phy == PORT_PHY_UNKNOWN ||
8294 port_phy == PORT_PHY_INVALID)
8295 return 0;
8296
8297 for (i = 0; i < p->num_ports; i++) {
8298 const char *type_str;
8299 int type;
8300
8301 type = phy_decode(port_phy, i);
8302 if (type == PORT_TYPE_10G)
8303 type_str = "10G";
8304 else
8305 type_str = "1G";
8306 buf += sprintf(buf,
8307 (i == 0) ? "%s" : " %s",
8308 type_str);
8309 }
8310 buf += sprintf(buf, "\n");
8311 return buf - orig_buf;
8312}
8313
8314static ssize_t show_plat_type(struct device *dev,
8315 struct device_attribute *attr, char *buf)
8316{
8317 struct platform_device *plat_dev = to_platform_device(dev);
8318 struct niu_parent *p = plat_dev->dev.platform_data;
8319 const char *type_str;
8320
8321 switch (p->plat_type) {
8322 case PLAT_TYPE_ATLAS:
8323 type_str = "atlas";
8324 break;
8325 case PLAT_TYPE_NIU:
8326 type_str = "niu";
8327 break;
8328 case PLAT_TYPE_VF_P0:
8329 type_str = "vf_p0";
8330 break;
8331 case PLAT_TYPE_VF_P1:
8332 type_str = "vf_p1";
8333 break;
8334 default:
8335 type_str = "unknown";
8336 break;
8337 }
8338
8339 return sprintf(buf, "%s\n", type_str);
8340}
8341
8342static ssize_t __show_chan_per_port(struct device *dev,
8343 struct device_attribute *attr, char *buf,
8344 int rx)
8345{
8346 struct platform_device *plat_dev = to_platform_device(dev);
8347 struct niu_parent *p = plat_dev->dev.platform_data;
8348 char *orig_buf = buf;
8349 u8 *arr;
8350 int i;
8351
8352 arr = (rx ? p->rxchan_per_port : p->txchan_per_port);
8353
8354 for (i = 0; i < p->num_ports; i++) {
8355 buf += sprintf(buf,
8356 (i == 0) ? "%d" : " %d",
8357 arr[i]);
8358 }
8359 buf += sprintf(buf, "\n");
8360
8361 return buf - orig_buf;
8362}
8363
8364static ssize_t show_rxchan_per_port(struct device *dev,
8365 struct device_attribute *attr, char *buf)
8366{
8367 return __show_chan_per_port(dev, attr, buf, 1);
8368}
8369
8370static ssize_t show_txchan_per_port(struct device *dev,
8371 struct device_attribute *attr, char *buf)
8372{
8373 	return __show_chan_per_port(dev, attr, buf, 0);
}

static ssize_t show_num_ports(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;

	return sprintf(buf, "%d\n", p->num_ports);
}

static struct device_attribute niu_parent_attributes[] = {
	__ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
	__ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
	__ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
	__ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
	__ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
	{}
};

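/*
 * Create the shared state for one physical NIU chip.  A single chip
 * backs several network ports, so chip-wide resources (TCAM sizing,
 * per-class flow keys, the LDG map, the RXDMA clock divider) live in
 * a niu_parent rather than in each port's private struct.  The dummy
 * platform device registered here gives the parent a sysfs presence
 * for the attributes above.
 */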
static struct niu_parent * __devinit niu_new_parent(struct niu *np,
						    union niu_parent_id *id,
						    u8 ptype)
{
	struct platform_device *plat_dev;
	struct niu_parent *p;
	int i;

	niudbg(PROBE, "niu_new_parent: Creating new parent.\n");

	plat_dev = platform_device_register_simple("niu", niu_parent_index,
						   NULL, 0);
	if (!plat_dev)
		return NULL;

	for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
		int err = device_create_file(&plat_dev->dev,
					     &niu_parent_attributes[i]);
		if (err)
			goto fail_unregister;
	}

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		goto fail_unregister;

	p->index = niu_parent_index++;

	plat_dev->dev.platform_data = p;
	p->plat_dev = plat_dev;

	memcpy(&p->id, id, sizeof(*id));
	p->plat_type = ptype;
	INIT_LIST_HEAD(&p->list);
	atomic_set(&p->refcnt, 0);
	list_add(&p->list, &niu_parent_list);
	spin_lock_init(&p->lock);

	p->rxdma_clock_divider = 7500;

	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
	if (p->plat_type == PLAT_TYPE_NIU)
		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;

	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
		int index = i - CLASS_CODE_USER_PROG1;

		p->tcam_key[index] = TCAM_KEY_TSEL;
		p->flow_key[index] = (FLOW_KEY_IPSA |
				      FLOW_KEY_IPDA |
				      FLOW_KEY_PROTO |
				      (FLOW_KEY_L4_BYTE12 <<
				       FLOW_KEY_L4_0_SHIFT) |
				      (FLOW_KEY_L4_BYTE12 <<
				       FLOW_KEY_L4_1_SHIFT));
	}

	for (i = 0; i < LDN_MAX + 1; i++)
		p->ldg_map[i] = LDG_INVALID;

	return p;

fail_unregister:
	platform_device_unregister(plat_dev);
	return NULL;
}

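/*
 * Find the parent for this port, keyed by bus identity (PCI
 * domain/bus/device, or the OF parent node), creating it on first
 * use.  Each port takes a reference and gets a "portN" symlink from
 * the parent's sysfs directory back to its own device.
 */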
static struct niu_parent * __devinit niu_get_parent(struct niu *np,
						    union niu_parent_id *id,
						    u8 ptype)
{
	struct niu_parent *p, *tmp;
	int port = np->port;

	niudbg(PROBE, "niu_get_parent: platform_type[%u] port[%u]\n",
	       ptype, port);

	mutex_lock(&niu_parent_lock);
	p = NULL;
	list_for_each_entry(tmp, &niu_parent_list, list) {
		if (!memcmp(id, &tmp->id, sizeof(*id))) {
			p = tmp;
			break;
		}
	}
	if (!p)
		p = niu_new_parent(np, id, ptype);

	if (p) {
		char port_name[6];
		int err;

		sprintf(port_name, "port%d", port);
		err = sysfs_create_link(&p->plat_dev->dev.kobj,
					&np->device->kobj,
					port_name);
		if (!err) {
			p->ports[port] = np;
			atomic_inc(&p->refcnt);
		}
	}
	mutex_unlock(&niu_parent_lock);

	return p;
}

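/*
 * Drop a port's reference on its parent; the last port to detach
 * tears the parent down and unregisters its platform device.
 */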
static void niu_put_parent(struct niu *np)
{
	struct niu_parent *p = np->parent;
	u8 port = np->port;
	char port_name[6];

	BUG_ON(!p || p->ports[port] != np);

	niudbg(PROBE, "niu_put_parent: port[%u]\n", port);

	sprintf(port_name, "port%d", port);

	mutex_lock(&niu_parent_lock);

	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);

	p->ports[port] = NULL;
	np->parent = NULL;

	if (atomic_dec_and_test(&p->refcnt)) {
		list_del(&p->list);
		platform_device_unregister(p->plat_dev);
	}

	mutex_unlock(&niu_parent_lock);
}

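/*
 * All DMA goes through the function pointers in struct niu_ops, so
 * the same core code can drive both PCI cards, which use the generic
 * DMA API below, and the on-chip NIU variant, which hands the device
 * raw physical addresses (see niu_phys_ops further down).
 */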
static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
				    u64 *handle, gfp_t flag)
{
	dma_addr_t dh;
	void *ret;

	ret = dma_alloc_coherent(dev, size, &dh, flag);
	if (ret)
		*handle = dh;
	return ret;
}

static void niu_pci_free_coherent(struct device *dev, size_t size,
				  void *cpu_addr, u64 handle)
{
	dma_free_coherent(dev, size, cpu_addr, handle);
}

static u64 niu_pci_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction direction)
{
	return dma_map_page(dev, page, offset, size, direction);
}

static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
			       size_t size, enum dma_data_direction direction)
{
	dma_unmap_page(dev, dma_address, size, direction);
}

static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
			      size_t size,
			      enum dma_data_direction direction)
{
	return dma_map_single(dev, cpu_addr, size, direction);
}

static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
				 size_t size,
				 enum dma_data_direction direction)
{
	dma_unmap_single(dev, dma_address, size, direction);
}

static const struct niu_ops niu_pci_ops = {
	.alloc_coherent	= niu_pci_alloc_coherent,
	.free_coherent	= niu_pci_free_coherent,
	.map_page	= niu_pci_map_page,
	.unmap_page	= niu_pci_unmap_page,
	.map_single	= niu_pci_map_single,
	.unmap_single	= niu_pci_unmap_single,
};

static void __devinit niu_driver_version(void)
{
	static int niu_version_printed;

	if (niu_version_printed++ == 0)
		pr_info("%s", version);
}

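/*
 * Allocate the net_device plus private struct niu, recording which
 * bus flavor (PCI or OF) and which DMA ops this port uses.  One TX
 * queue is allocated per hardware TX channel.
 */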
static struct net_device * __devinit niu_alloc_and_init(
	struct device *gen_dev, struct pci_dev *pdev,
	struct of_device *op, const struct niu_ops *ops,
	u8 port)
{
	struct net_device *dev;
	struct niu *np;

	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
	if (!dev) {
		dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n");
		return NULL;
	}

	SET_NETDEV_DEV(dev, gen_dev);

	np = netdev_priv(dev);
	np->dev = dev;
	np->pdev = pdev;
	np->op = op;
	np->device = gen_dev;
	np->ops = ops;

	np->msg_enable = niu_debug;

	spin_lock_init(&np->lock);
	INIT_WORK(&np->reset_task, niu_reset_task);

	np->port = port;

	return dev;
}

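/* Entry points are assigned straight into the net_device; this
 * driver predates the consolidated net_device_ops structure.
 */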
static void __devinit niu_assign_netdev_ops(struct net_device *dev)
{
	dev->open = niu_open;
	dev->stop = niu_close;
	dev->get_stats = niu_get_stats;
	dev->set_multicast_list = niu_set_rx_mode;
	dev->set_mac_address = niu_set_mac_addr;
	dev->do_ioctl = niu_ioctl;
	dev->tx_timeout = niu_tx_timeout;
	dev->hard_start_xmit = niu_start_xmit;
	dev->ethtool_ops = &niu_ethtool_ops;
	dev->watchdog_timeo = NIU_TX_TIMEOUT;
	dev->change_mtu = niu_change_mtu;
}

static void __devinit niu_device_announce(struct niu *np)
{
	struct net_device *dev = np->dev;
	DECLARE_MAC_BUF(mac);

	pr_info("%s: NIU Ethernet %s\n",
		dev->name, print_mac(mac, dev->dev_addr));

	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
			dev->name,
			(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
			(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
			(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
			(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
			 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
			np->vpd.phy_type);
	} else {
		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
			dev->name,
			(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
			(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
			(np->flags & NIU_FLAGS_FIBER ? "FIBER" : "COPPER"),
			(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
			 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
			np->vpd.phy_type);
	}
}

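/*
 * PCI probe: enable the device, claim BARs 0 and 2, require a PCI
 * Express capability, attach to (or create) the shared parent, tune
 * PCIe error reporting, set the 44-bit DMA mask the chip supports
 * (falling back to 32-bit), map the register BAR, and register the
 * netdev.
 */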
static int __devinit niu_pci_init_one(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	union niu_parent_id parent_id;
	struct net_device *dev;
	struct niu *np;
	int err, pos;
	u64 dma_mask;
	u16 val16;

	niu_driver_version();

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, PFX "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, PFX "Cannot find proper PCI device "
			"base addresses, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, PFX "Cannot obtain PCI resources, "
			"aborting.\n");
		goto err_out_disable_pdev;
	}

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (pos <= 0) {
		dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
			"aborting.\n");
		err = -ENODEV;
		goto err_out_free_res;
	}
	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	np = netdev_priv(dev);

	memset(&parent_id, 0, sizeof(parent_id));
	parent_id.pci.domain = pci_domain_nr(pdev->bus);
	parent_id.pci.bus = pdev->bus->number;
	parent_id.pci.device = PCI_SLOT(pdev->devfn);

	np->parent = niu_get_parent(np, &parent_id,
				    PLAT_TYPE_ATLAS);
	if (!np->parent) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
	val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
	val16 |= (PCI_EXP_DEVCTL_CERE |
		  PCI_EXP_DEVCTL_NFERE |
		  PCI_EXP_DEVCTL_FERE |
		  PCI_EXP_DEVCTL_URRE |
		  PCI_EXP_DEVCTL_RELAX_EN);
	pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);

	dma_mask = DMA_44BIT_MASK;
	err = pci_set_dma_mask(pdev, dma_mask);
	if (!err) {
		dev->features |= NETIF_F_HIGHDMA;
		err = pci_set_consistent_dma_mask(pdev, dma_mask);
		if (err) {
			dev_err(&pdev->dev, PFX "Unable to obtain 44 bit "
				"DMA for consistent allocations, "
				"aborting.\n");
			goto err_out_release_parent;
		}
	}
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, PFX "No usable DMA configuration, "
				"aborting.\n");
			goto err_out_release_parent;
		}
	}

	dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);

	np->regs = pci_ioremap_bar(pdev, 0);
	if (!np->regs) {
		dev_err(&pdev->dev, PFX "Cannot map device registers, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_out_release_parent;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	dev->irq = pdev->irq;

	niu_assign_netdev_ops(dev);

	err = niu_get_invariants(np);
	if (err) {
		if (err != -ENODEV)
			dev_err(&pdev->dev, PFX "Problem fetching invariants "
				"of chip, aborting.\n");
		goto err_out_iounmap;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, PFX "Cannot register net device, "
			"aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	niu_device_announce(np);

	return 0;

err_out_iounmap:
	if (np->regs) {
		iounmap(np->regs);
		np->regs = NULL;
	}

err_out_release_parent:
	niu_put_parent(np);

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return err;
}

static void __devexit niu_pci_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct niu *np = netdev_priv(dev);

		unregister_netdev(dev);
		if (np->regs) {
			iounmap(np->regs);
			np->regs = NULL;
		}

		niu_ldg_free(np);

		niu_put_parent(np);

		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

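/*
 * Power management: suspend quiesces the chip with interrupts
 * disabled; resume rebuilds the full hardware state from scratch via
 * niu_init_hw and re-arms np->timer.
 */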
static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct niu *np = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	niu_netif_stop(np);

	del_timer_sync(&np->timer);

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_interrupts(np, 0);
	spin_unlock_irqrestore(&np->lock, flags);

	netif_device_detach(dev);

	spin_lock_irqsave(&np->lock, flags);
	niu_stop_hw(np);
	spin_unlock_irqrestore(&np->lock, flags);

	pci_save_state(pdev);

	return 0;
}

static int niu_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct niu *np = netdev_priv(dev);
	unsigned long flags;
	int err;

	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);

	netif_device_attach(dev);

	spin_lock_irqsave(&np->lock, flags);

	err = niu_init_hw(np);
	if (!err) {
		np->timer.expires = jiffies + HZ;
		add_timer(&np->timer);
		niu_netif_start(np);
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}

static struct pci_driver niu_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= niu_pci_tbl,
	.probe		= niu_pci_init_one,
	.remove		= __devexit_p(niu_pci_remove_one),
	.suspend	= niu_suspend,
	.resume		= niu_resume,
};

#ifdef CONFIG_SPARC64
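/*
 * niu_ops implementation for the on-chip NIU on sparc64: the device
 * sees physical addresses directly, so mapping is just __pa() or
 * page_to_phys() and coherent memory is an ordinary page allocation.
 */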
static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
				     u64 *dma_addr, gfp_t flag)
{
	unsigned long order = get_order(size);
	unsigned long page = __get_free_pages(flag, order);

	if (page == 0UL)
		return NULL;
	memset((char *)page, 0, PAGE_SIZE << order);
	*dma_addr = __pa(page);

	return (void *) page;
}

static void niu_phys_free_coherent(struct device *dev, size_t size,
				   void *cpu_addr, u64 handle)
{
	unsigned long order = get_order(size);

	free_pages((unsigned long) cpu_addr, order);
}

static u64 niu_phys_map_page(struct device *dev, struct page *page,
			     unsigned long offset, size_t size,
			     enum dma_data_direction direction)
{
	return page_to_phys(page) + offset;
}

static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
				size_t size, enum dma_data_direction direction)
{
	/* Nothing to do. */
}

static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
			       size_t size,
			       enum dma_data_direction direction)
{
	return __pa(cpu_addr);
}

static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	/* Nothing to do. */
}

static const struct niu_ops niu_phys_ops = {
	.alloc_coherent	= niu_phys_alloc_coherent,
	.free_coherent	= niu_phys_free_coherent,
	.map_page	= niu_phys_map_page,
	.unmap_page	= niu_phys_unmap_page,
	.map_single	= niu_phys_map_single,
	.unmap_single	= niu_phys_unmap_single,
};

static unsigned long res_size(struct resource *r)
{
	return r->end - r->start + 1UL;
}

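/*
 * OF probe: the port number comes from the low bit of the "reg"
 * property, the parent is keyed by the OF parent node, and three
 * register areas are mapped: the main registers plus two windows of
 * virtualization registers.
 */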
static int __devinit niu_of_probe(struct of_device *op,
				  const struct of_device_id *match)
{
	union niu_parent_id parent_id;
	struct net_device *dev;
	struct niu *np;
	const u32 *reg;
	int err;

	niu_driver_version();

	reg = of_get_property(op->node, "reg", NULL);
	if (!reg) {
		dev_err(&op->dev, PFX "%s: No 'reg' property, aborting.\n",
			op->node->full_name);
		return -ENODEV;
	}

	dev = niu_alloc_and_init(&op->dev, NULL, op,
				 &niu_phys_ops, reg[0] & 0x1);
	if (!dev) {
		err = -ENOMEM;
		goto err_out;
	}
	np = netdev_priv(dev);

	memset(&parent_id, 0, sizeof(parent_id));
	parent_id.of = of_get_parent(op->node);

	np->parent = niu_get_parent(np, &parent_id,
				    PLAT_TYPE_NIU);
	if (!np->parent) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);

	np->regs = of_ioremap(&op->resource[1], 0,
			      res_size(&op->resource[1]),
			      "niu regs");
	if (!np->regs) {
		dev_err(&op->dev, PFX "Cannot map device registers, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_out_release_parent;
	}

	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
				    res_size(&op->resource[2]),
				    "niu vregs-1");
	if (!np->vir_regs_1) {
		dev_err(&op->dev, PFX "Cannot map device vir registers 1, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
				    res_size(&op->resource[3]),
				    "niu vregs-2");
	if (!np->vir_regs_2) {
		dev_err(&op->dev, PFX "Cannot map device vir registers 2, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	niu_assign_netdev_ops(dev);

	err = niu_get_invariants(np);
	if (err) {
		if (err != -ENODEV)
			dev_err(&op->dev, PFX "Problem fetching invariants "
				"of chip, aborting.\n");
		goto err_out_iounmap;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&op->dev, PFX "Cannot register net device, "
			"aborting.\n");
		goto err_out_iounmap;
	}

	dev_set_drvdata(&op->dev, dev);

	niu_device_announce(np);

	return 0;

err_out_iounmap:
	if (np->vir_regs_1) {
		of_iounmap(&op->resource[2], np->vir_regs_1,
			   res_size(&op->resource[2]));
		np->vir_regs_1 = NULL;
	}

	if (np->vir_regs_2) {
		of_iounmap(&op->resource[3], np->vir_regs_2,
			   res_size(&op->resource[3]));
		np->vir_regs_2 = NULL;
	}

	if (np->regs) {
		of_iounmap(&op->resource[1], np->regs,
			   res_size(&op->resource[1]));
		np->regs = NULL;
	}

err_out_release_parent:
	niu_put_parent(np);

err_out_free_dev:
	free_netdev(dev);

err_out:
	return err;
}

static int __devexit niu_of_remove(struct of_device *op)
{
	struct net_device *dev = dev_get_drvdata(&op->dev);

	if (dev) {
		struct niu *np = netdev_priv(dev);

		unregister_netdev(dev);

		if (np->vir_regs_1) {
			of_iounmap(&op->resource[2], np->vir_regs_1,
				   res_size(&op->resource[2]));
			np->vir_regs_1 = NULL;
		}

		if (np->vir_regs_2) {
			of_iounmap(&op->resource[3], np->vir_regs_2,
				   res_size(&op->resource[3]));
			np->vir_regs_2 = NULL;
		}

		if (np->regs) {
			of_iounmap(&op->resource[1], np->regs,
				   res_size(&op->resource[1]));
			np->regs = NULL;
		}

		niu_ldg_free(np);

		niu_put_parent(np);

		free_netdev(dev);
		dev_set_drvdata(&op->dev, NULL);
	}
	return 0;
}

static const struct of_device_id niu_match[] = {
	{
		.name = "network",
		.compatible = "SUNW,niusl",
	},
	{},
};
MODULE_DEVICE_TABLE(of, niu_match);

static struct of_platform_driver niu_of_driver = {
	.name		= "niu",
	.match_table	= niu_match,
	.probe		= niu_of_probe,
	.remove		= __devexit_p(niu_of_remove),
};

#endif /* CONFIG_SPARC64 */

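/*
 * Module init: register the OF driver first on sparc64, then the PCI
 * driver; if PCI registration fails, the OF driver is unwound so the
 * module never ends up half-registered.
 */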
static int __init niu_init(void)
{
	int err = 0;

	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);

	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);

#ifdef CONFIG_SPARC64
	err = of_register_driver(&niu_of_driver, &of_bus_type);
#endif

	if (!err) {
		err = pci_register_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
		if (err)
			of_unregister_driver(&niu_of_driver);
#endif
	}

	return err;
}

static void __exit niu_exit(void)
{
	pci_unregister_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
	of_unregister_driver(&niu_of_driver);
#endif
}

module_init(niu_init);
module_exit(niu_exit);