1/*
2 * This file is part of the Chelsio T3 Ethernet driver.
3 *
4 * Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 */
11
12#include "common.h"
13#include "regs.h"
14#include "sge_defs.h"
15#include "firmware_exports.h"
16
17/**
18 * t3_wait_op_done_val - wait until an operation is completed
19 * @adapter: the adapter performing the operation
20 * @reg: the register to check for completion
21 * @mask: a single-bit field within @reg that indicates completion
22 * @polarity: the value of the field when the operation is completed
23 * @attempts: number of check iterations
24 * @delay: delay in usecs between iterations
25 * @valp: where to store the value of the register at completion time
26 *
27 * Wait until an operation is completed by checking a bit in a register
28 * up to @attempts times. If @valp is not NULL the value of the register
29 * at the time it indicated completion is stored there. Returns 0 if the
30 * operation completes and -EAGAIN otherwise.
31 */
33int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
34 int polarity, int attempts, int delay, u32 *valp)
35{
36 while (1) {
37 u32 val = t3_read_reg(adapter, reg);
38
39 if (!!(val & mask) == polarity) {
40 if (valp)
41 *valp = val;
42 return 0;
43 }
44 if (--attempts == 0)
45 return -EAGAIN;
46 if (delay)
47 udelay(delay);
48 }
49}
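/*
 * Example (illustrative sketch, not part of the original driver): poll a
 * hypothetical status register until its completion bit reads as 1, up to
 * 100 times with 10 usecs between polls.  A_EXAMPLE_STATUS and
 * F_EXAMPLE_DONE are made-up names standing in for a real register/field.
 */
static inline int example_wait_done(struct adapter *adapter, u32 *status)
{
	/* polarity 1: wait for the masked bit to become 1 */
	return t3_wait_op_done_val(adapter, A_EXAMPLE_STATUS, F_EXAMPLE_DONE,
				   1, 100, 10, status);
}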
50
51/**
52 * t3_write_regs - write a bunch of registers
53 * @adapter: the adapter to program
54 * @p: an array of register address/register value pairs
55 * @n: the number of address/value pairs
56 * @offset: register address offset
57 *
58 * Takes an array of register address/register value pairs and writes each
59 * value to the corresponding register. Register addresses are adjusted
60 * by the supplied offset.
61 */
62void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
63 int n, unsigned int offset)
64{
65 while (n--) {
66 t3_write_reg(adapter, p->reg_addr + offset, p->val);
67 p++;
68 }
69}
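/*
 * Example (sketch with hypothetical register names): programming a batch
 * of registers in one call; a real use of this helper appears in
 * t3_intr_enable() below.
 */
static inline void example_program_regs(struct adapter *adapter)
{
	static const struct addr_val_pair example_avp[] = {
		{A_EXAMPLE_CFG, 0x1},      /* hypothetical config register */
		{A_EXAMPLE_THRESH, 0x100}, /* hypothetical threshold register */
	};

	t3_write_regs(adapter, example_avp, ARRAY_SIZE(example_avp), 0);
}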
70
71/**
72 * t3_set_reg_field - set a register field to a value
73 * @adapter: the adapter to program
74 * @addr: the register address
75 * @mask: specifies the portion of the register to modify
76 * @val: the new value for the register field
77 *
78 * Sets a register field specified by the supplied mask to the
79 * given value.
80 */
81void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
82 u32 val)
83{
84 u32 v = t3_read_reg(adapter, addr) & ~mask;
85
86 t3_write_reg(adapter, addr, v | val);
87 t3_read_reg(adapter, addr); /* flush */
88}
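/*
 * Example (sketch): update a single multi-bit field with a read-modify-write.
 * The driver convention pairs a V_FOO() shift macro with an M_FOO field mask;
 * the EXAMPLE names here are hypothetical.
 */
static inline void example_set_field(struct adapter *adapter)
{
	/* zero the whole EXAMPLE field, then set it to 5 */
	t3_set_reg_field(adapter, A_EXAMPLE_CFG, V_EXAMPLE(M_EXAMPLE),
			 V_EXAMPLE(5));
}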
89
90/**
91 * t3_read_indirect - read indirectly addressed registers
92 * @adap: the adapter
93 * @addr_reg: register holding the indirect address
94 * @data_reg: register holding the value of the indirect register
95 * @vals: where the read register values are stored
96 * @nregs: how many indirect registers to read
97 * @start_idx: index of first indirect register to read
98 *
99 * Reads registers that are accessed indirectly through an address/data
100 * register pair.
101 */
102void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
103 unsigned int data_reg, u32 *vals, unsigned int nregs,
104 unsigned int start_idx)
105{
106 while (nregs--) {
107 t3_write_reg(adap, addr_reg, start_idx);
108 *vals++ = t3_read_reg(adap, data_reg);
109 start_idx++;
110 }
111}
112
113/**
114 * t3_mc7_bd_read - read from MC7 through backdoor accesses
115 * @mc7: identifies MC7 to read from
116 * @start: index of first 64-bit word to read
117 * @n: number of 64-bit words to read
118 * @buf: where to store the read result
119 *
120 * Read n 64-bit words from MC7 starting at word start, using backdoor
121 * accesses.
122 */
123int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
124 u64 *buf)
125{
126 static const int shift[] = { 0, 0, 16, 24 };
127 static const int step[] = { 0, 32, 16, 8 };
128
129 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
130 struct adapter *adap = mc7->adapter;
131
132 if (start >= size64 || start + n > size64)
133 return -EINVAL;
134
135 start *= (8 << mc7->width);
136 while (n--) {
137 int i;
138 u64 val64 = 0;
139
140 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
141 int attempts = 10;
142 u32 val;
143
144 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
145 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
146 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
147 while ((val & F_BUSY) && attempts--)
148 val = t3_read_reg(adap,
149 mc7->offset + A_MC7_BD_OP);
150 if (val & F_BUSY)
151 return -EIO;
152
153 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
154 if (mc7->width == 0) {
155 val64 = t3_read_reg(adap,
156 mc7->offset +
157 A_MC7_BD_DATA0);
158 val64 |= (u64) val << 32;
159 } else {
160 if (mc7->width > 1)
161 val >>= shift[mc7->width];
162 val64 |= (u64) val << (step[mc7->width] * i);
163 }
164 start += 8;
165 }
166 *buf++ = val64;
167 }
168 return 0;
169}
170
171/*
172 * Initialize MI1.
173 */
174static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
175{
176 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
177 u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
178 V_CLKDIV(clkdiv);
179
180 if (!(ai->caps & SUPPORTED_10000baseT_Full))
181 val |= V_ST(1);
182 t3_write_reg(adap, A_MI1_CFG, val);
183}
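/*
 * Worked example (illustrative numbers): with cclk and mdc in the same
 * units, say a 125000 kHz core clock and a 2500 kHz target MDIO clock,
 * clkdiv = 125000 / (2 * 2500) - 1 = 24, i.e. MDC = cclk / (2 * (clkdiv + 1)).
 */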
184
185#define MDIO_ATTEMPTS 10
186
187/*
188 * MI1 read/write operations for direct-addressed PHYs.
189 */
190static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
191 int reg_addr, unsigned int *valp)
192{
193 int ret;
194 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
195
196 if (mmd_addr)
197 return -EINVAL;
198
199 mutex_lock(&adapter->mdio_lock);
200 t3_write_reg(adapter, A_MI1_ADDR, addr);
201 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
202 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
203 if (!ret)
204 *valp = t3_read_reg(adapter, A_MI1_DATA);
205 mutex_unlock(&adapter->mdio_lock);
206 return ret;
207}
208
209static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
210 int reg_addr, unsigned int val)
211{
212 int ret;
213 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
214
215 if (mmd_addr)
216 return -EINVAL;
217
218 mutex_lock(&adapter->mdio_lock);
219 t3_write_reg(adapter, A_MI1_ADDR, addr);
220 t3_write_reg(adapter, A_MI1_DATA, val);
221 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
222 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
223 mutex_unlock(&adapter->mdio_lock);
224 return ret;
225}
226
227static const struct mdio_ops mi1_mdio_ops = {
228 mi1_read,
229 mi1_write
230};
231
232/*
233 * MI1 read/write operations for indirect-addressed PHYs.
234 */
235static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
236 int reg_addr, unsigned int *valp)
237{
238 int ret;
239 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
240
241 mutex_lock(&adapter->mdio_lock);
242 t3_write_reg(adapter, A_MI1_ADDR, addr);
243 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
244 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
245 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
246 if (!ret) {
247 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
248 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
249 MDIO_ATTEMPTS, 20);
250 if (!ret)
251 *valp = t3_read_reg(adapter, A_MI1_DATA);
252 }
253 mutex_unlock(&adapter->mdio_lock);
254 return ret;
255}
256
257static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
258 int reg_addr, unsigned int val)
259{
260 int ret;
261 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
262
263 mutex_lock(&adapter->mdio_lock);
264 t3_write_reg(adapter, A_MI1_ADDR, addr);
265 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
266 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
267 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
268 if (!ret) {
269 t3_write_reg(adapter, A_MI1_DATA, val);
270 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
271 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
272 MDIO_ATTEMPTS, 20);
273 }
274 mutex_unlock(&adapter->mdio_lock);
275 return ret;
276}
277
278static const struct mdio_ops mi1_mdio_ext_ops = {
279 mi1_ext_read,
280 mi1_ext_write
281};
282
283/**
284 * t3_mdio_change_bits - modify the value of a PHY register
285 * @phy: the PHY to operate on
286 * @mmd: the device address
287 * @reg: the register address
288 * @clear: what part of the register value to mask off
289 * @set: what part of the register value to set
290 *
291 * Changes the value of a PHY register by applying a mask to its current
292 * value and ORing the result with a new value.
293 */
294int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
295 unsigned int set)
296{
297 int ret;
298 unsigned int val;
299
300 ret = mdio_read(phy, mmd, reg, &val);
301 if (!ret) {
302 val &= ~clear;
303 ret = mdio_write(phy, mmd, reg, val | set);
304 }
305 return ret;
306}
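/*
 * Example (sketch): bring a direct-addressed (mmd 0) PHY out of power-down
 * without disturbing the other BMCR bits.
 */
static inline int example_phy_power_up(struct cphy *phy)
{
	/* clear BMCR_PDOWN, set no additional bits */
	return t3_mdio_change_bits(phy, 0, MII_BMCR, BMCR_PDOWN, 0);
}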
307
308/**
309 * t3_phy_reset - reset a PHY block
310 * @phy: the PHY to operate on
311 * @mmd: the device address of the PHY block to reset
312 * @wait: how long to wait for the reset to complete in 1ms increments
313 *
314 * Resets a PHY block and optionally waits for the reset to complete.
315 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
316 * for 10G PHYs.
317 */
318int t3_phy_reset(struct cphy *phy, int mmd, int wait)
319{
320 int err;
321 unsigned int ctl;
322
323 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
324 if (err || !wait)
325 return err;
326
327 do {
328 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
329 if (err)
330 return err;
331 ctl &= BMCR_RESET;
332 if (ctl)
333 msleep(1);
334 } while (ctl && --wait);
335
336 return ctl ? -1 : 0;
337}
338
339/**
340 * t3_phy_advertise - set the PHY advertisement registers for autoneg
341 * @phy: the PHY to operate on
342 * @advert: bitmap of capabilities the PHY should advertise
343 *
344 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
345 * requested capabilities.
346 */
347int t3_phy_advertise(struct cphy *phy, unsigned int advert)
348{
349 int err;
350 unsigned int val = 0;
351
352 err = mdio_read(phy, 0, MII_CTRL1000, &val);
353 if (err)
354 return err;
355
356 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
357 if (advert & ADVERTISED_1000baseT_Half)
358 val |= ADVERTISE_1000HALF;
359 if (advert & ADVERTISED_1000baseT_Full)
360 val |= ADVERTISE_1000FULL;
361
362 err = mdio_write(phy, 0, MII_CTRL1000, val);
363 if (err)
364 return err;
365
366 val = 1;
367 if (advert & ADVERTISED_10baseT_Half)
368 val |= ADVERTISE_10HALF;
369 if (advert & ADVERTISED_10baseT_Full)
370 val |= ADVERTISE_10FULL;
371 if (advert & ADVERTISED_100baseT_Half)
372 val |= ADVERTISE_100HALF;
373 if (advert & ADVERTISED_100baseT_Full)
374 val |= ADVERTISE_100FULL;
375 if (advert & ADVERTISED_Pause)
376 val |= ADVERTISE_PAUSE_CAP;
377 if (advert & ADVERTISED_Asym_Pause)
378 val |= ADVERTISE_PAUSE_ASYM;
379 return mdio_write(phy, 0, MII_ADVERTISE, val);
380}
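/*
 * Example (sketch): advertise all 10/100/1000 modes plus symmetric pause
 * before restarting auto-negotiation.
 */
static inline int example_advertise_all(struct cphy *phy)
{
	return t3_phy_advertise(phy, ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_1000baseT_Full |
				ADVERTISED_Pause);
}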
381
382/**
383 * t3_set_phy_speed_duplex - force PHY speed and duplex
384 * @phy: the PHY to operate on
385 * @speed: requested PHY speed
386 * @duplex: requested PHY duplex
387 *
388 * Force a 10/100/1000 PHY's speed and duplex. This also disables
389 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
390 */
391int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
392{
393 int err;
394 unsigned int ctl;
395
396 err = mdio_read(phy, 0, MII_BMCR, &ctl);
397 if (err)
398 return err;
399
400 if (speed >= 0) {
401 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
402 if (speed == SPEED_100)
403 ctl |= BMCR_SPEED100;
404 else if (speed == SPEED_1000)
405 ctl |= BMCR_SPEED1000;
406 }
407 if (duplex >= 0) {
408 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
409 if (duplex == DUPLEX_FULL)
410 ctl |= BMCR_FULLDPLX;
411 }
412 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
413 ctl |= BMCR_ANENABLE;
414 return mdio_write(phy, 0, MII_BMCR, ctl);
415}
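/*
 * Example (sketch): force 100 Mb/s full duplex.  A negative speed or duplex
 * argument leaves that setting untouched, per the checks above.
 */
static inline int example_force_100_full(struct cphy *phy)
{
	return t3_set_phy_speed_duplex(phy, SPEED_100, DUPLEX_FULL);
}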
416
417static const struct adapter_info t3_adap_info[] = {
418 {2, 0, 0, 0,
419 F_GPIO2_OEN | F_GPIO4_OEN |
420 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
421 SUPPORTED_OFFLOAD,
422 &mi1_mdio_ops, "Chelsio PE9000"},
423 {2, 0, 0, 0,
424 F_GPIO2_OEN | F_GPIO4_OEN |
425 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
426 SUPPORTED_OFFLOAD,
427 &mi1_mdio_ops, "Chelsio T302"},
428 {1, 0, 0, 0,
429 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
430 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
431 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
432 &mi1_mdio_ext_ops, "Chelsio T310"},
433 {2, 0, 0, 0,
434 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
435 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
436 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
437 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
438 &mi1_mdio_ext_ops, "Chelsio T320"},
439};
440
441/*
442 * Return the adapter_info structure with a given index. Out-of-range indices
443 * return NULL.
444 */
445const struct adapter_info *t3_get_adapter_info(unsigned int id)
446{
447 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
448}
449
450#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
451 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
452#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)
453
454static const struct port_type_info port_types[] = {
455 {NULL},
456 {t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
457 "10GBASE-XR"},
458 {t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
459 "10/100/1000BASE-T"},
460 {NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
461 "10/100/1000BASE-T"},
462 {t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
463 {NULL, CAPS_10G, "10GBASE-KX4"},
464 {t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
465 {t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
466 "10GBASE-SR"},
467 {NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
468};
469
470#undef CAPS_1G
471#undef CAPS_10G
472
473#define VPD_ENTRY(name, len) \
474 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
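/*
 * For reference, VPD_ENTRY(sn, 16) expands to:
 *
 *	u8 sn_kword[2]; u8 sn_len; u8 sn_data[16];
 *
 * i.e., the two-character VPD keyword, its length byte, and the payload.
 */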
475
476/*
477 * Partial EEPROM Vital Product Data structure. Includes only the ID and
478 * VPD-R sections.
479 */
480struct t3_vpd {
481 u8 id_tag;
482 u8 id_len[2];
483 u8 id_data[16];
484 u8 vpdr_tag;
485 u8 vpdr_len[2];
486 VPD_ENTRY(pn, 16); /* part number */
487 VPD_ENTRY(ec, 16); /* EC level */
488 VPD_ENTRY(sn, 16); /* serial number */
489 VPD_ENTRY(na, 12); /* MAC address base */
490 VPD_ENTRY(cclk, 6); /* core clock */
491 VPD_ENTRY(mclk, 6); /* mem clock */
492 VPD_ENTRY(uclk, 6); /* uP clk */
493 VPD_ENTRY(mdc, 6); /* MDIO clk */
494 VPD_ENTRY(mt, 2); /* mem timing */
495 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
496 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
497 VPD_ENTRY(port0, 2); /* PHY0 complex */
498 VPD_ENTRY(port1, 2); /* PHY1 complex */
499 VPD_ENTRY(port2, 2); /* PHY2 complex */
500 VPD_ENTRY(port3, 2); /* PHY3 complex */
501 VPD_ENTRY(rv, 1); /* csum */
502 u32 pad; /* for multiple-of-4 sizing and alignment */
503};
504
505#define EEPROM_MAX_POLL 4
506#define EEPROM_STAT_ADDR 0x4000
507#define VPD_BASE 0xc00
508
509/**
510 * t3_seeprom_read - read a VPD EEPROM location
511 * @adapter: adapter to read
512 * @addr: EEPROM address
513 * @data: where to store the read data
514 *
515 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
516 * VPD ROM capability. A zero is written to the flag bit when the
517 * address is written to the control register. The hardware device will
518 * set the flag to 1 when 4 bytes have been read into the data register.
519 */
520int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
521{
522 u16 val;
523 int attempts = EEPROM_MAX_POLL;
524 unsigned int base = adapter->params.pci.vpd_cap_addr;
525
526 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
527 return -EINVAL;
528
529 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
530 do {
531 udelay(10);
532 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
533 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
534
535 if (!(val & PCI_VPD_ADDR_F)) {
536 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
537 return -EIO;
538 }
539 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
540 *data = le32_to_cpu(*data);
541 return 0;
542}
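/*
 * Example (sketch): read one 32-bit word from the VPD section.  The 0x18
 * offset is arbitrary for illustration; the address must be 4-byte aligned.
 */
static inline int example_read_vpd_word(struct adapter *adapter, u32 *word)
{
	return t3_seeprom_read(adapter, VPD_BASE + 0x18, word);
}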
543
544/**
545 * t3_seeprom_write - write a VPD EEPROM location
546 * @adapter: adapter to write
547 * @addr: EEPROM address
548 * @data: value to write
549 *
550 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
551 * VPD ROM capability.
552 */
553int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
554{
555 u16 val;
556 int attempts = EEPROM_MAX_POLL;
557 unsigned int base = adapter->params.pci.vpd_cap_addr;
558
559 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
560 return -EINVAL;
561
562 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
563 cpu_to_le32(data));
564 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
565 addr | PCI_VPD_ADDR_F);
566 do {
567 msleep(1);
568 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
569 } while ((val & PCI_VPD_ADDR_F) && --attempts);
570
571 if (val & PCI_VPD_ADDR_F) {
572 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
573 return -EIO;
574 }
575 return 0;
576}
577
578/**
579 * t3_seeprom_wp - enable/disable EEPROM write protection
580 * @adapter: the adapter
581 * @enable: 1 to enable write protection, 0 to disable it
582 *
583 * Enables or disables write protection on the serial EEPROM.
584 */
585int t3_seeprom_wp(struct adapter *adapter, int enable)
586{
587 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
588}
589
590/*
591 * Convert a character holding a hex digit to a number.
592 */
593static unsigned int hex2int(unsigned char c)
594{
595 return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
596}
597
598/**
599 * get_vpd_params - read VPD parameters from VPD EEPROM
600 * @adapter: adapter to read
601 * @p: where to store the parameters
602 *
603 * Reads card parameters stored in VPD EEPROM.
604 */
605static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
606{
607 int i, addr, ret;
608 struct t3_vpd vpd;
609
610 /*
611 * Card information is normally at VPD_BASE but some early cards had
612 * it at 0.
613 */
614 ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
615 if (ret)
616 return ret;
617 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
618
619 for (i = 0; i < sizeof(vpd); i += 4) {
620 ret = t3_seeprom_read(adapter, addr + i,
621 (u32 *)((u8 *)&vpd + i));
622 if (ret)
623 return ret;
624 }
625
626 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
627 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
628 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
629 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
630 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
631
632 /* Old EEPROMs didn't have port information */
633 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
634 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
635 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
636 } else {
637 p->port_type[0] = hex2int(vpd.port0_data[0]);
638 p->port_type[1] = hex2int(vpd.port1_data[0]);
639 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
640 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
641 }
642
643 for (i = 0; i < 6; i++)
644 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
645 hex2int(vpd.na_data[2 * i + 1]);
646 return 0;
647}
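/*
 * Worked example: an na_data string of "0007430A1B2C" (Chelsio's 00:07:43
 * OUI followed by a hypothetical station part) parses two hex digits per
 * byte into eth_base = {0x00, 0x07, 0x43, 0x0a, 0x1b, 0x2c}.
 */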
648
649/* serial flash and firmware constants */
650enum {
651 SF_ATTEMPTS = 5, /* max retries for SF1 operations */
652 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
653 SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
654
655 /* flash command opcodes */
656 SF_PROG_PAGE = 2, /* program page */
657 SF_WR_DISABLE = 4, /* disable writes */
658 SF_RD_STATUS = 5, /* read status register */
659 SF_WR_ENABLE = 6, /* enable writes */
660 SF_RD_DATA_FAST = 0xb, /* read flash */
661 SF_ERASE_SECTOR = 0xd8, /* erase sector */
662
663 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
664 FW_VERS_ADDR = 0x77ffc /* flash address holding FW version */
665};
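/*
 * Sketch of the flash map implied by the constants above: eight 64 KB
 * sectors, with the firmware image at the start of sector 7 (0x70000) and
 * its version word in the last 4 bytes of the 32 KB firmware region
 * (0x77ffc).  t3_load_fw() below uses these bounds to cap the image size.
 */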
666
667/**
668 * sf1_read - read data from the serial flash
669 * @adapter: the adapter
670 * @byte_cnt: number of bytes to read
671 * @cont: whether another operation will be chained
672 * @valp: where to store the read data
673 *
674 * Reads up to 4 bytes of data from the serial flash. The location of
675 * the read needs to be specified prior to calling this by issuing the
676 * appropriate commands to the serial flash.
677 */
678static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
679 u32 *valp)
680{
681 int ret;
682
683 if (!byte_cnt || byte_cnt > 4)
684 return -EINVAL;
685 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
686 return -EBUSY;
687 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
688 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
689 if (!ret)
690 *valp = t3_read_reg(adapter, A_SF_DATA);
691 return ret;
692}
693
694/**
695 * sf1_write - write data to the serial flash
696 * @adapter: the adapter
697 * @byte_cnt: number of bytes to write
698 * @cont: whether another operation will be chained
699 * @val: value to write
700 *
701 * Writes up to 4 bytes of data to the serial flash. The location of
702 * the write needs to be specified prior to calling this by issuing the
703 * appropriate commands to the serial flash.
704 */
705static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
706 u32 val)
707{
708 if (!byte_cnt || byte_cnt > 4)
709 return -EINVAL;
710 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
711 return -EBUSY;
712 t3_write_reg(adapter, A_SF_DATA, val);
713 t3_write_reg(adapter, A_SF_OP,
714 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
715 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
716}
717
718/**
719 * flash_wait_op - wait for a flash operation to complete
720 * @adapter: the adapter
721 * @attempts: max number of polls of the status register
722 * @delay: delay between polls in ms
723 *
724 * Wait for a flash operation to complete by polling the status register.
725 */
726static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
727{
728 int ret;
729 u32 status;
730
731 while (1) {
732 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
733 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
734 return ret;
735 if (!(status & 1))
736 return 0;
737 if (--attempts == 0)
738 return -EAGAIN;
739 if (delay)
740 msleep(delay);
741 }
742}
743
744/**
745 * t3_read_flash - read words from serial flash
746 * @adapter: the adapter
747 * @addr: the start address for the read
748 * @nwords: how many 32-bit words to read
749 * @data: where to store the read data
750 * @byte_oriented: whether to store data as bytes or as words
751 *
752 * Read the specified number of 32-bit words from the serial flash.
753 * If @byte_oriented is set the read data is stored as a byte array
754 * (i.e., big-endian), otherwise as 32-bit words in the platform's
755 * natural endianness.
756 */
757int t3_read_flash(struct adapter *adapter, unsigned int addr,
758 unsigned int nwords, u32 *data, int byte_oriented)
759{
760 int ret;
761
762 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
763 return -EINVAL;
764
765 addr = swab32(addr) | SF_RD_DATA_FAST;
766
767 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
768 (ret = sf1_read(adapter, 1, 1, data)) != 0)
769 return ret;
770
771 for (; nwords; nwords--, data++) {
772 ret = sf1_read(adapter, 4, nwords > 1, data);
773 if (ret)
774 return ret;
775 if (byte_oriented)
776 *data = htonl(*data);
777 }
778 return 0;
779}
780
781/**
782 * t3_write_flash - write up to a page of data to the serial flash
783 * @adapter: the adapter
784 * @addr: the start address to write
785 * @n: length of data to write
786 * @data: the data to write
787 *
788 * Writes up to a page of data (256 bytes) to the serial flash starting
789 * at the given address.
790 */
791static int t3_write_flash(struct adapter *adapter, unsigned int addr,
792 unsigned int n, const u8 *data)
793{
794 int ret;
795 u32 buf[64];
796 unsigned int i, c, left, val, offset = addr & 0xff;
797
798 if (addr + n > SF_SIZE || offset + n > 256)
799 return -EINVAL;
800
801 val = swab32(addr) | SF_PROG_PAGE;
802
803 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
804 (ret = sf1_write(adapter, 4, 1, val)) != 0)
805 return ret;
806
807 for (left = n; left; left -= c) {
808 c = min(left, 4U);
809 for (val = 0, i = 0; i < c; ++i)
810 val = (val << 8) + *data++;
811
812 ret = sf1_write(adapter, c, c != left, val);
813 if (ret)
814 return ret;
815 }
816 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
817 return ret;
818
819 /* Read the page to verify the write succeeded */
820 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
821 if (ret)
822 return ret;
823
824 if (memcmp(data - n, (u8 *) buf + offset, n))
825 return -EIO;
826 return 0;
827}
828
829enum fw_version_type {
830 FW_VERSION_N3,
831 FW_VERSION_T3
832};
833
834/**
835 * t3_get_fw_version - read the firmware version
836 * @adapter: the adapter
837 * @vers: where to place the version
838 *
839 * Reads the FW version from flash.
840 */
841int t3_get_fw_version(struct adapter *adapter, u32 *vers)
842{
843 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
844}
845
846/**
847 * t3_check_fw_version - check if the FW is compatible with this driver
848 * @adapter: the adapter
849 *
850 * Checks if an adapter's FW is compatible with the driver. Returns 0
851 * if the versions are compatible, a negative error otherwise.
852 */
853int t3_check_fw_version(struct adapter *adapter)
854{
855 int ret;
856 u32 vers;
857 unsigned int type, major, minor;
858
859 ret = t3_get_fw_version(adapter, &vers);
860 if (ret)
861 return ret;
862
863 type = G_FW_VERSION_TYPE(vers);
864 major = G_FW_VERSION_MAJOR(vers);
865 minor = G_FW_VERSION_MINOR(vers);
866
867 if (type == FW_VERSION_T3 && major == 3 && minor == 1)
868 return 0;
869
870 CH_ERR(adapter, "found wrong FW version (%u.%u), "
871 "driver needs version 3.1\n", major, minor);
872 return -EINVAL;
873}
874
875/**
876 * t3_flash_erase_sectors - erase a range of flash sectors
877 * @adapter: the adapter
878 * @start: the first sector to erase
879 * @end: the last sector to erase
880 *
881 * Erases the sectors in the given range.
882 */
883static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
884{
885 while (start <= end) {
886 int ret;
887
888 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
889 (ret = sf1_write(adapter, 4, 0,
890 SF_ERASE_SECTOR | (start << 8))) != 0 ||
891 (ret = flash_wait_op(adapter, 5, 500)) != 0)
892 return ret;
893 start++;
894 }
895 return 0;
896}
897
898/**
899 * t3_load_fw - download firmware
900 * @adapter: the adapter
901 * @fw_data: the firmware image to write
902 * @size: image size
903 *
904 * Write the supplied firmware image to the card's serial flash.
905 * The FW image has the following sections: @size - 8 bytes of code and
906 * data, followed by 4 bytes of FW version, followed by the 32-bit
907 * 1's complement checksum of the whole image.
908 */
909int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
910{
911 u32 csum;
912 unsigned int i;
913 const u32 *p = (const u32 *)fw_data;
914 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
915
916 if (size & 3)
917 return -EINVAL;
918 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
919 return -EFBIG;
920
921 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
922 csum += ntohl(p[i]);
923 if (csum != 0xffffffff) {
924 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
925 csum);
926 return -EINVAL;
927 }
928
929 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
930 if (ret)
931 goto out;
932
933 size -= 8; /* trim off version and checksum */
934 for (addr = FW_FLASH_BOOT_ADDR; size;) {
935 unsigned int chunk_size = min(size, 256U);
936
937 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
938 if (ret)
939 goto out;
940
941 addr += chunk_size;
942 fw_data += chunk_size;
943 size -= chunk_size;
944 }
945
946 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
947out:
948 if (ret)
949 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
950 return ret;
951}
952
953#define CIM_CTL_BASE 0x2000
954
955/**
956 * t3_cim_ctl_blk_read - read a block from CIM control region
957 *
958 * @adap: the adapter
959 * @addr: the start address within the CIM control region
960 * @n: number of words to read
961 * @valp: where to store the result
962 *
963 * Reads a block of 4-byte words from the CIM control region.
964 */
965int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
966 unsigned int n, unsigned int *valp)
967{
968 int ret = 0;
969
970 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
971 return -EBUSY;
972
973 for ( ; !ret && n--; addr += 4) {
974 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
975 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
976 0, 5, 2);
977 if (!ret)
978 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
979 }
980 return ret;
981}
982
983
984/**
985 * t3_link_changed - handle interface link changes
986 * @adapter: the adapter
987 * @port_id: the port index that changed link state
988 *
989 * Called when a port's link settings change to propagate the new values
990 * to the associated PHY and MAC. After performing the common tasks it
991 * invokes an OS-specific handler.
992 */
993void t3_link_changed(struct adapter *adapter, int port_id)
994{
995 int link_ok, speed, duplex, fc;
996 struct port_info *pi = adap2pinfo(adapter, port_id);
997 struct cphy *phy = &pi->phy;
998 struct cmac *mac = &pi->mac;
999 struct link_config *lc = &pi->link_config;
1000
1001 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1002
1003 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1004 uses_xaui(adapter)) {
1005 if (link_ok)
1006 t3b_pcs_reset(mac);
1007 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1008 link_ok ? F_TXACTENABLE | F_RXEN : 0);
1009 }
1010 lc->link_ok = link_ok;
1011 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1012 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1013 if (lc->requested_fc & PAUSE_AUTONEG)
1014 fc &= lc->requested_fc;
1015 else
1016 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1017
1018 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1019 /* Set MAC speed, duplex, and flow control to match PHY. */
1020 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1021 lc->fc = fc;
1022 }
1023
1024 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1025}
1026
1027/**
1028 * t3_link_start - apply link configuration to MAC/PHY
1029 * @phy: the PHY to setup
1030 * @mac: the MAC to setup
1031 * @lc: the requested link configuration
1032 *
1033 * Set up a port's MAC and PHY according to a desired link configuration.
1034 * - If the PHY can auto-negotiate first decide what to advertise, then
1035 * enable/disable auto-negotiation as desired, and reset.
1036 * - If the PHY does not auto-negotiate just reset it.
1037 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1038 * otherwise do it later based on the outcome of auto-negotiation.
1039 */
1040int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1041{
1042 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1043
1044 lc->link_ok = 0;
1045 if (lc->supported & SUPPORTED_Autoneg) {
1046 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1047 if (fc) {
1048 lc->advertising |= ADVERTISED_Asym_Pause;
1049 if (fc & PAUSE_RX)
1050 lc->advertising |= ADVERTISED_Pause;
1051 }
1052 phy->ops->advertise(phy, lc->advertising);
1053
1054 if (lc->autoneg == AUTONEG_DISABLE) {
1055 lc->speed = lc->requested_speed;
1056 lc->duplex = lc->requested_duplex;
1057 lc->fc = (unsigned char)fc;
1058 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1059 fc);
1060 /* Also disables autoneg */
1061 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1062 phy->ops->reset(phy, 0);
1063 } else
1064 phy->ops->autoneg_enable(phy);
1065 } else {
1066 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1067 lc->fc = (unsigned char)fc;
1068 phy->ops->reset(phy, 0);
1069 }
1070 return 0;
1071}
1072
1073/**
1074 * t3_set_vlan_accel - control HW VLAN extraction
1075 * @adapter: the adapter
1076 * @ports: bitmap of adapter ports to operate on
1077 * @on: enable (1) or disable (0) HW VLAN extraction
1078 *
1079 * Enables or disables HW extraction of VLAN tags for the given ports.
1080 */
1081void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1082{
1083 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1084 ports << S_VLANEXTRACTIONENABLE,
1085 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1086}
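/*
 * Example (sketch): enable HW VLAN extraction on ports 0 and 1 using a
 * two-bit port bitmap.
 */
static inline void example_enable_vlan_accel(struct adapter *adapter)
{
	t3_set_vlan_accel(adapter, 0x3, 1);
}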
1087
1088struct intr_info {
1089 unsigned int mask; /* bits to check in interrupt status */
1090 const char *msg; /* message to print or NULL */
1091 short stat_idx; /* stat counter to increment or -1 */
1092 unsigned short fatal:1; /* whether the condition reported is fatal */
1093};
1094
1095/**
1096 * t3_handle_intr_status - table driven interrupt handler
1097 * @adapter: the adapter that generated the interrupt
1098 * @reg: the interrupt status register to process
1099 * @mask: a mask to apply to the interrupt status
1100 * @acts: table of interrupt actions
1101 * @stats: statistics counters tracking interrupt occurrences
1102 *
1103 * A table driven interrupt handler that applies a set of masks to an
1104 * interrupt status word and performs the corresponding actions if the
1105 * interrupts described by the mask have occurred. The actions include
1106 * optionally printing a warning or alert message, and optionally
1107 * incrementing a stat counter. The table is terminated by an entry
1108 * specifying mask 0. Returns the number of fatal interrupt conditions.
1109 */
1110static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1111 unsigned int mask,
1112 const struct intr_info *acts,
1113 unsigned long *stats)
1114{
1115 int fatal = 0;
1116 unsigned int status = t3_read_reg(adapter, reg) & mask;
1117
1118 for (; acts->mask; ++acts) {
1119 if (!(status & acts->mask))
1120 continue;
1121 if (acts->fatal) {
1122 fatal++;
1123 CH_ALERT(adapter, "%s (0x%x)\n",
1124 acts->msg, status & acts->mask);
1125 } else if (acts->msg)
1126 CH_WARN(adapter, "%s (0x%x)\n",
1127 acts->msg, status & acts->mask);
1128 if (acts->stat_idx >= 0)
1129 stats[acts->stat_idx]++;
1130 }
1131 if (status) /* clear processed interrupts */
1132 t3_write_reg(adapter, reg, status);
1133 return fatal;
1134}
1135
1136#define SGE_INTR_MASK (F_RSPQDISABLED)
1137#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1138 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1139 F_NFASRCHFAIL)
1140#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1141#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1142 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1143 F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
1144#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1145 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1146 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1147 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1148 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1149 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1150#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1151 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1152 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1153 V_BISTERR(M_BISTERR) | F_PEXERR)
1154#define ULPRX_INTR_MASK F_PARERR
1155#define ULPTX_INTR_MASK 0
1156#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
1157 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1158 F_ZERO_SWITCH_ERROR)
1159#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1160 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1161 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1162 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
1163#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1164 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1165 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1166#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1167 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1168 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1169#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1170 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1171 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1172 V_MCAPARERRENB(M_MCAPARERRENB))
1173#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1174 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1175 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1176 F_MPS0 | F_CPL_SWITCH)
1177
1178/*
1179 * Interrupt handler for the PCIX1 module.
1180 */
1181static void pci_intr_handler(struct adapter *adapter)
1182{
1183 static const struct intr_info pcix1_intr_info[] = {
1184 {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1185 {F_SIGTARABT, "PCI signaled target abort", -1, 1},
1186 {F_RCVTARABT, "PCI received target abort", -1, 1},
1187 {F_RCVMSTABT, "PCI received master abort", -1, 1},
1188 {F_SIGSYSERR, "PCI signaled system error", -1, 1},
1189 {F_DETPARERR, "PCI detected parity error", -1, 1},
1190 {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1191 {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1192 {F_RCVSPLCMPERR, "PCI received split completion error", -1,
1193 1},
1194 {F_DETCORECCERR, "PCI correctable ECC error",
1195 STAT_PCI_CORR_ECC, 0},
1196 {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1197 {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1198 {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1199 1},
1200 {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1201 1},
1202 {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1203 1},
1204 {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1205 "error", -1, 1},
1206 {0}
1207 };
1208
1209 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1210 pcix1_intr_info, adapter->irq_stats))
1211 t3_fatal_err(adapter);
1212}
1213
1214/*
1215 * Interrupt handler for the PCIE module.
1216 */
1217static void pcie_intr_handler(struct adapter *adapter)
1218{
1219 static const struct intr_info pcie_intr_info[] = {
1220 {F_PEXERR, "PCI PEX error", -1, 1},
1221 {F_UNXSPLCPLERRR,
1222 "PCI unexpected split completion DMA read error", -1, 1},
1223 {F_UNXSPLCPLERRC,
1224 "PCI unexpected split completion DMA command error", -1, 1},
1225 {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1226 {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1227 {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1228 {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1229 {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1230 "PCI MSI-X table/PBA parity error", -1, 1},
1231 {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
1232 {0}
1233 };
1234
1235 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1236 pcie_intr_info, adapter->irq_stats))
1237 t3_fatal_err(adapter);
1238}
1239
1240/*
1241 * TP interrupt handler.
1242 */
1243static void tp_intr_handler(struct adapter *adapter)
1244{
1245 static const struct intr_info tp_intr_info[] = {
1246 {0xffffff, "TP parity error", -1, 1},
1247 {0x1000000, "TP out of Rx pages", -1, 1},
1248 {0x2000000, "TP out of Tx pages", -1, 1},
1249 {0}
1250 };
1251
1252 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1253 tp_intr_info, NULL))
1254 t3_fatal_err(adapter);
1255}
1256
1257/*
1258 * CIM interrupt handler.
1259 */
1260static void cim_intr_handler(struct adapter *adapter)
1261{
1262 static const struct intr_info cim_intr_info[] = {
1263 {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1264 {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1265 {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1266 {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1267 {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1268 {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1269 {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1270 {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1271 {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1272 {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1273 {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1274 {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1275 {0}
1276 };
1277
1278 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1279 cim_intr_info, NULL))
1280 t3_fatal_err(adapter);
1281}
1282
1283/*
1284 * ULP RX interrupt handler.
1285 */
1286static void ulprx_intr_handler(struct adapter *adapter)
1287{
1288 static const struct intr_info ulprx_intr_info[] = {
1289 {F_PARERR, "ULP RX parity error", -1, 1},
1290 {0}
1291 };
1292
1293 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1294 ulprx_intr_info, NULL))
1295 t3_fatal_err(adapter);
1296}
1297
1298/*
1299 * ULP TX interrupt handler.
1300 */
1301static void ulptx_intr_handler(struct adapter *adapter)
1302{
1303 static const struct intr_info ulptx_intr_info[] = {
1304 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1305 STAT_ULP_CH0_PBL_OOB, 0},
1306 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1307 STAT_ULP_CH1_PBL_OOB, 0},
1308 {0}
1309 };
1310
1311 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1312 ulptx_intr_info, adapter->irq_stats))
1313 t3_fatal_err(adapter);
1314}
1315
1316#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1317 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1318 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1319 F_ICSPI1_TX_FRAMING_ERROR)
1320#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1321 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1322 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1323 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1324
1325/*
1326 * PM TX interrupt handler.
1327 */
1328static void pmtx_intr_handler(struct adapter *adapter)
1329{
1330 static const struct intr_info pmtx_intr_info[] = {
1331 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1332 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1333 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1334 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1335 "PMTX ispi parity error", -1, 1},
1336 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1337 "PMTX ospi parity error", -1, 1},
1338 {0}
1339 };
1340
1341 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1342 pmtx_intr_info, NULL))
1343 t3_fatal_err(adapter);
1344}
1345
1346#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1347 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1348 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1349 F_IESPI1_TX_FRAMING_ERROR)
1350#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1351 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1352 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1353 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1354
1355/*
1356 * PM RX interrupt handler.
1357 */
1358static void pmrx_intr_handler(struct adapter *adapter)
1359{
1360 static const struct intr_info pmrx_intr_info[] = {
1361 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1362 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1363 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1364 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1365 "PMRX ispi parity error", -1, 1},
1366 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1367 "PMRX ospi parity error", -1, 1},
1368 {0}
1369 };
1370
1371 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1372 pmrx_intr_info, NULL))
1373 t3_fatal_err(adapter);
1374}
1375
1376/*
1377 * CPL switch interrupt handler.
1378 */
1379static void cplsw_intr_handler(struct adapter *adapter)
1380{
1381 static const struct intr_info cplsw_intr_info[] = {
1382/* { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, */
1383 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1384 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1385 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1386 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1387 {0}
1388 };
1389
1390 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1391 cplsw_intr_info, NULL))
1392 t3_fatal_err(adapter);
1393}
1394
1395/*
1396 * MPS interrupt handler.
1397 */
1398static void mps_intr_handler(struct adapter *adapter)
1399{
1400 static const struct intr_info mps_intr_info[] = {
1401 {0x1ff, "MPS parity error", -1, 1},
1402 {0}
1403 };
1404
1405 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1406 mps_intr_info, NULL))
1407 t3_fatal_err(adapter);
1408}
1409
1410#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1411
1412/*
1413 * MC7 interrupt handler.
1414 */
1415static void mc7_intr_handler(struct mc7 *mc7)
1416{
1417 struct adapter *adapter = mc7->adapter;
1418 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1419
1420 if (cause & F_CE) {
1421 mc7->stats.corr_err++;
1422 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1423 "data 0x%x 0x%x 0x%x\n", mc7->name,
1424 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1425 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1426 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1427 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1428 }
1429
1430 if (cause & F_UE) {
1431 mc7->stats.uncorr_err++;
1432 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1433 "data 0x%x 0x%x 0x%x\n", mc7->name,
1434 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1435 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1436 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1437 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1438 }
1439
1440 if (G_PE(cause)) {
1441 mc7->stats.parity_err++;
1442 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1443 mc7->name, G_PE(cause));
1444 }
1445
1446 if (cause & F_AE) {
1447 u32 addr = 0;
1448
1449 if (adapter->params.rev > 0)
1450 addr = t3_read_reg(adapter,
1451 mc7->offset + A_MC7_ERR_ADDR);
1452 mc7->stats.addr_err++;
1453 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1454 mc7->name, addr);
1455 }
1456
1457 if (cause & MC7_INTR_FATAL)
1458 t3_fatal_err(adapter);
1459
1460 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1461}
1462
1463#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1464 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1465/*
1466 * XGMAC interrupt handler.
1467 */
1468static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1469{
1470 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1471 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
1472
1473 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1474 mac->stats.tx_fifo_parity_err++;
1475 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1476 }
1477 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1478 mac->stats.rx_fifo_parity_err++;
1479 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1480 }
1481 if (cause & F_TXFIFO_UNDERRUN)
1482 mac->stats.tx_fifo_urun++;
1483 if (cause & F_RXFIFO_OVERFLOW)
1484 mac->stats.rx_fifo_ovfl++;
1485 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1486 mac->stats.serdes_signal_loss++;
1487 if (cause & F_XAUIPCSCTCERR)
1488 mac->stats.xaui_pcs_ctc_err++;
1489 if (cause & F_XAUIPCSALIGNCHANGE)
1490 mac->stats.xaui_pcs_align_change++;
1491
1492 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1493 if (cause & XGM_INTR_FATAL)
1494 t3_fatal_err(adap);
1495 return cause != 0;
1496}
1497
1498/*
1499 * Interrupt handler for PHY events.
1500 */
1501int t3_phy_intr_handler(struct adapter *adapter)
1502{
1503 static const int intr_gpio_bits[] = { 8, 0x20 };
1504
1505 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1506
1507 for_each_port(adapter, i) {
1508 if (cause & intr_gpio_bits[i]) {
1509 struct cphy *phy = &adap2pinfo(adapter, i)->phy;
1510 int phy_cause = phy->ops->intr_handler(phy);
1511
1512 if (phy_cause & cphy_cause_link_change)
1513 t3_link_changed(adapter, i);
1514 if (phy_cause & cphy_cause_fifo_error)
1515 phy->fifo_errors++;
1516 }
1517 }
1518
1519 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1520 return 0;
1521}
1522
1523/*
1524 * T3 slow path (non-data) interrupt handler.
1525 */
1526int t3_slow_intr_handler(struct adapter *adapter)
1527{
1528 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1529
1530 cause &= adapter->slow_intr_mask;
1531 if (!cause)
1532 return 0;
1533 if (cause & F_PCIM0) {
1534 if (is_pcie(adapter))
1535 pcie_intr_handler(adapter);
1536 else
1537 pci_intr_handler(adapter);
1538 }
1539 if (cause & F_SGE3)
1540 t3_sge_err_intr_handler(adapter);
1541 if (cause & F_MC7_PMRX)
1542 mc7_intr_handler(&adapter->pmrx);
1543 if (cause & F_MC7_PMTX)
1544 mc7_intr_handler(&adapter->pmtx);
1545 if (cause & F_MC7_CM)
1546 mc7_intr_handler(&adapter->cm);
1547 if (cause & F_CIM)
1548 cim_intr_handler(adapter);
1549 if (cause & F_TP1)
1550 tp_intr_handler(adapter);
1551 if (cause & F_ULP2_RX)
1552 ulprx_intr_handler(adapter);
1553 if (cause & F_ULP2_TX)
1554 ulptx_intr_handler(adapter);
1555 if (cause & F_PM1_RX)
1556 pmrx_intr_handler(adapter);
1557 if (cause & F_PM1_TX)
1558 pmtx_intr_handler(adapter);
1559 if (cause & F_CPL_SWITCH)
1560 cplsw_intr_handler(adapter);
1561 if (cause & F_MPS0)
1562 mps_intr_handler(adapter);
1563 if (cause & F_MC5A)
1564 t3_mc5_intr_handler(&adapter->mc5);
1565 if (cause & F_XGMAC0_0)
1566 mac_intr_handler(adapter, 0);
1567 if (cause & F_XGMAC0_1)
1568 mac_intr_handler(adapter, 1);
1569 if (cause & F_T3DBG)
1570 t3_os_ext_intr_handler(adapter);
1571
1572 /* Clear the interrupts just processed. */
1573 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1574 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1575 return 1;
1576}
1577
1578/**
1579 * t3_intr_enable - enable interrupts
1580 * @adapter: the adapter whose interrupts should be enabled
1581 *
1582 * Enable interrupts by setting the interrupt enable registers of the
1583 * various HW modules and then enabling the top-level interrupt
1584 * concentrator.
1585 */
1586void t3_intr_enable(struct adapter *adapter)
1587{
1588 static const struct addr_val_pair intr_en_avp[] = {
1589 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1590 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1591 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1592 MC7_INTR_MASK},
1593 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1594 MC7_INTR_MASK},
1595 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1596 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1597 {A_TP_INT_ENABLE, 0x3bfffff},
1598 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1599 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1600 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1601 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1602 };
1603
1604 adapter->slow_intr_mask = PL_INTR_MASK;
1605
1606 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1607
1608 if (adapter->params.rev > 0) {
1609 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1610 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1611 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1612 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1613 F_PBL_BOUND_ERR_CH1);
1614 } else {
1615 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1616 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1617 }
1618
1619 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
1620 adapter_info(adapter)->gpio_intr);
1621 t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
1622 adapter_info(adapter)->gpio_intr);
1623 if (is_pcie(adapter))
1624 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1625 else
1626 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1627 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1628 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1629}
1630
1631/**
1632 * t3_intr_disable - disable a card's interrupts
1633 * @adapter: the adapter whose interrupts should be disabled
1634 *
1635 * Disable interrupts. We only disable the top-level interrupt
1636 * concentrator and the SGE data interrupts.
1637 */
1638void t3_intr_disable(struct adapter *adapter)
1639{
1640 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1641 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1642 adapter->slow_intr_mask = 0;
1643}
1644
1645/**
1646 * t3_intr_clear - clear all interrupts
1647 * @adapter: the adapter whose interrupts should be cleared
1648 *
1649 * Clears all interrupts.
1650 */
1651void t3_intr_clear(struct adapter *adapter)
1652{
1653 static const unsigned int cause_reg_addr[] = {
1654 A_SG_INT_CAUSE,
1655 A_SG_RSPQ_FL_STATUS,
1656 A_PCIX_INT_CAUSE,
1657 A_MC7_INT_CAUSE,
1658 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1659 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1660 A_CIM_HOST_INT_CAUSE,
1661 A_TP_INT_CAUSE,
1662 A_MC5_DB_INT_CAUSE,
1663 A_ULPRX_INT_CAUSE,
1664 A_ULPTX_INT_CAUSE,
1665 A_CPL_INTR_CAUSE,
1666 A_PM1_TX_INT_CAUSE,
1667 A_PM1_RX_INT_CAUSE,
1668 A_MPS_INT_CAUSE,
1669 A_T3DBG_INT_CAUSE,
1670 };
1671 unsigned int i;
1672
1673 /* Clear PHY and MAC interrupts for each port. */
1674 for_each_port(adapter, i)
1675 t3_port_intr_clear(adapter, i);
1676
1677 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1678 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1679
1680 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1681 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1682}
1683
1684/**
1685 * t3_port_intr_enable - enable port-specific interrupts
1686 * @adapter: associated adapter
1687 * @idx: index of port whose interrupts should be enabled
1688 *
1689 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1690 * adapter port.
1691 */
1692void t3_port_intr_enable(struct adapter *adapter, int idx)
1693{
1694 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1695
1696 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1697 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1698 phy->ops->intr_enable(phy);
1699}
1700
1701/**
1702 * t3_port_intr_disable - disable port-specific interrupts
1703 * @adapter: associated adapter
1704 * @idx: index of port whose interrupts should be disabled
1705 *
1706 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1707 * adapter port.
1708 */
1709void t3_port_intr_disable(struct adapter *adapter, int idx)
1710{
1711 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1712
1713 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1714 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1715 phy->ops->intr_disable(phy);
1716}
1717
1718/**
1719 * t3_port_intr_clear - clear port-specific interrupts
1720 * @adapter: associated adapter
1721 * @idx: index of port whose interrupts to clear
1722 *
1723 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1724 * adapter port.
1725 */
1726void t3_port_intr_clear(struct adapter *adapter, int idx)
1727{
1728 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1729
1730 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1731 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1732 phy->ops->intr_clear(phy);
1733}
1734
1735/**
1736 * t3_sge_write_context - write an SGE context
1737 * @adapter: the adapter
1738 * @id: the context id
1739 * @type: the context type
1740 *
1741 * Program an SGE context with the values already loaded in the
1742 * CONTEXT_DATA? registers.
1743 */
1744static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
1745 unsigned int type)
1746{
1747 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1748 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1749 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1750 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1751 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1752 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1753 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1754 0, 5, 1);
1755}
1756
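/*
 * Note on the SG_CONTEXT_MASK0-3 registers: they select which bits of
 * the 128-bit context a write command updates. t3_sge_write_context()
 * above sets all-ones masks to rewrite the whole context, while the
 * enable/disable helpers further below mask in only the single field
 * they modify.
 */
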
1757/**
1758 * t3_sge_init_ecntxt - initialize an SGE egress context
1759 * @adapter: the adapter to configure
1760 * @id: the context id
1761 * @gts_enable: whether to enable GTS for the context
1762 * @type: the egress context type
1763 * @respq: associated response queue
1764 * @base_addr: base address of queue
1765 * @size: number of queue entries
1766 * @token: uP token
1767 * @gen: initial generation value for the context
1768 * @cidx: consumer pointer
1769 *
1770 * Initialize an SGE egress context and make it ready for use. If the
1771 * platform allows concurrent context operations, the caller is
1772 * responsible for appropriate locking.
1773 */
1774int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
1775 enum sge_context_type type, int respq, u64 base_addr,
1776 unsigned int size, unsigned int token, int gen,
1777 unsigned int cidx)
1778{
1779 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1780
1781 if (base_addr & 0xfff) /* must be 4K aligned */
1782 return -EINVAL;
1783 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1784 return -EBUSY;
1785
1786 base_addr >>= 12;
1787 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1788 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1789 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1790 V_EC_BASE_LO(base_addr & 0xffff));
1791 base_addr >>= 16;
1792 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
1793 base_addr >>= 32;
1794 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1795 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
1796 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
1797 F_EC_VALID);
1798 return t3_sge_write_context(adapter, id, F_EGRESS);
1799}
1800
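/*
 * Illustrative call (values are hypothetical): program egress context 0
 * for an Ethernet TX queue with GTS enabled, feeding response queue 0,
 * with a 4K-aligned bus address "dma_addr", 1024 entries, uP token 0,
 * generation 1, and consumer index 0.
 *
 *	err = t3_sge_init_ecntxt(adap, 0, 1, SGE_CNTXT_ETH, 0, dma_addr,
 *				 1024, 0, 1, 0);
 */
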
1801/**
1802 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1803 * @adapter: the adapter to configure
1804 * @id: the context id
1805 * @gts_enable: whether to enable GTS for the context
1806 * @base_addr: base address of queue
1807 * @size: number of queue entries
1808 * @bsize: size of each buffer for this queue
1809 * @cong_thres: threshold to signal congestion to upstream producers
1810 * @gen: initial generation value for the context
1811 * @cidx: consumer pointer
1812 *
1813 * Initialize an SGE free list context and make it ready for use. The
1814 * caller is responsible for ensuring only one context operation occurs
1815 * at a time.
1816 */
1817int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
1818 int gts_enable, u64 base_addr, unsigned int size,
1819 unsigned int bsize, unsigned int cong_thres, int gen,
1820 unsigned int cidx)
1821{
1822 if (base_addr & 0xfff) /* must be 4K aligned */
1823 return -EINVAL;
1824 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1825 return -EBUSY;
1826
1827 base_addr >>= 12;
1828 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
1829 base_addr >>= 32;
1830 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
1831 V_FL_BASE_HI((u32) base_addr) |
1832 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
1833 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
1834 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
1835 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
1836 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1837 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
1838 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
1839 return t3_sge_write_context(adapter, id, F_FREELIST);
1840}
1841
1842/**
1843 * t3_sge_init_rspcntxt - initialize an SGE response queue context
1844 * @adapter: the adapter to configure
1845 * @id: the context id
1846 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
1847 * @base_addr: base address of queue
1848 * @size: number of queue entries
1849 * @fl_thres: threshold for selecting the normal or jumbo free list
1850 * @gen: initial generation value for the context
1851 * @cidx: consumer pointer
1852 *
1853 * Initialize an SGE response queue context and make it ready for use.
1854 * The caller is responsible for ensuring only one context operation
1855 * occurs at a time.
1856 */
1857int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
1858 int irq_vec_idx, u64 base_addr, unsigned int size,
1859 unsigned int fl_thres, int gen, unsigned int cidx)
1860{
1861 unsigned int intr = 0;
1862
1863 if (base_addr & 0xfff) /* must be 4K aligned */
1864 return -EINVAL;
1865 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1866 return -EBUSY;
1867
1868 base_addr >>= 12;
1869 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
1870 V_CQ_INDEX(cidx));
1871 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1872 base_addr >>= 32;
1873 if (irq_vec_idx >= 0)
1874 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
1875 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1876 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
1877 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
1878 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
1879}
1880
1881/**
1882 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
1883 * @adapter: the adapter to configure
1884 * @id: the context id
1885 * @base_addr: base address of queue
1886 * @size: number of queue entries
1887 * @rspq: response queue for async notifications
1888 * @ovfl_mode: CQ overflow mode
1889 * @credits: completion queue credits
1890 * @credit_thres: the credit threshold
1891 *
1892 * Initialize an SGE completion queue context and make it ready for use.
1893 * The caller is responsible for ensuring only one context operation
1894 * occurs at a time.
1895 */
1896int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
1897 unsigned int size, int rspq, int ovfl_mode,
1898 unsigned int credits, unsigned int credit_thres)
1899{
1900 if (base_addr & 0xfff) /* must be 4K aligned */
1901 return -EINVAL;
1902 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1903 return -EBUSY;
1904
1905 base_addr >>= 12;
1906 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
1907 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1908 base_addr >>= 32;
1909 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1910 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
1911 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
1912 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
1913 V_CQ_CREDIT_THRES(credit_thres));
1914 return t3_sge_write_context(adapter, id, F_CQ);
1915}
1916
1917/**
1918 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
1919 * @adapter: the adapter
1920 * @id: the egress context id
1921 * @enable: enable (1) or disable (0) the context
1922 *
1923 * Enable or disable an SGE egress context. The caller is responsible for
1924 * ensuring only one context operation occurs at a time.
1925 */
1926int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
1927{
1928 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1929 return -EBUSY;
1930
1931 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1932 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1933 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
1934 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
1935 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
1936 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1937 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
1938 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1939 0, 5, 1);
1940}
1941
1942/**
1943 * t3_sge_disable_fl - disable an SGE free-buffer list
1944 * @adapter: the adapter
1945 * @id: the free list context id
1946 *
1947 * Disable an SGE free-buffer list. The caller is responsible for
1948 * ensuring only one context operation occurs at a time.
1949 */
1950int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
1951{
1952 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1953 return -EBUSY;
1954
1955 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1956 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1957 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
1958 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
1959 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
1960 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1961 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
1962 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1963 0, 5, 1);
1964}
1965
1966/**
1967 * t3_sge_disable_rspcntxt - disable an SGE response queue
1968 * @adapter: the adapter
1969 * @id: the response queue context id
1970 *
1971 * Disable an SGE response queue. The caller is responsible for
1972 * ensuring only one context operation occurs at a time.
1973 */
1974int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
1975{
1976 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1977 return -EBUSY;
1978
1979 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
1980 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1981 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
1982 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
1983 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
1984 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1985 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
1986 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1987 0, 5, 1);
1988}
1989
1990/**
1991 * t3_sge_disable_cqcntxt - disable an SGE completion queue
1992 * @adapter: the adapter
1993 * @id: the completion queue context id
1994 *
1995 * Disable an SGE completion queue. The caller is responsible for
1996 * ensuring only one context operation occurs at a time.
1997 */
1998int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
1999{
2000 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2001 return -EBUSY;
2002
2003 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2004 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2005 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2006 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2007 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2008 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2009 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2010 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2011 0, 5, 1);
2012}
2013
2014/**
2015 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2016 * @adapter: the adapter
2017 * @id: the context id
2018 * @op: the operation to perform
 * @credits: the number of CQ credits to supply with the operation
2019 *
2020 * Perform the selected operation on an SGE completion queue context.
2021 * The caller is responsible for ensuring only one context operation
2022 * occurs at a time.
2023 */
2024int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2025 unsigned int credits)
2026{
2027 u32 val;
2028
2029 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2030 return -EBUSY;
2031
2032 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2033 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2034 V_CONTEXT(id) | F_CQ);
2035 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2036 0, 5, 1, &val))
2037 return -EIO;
2038
2039 if (op >= 2 && op < 7) {
2040 if (adapter->params.rev > 0)
2041 return G_CQ_INDEX(val);
2042
2043 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2044 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2045 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2046 F_CONTEXT_CMD_BUSY, 0, 5, 1))
2047 return -EIO;
2048 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2049 }
2050 return 0;
2051}
2052
2053/**
2054 * t3_sge_read_context - read an SGE context
2055 * @type: the context type
2056 * @adapter: the adapter
2057 * @id: the context id
2058 * @data: holds the retrieved context
2059 *
2060 * Read an SGE context of the given type. The caller is responsible for ensuring
2061 * only one context operation occurs at a time.
2062 */
2063static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2064 unsigned int id, u32 data[4])
2065{
2066 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2067 return -EBUSY;
2068
2069 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2070 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2071 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2072 5, 1))
2073 return -EIO;
2074 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2075 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2076 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2077 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2078 return 0;
2079}
2080
2081/**
2082 * t3_sge_read_ecntxt - read an SGE egress context
2083 * @adapter: the adapter
2084 * @id: the context id
2085 * @data: holds the retrieved context
2086 *
2087 * Read an SGE egress context. The caller is responsible for ensuring
2088 * only one context operation occurs at a time.
2089 */
2090int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2091{
2092 if (id >= 65536)
2093 return -EINVAL;
2094 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2095}
2096
2097/**
2098 * t3_sge_read_cq - read an SGE CQ context
2099 * @adapter: the adapter
2100 * @id: the context id
2101 * @data: holds the retrieved context
2102 *
2103 * Read an SGE CQ context. The caller is responsible for ensuring
2104 * only one context operation occurs at a time.
2105 */
2106int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2107{
2108 if (id >= 65536)
2109 return -EINVAL;
2110 return t3_sge_read_context(F_CQ, adapter, id, data);
2111}
2112
2113/**
2114 * t3_sge_read_fl - read an SGE free-list context
2115 * @adapter: the adapter
2116 * @id: the context id
2117 * @data: holds the retrieved context
2118 *
2119 * Read an SGE free-list context. The caller is responsible for ensuring
2120 * only one context operation occurs at a time.
2121 */
2122int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2123{
2124 if (id >= SGE_QSETS * 2)
2125 return -EINVAL;
2126 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2127}
2128
2129/**
2130 * t3_sge_read_rspq - read an SGE response queue context
2131 * @adapter: the adapter
2132 * @id: the context id
2133 * @data: holds the retrieved context
2134 *
2135 * Read an SGE response queue context. The caller is responsible for
2136 * ensuring only one context operation occurs at a time.
2137 */
2138int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2139{
2140 if (id >= SGE_QSETS)
2141 return -EINVAL;
2142 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2143}
2144
2145/**
2146 * t3_config_rss - configure Rx packet steering
2147 * @adapter: the adapter
2148 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2149 * @cpus: values for the CPU lookup table (0xff terminated)
2150 * @rspq: values for the response queue lookup table (0xffff terminated)
2151 *
2152 * Programs the receive packet steering logic. @cpus and @rspq provide
2153 * the values for the CPU and response queue lookup tables. If they
2154 * provide fewer values than the size of the tables, the supplied values
2155 * are used repeatedly until the tables are fully populated.
2156 */
2157void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2158 const u8 * cpus, const u16 *rspq)
2159{
2160 int i, j, cpu_idx = 0, q_idx = 0;
2161
2162 if (cpus)
2163 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2164 u32 val = i << 16;
2165
2166 for (j = 0; j < 2; ++j) {
2167 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2168 if (cpus[cpu_idx] == 0xff)
2169 cpu_idx = 0;
2170 }
2171 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2172 }
2173
2174 if (rspq)
2175 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2176 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2177 (i << 16) | rspq[q_idx++]);
2178 if (rspq[q_idx] == 0xffff)
2179 q_idx = 0;
2180 }
2181
2182 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2183}
2184
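/*
 * Illustrative setup (the flag choice is indicative only): spread
 * traffic over four response queues. Note the 0xff/0xffff terminators
 * the lookup-table arrays must carry.
 *
 *	static const u8 cpus[] = { 0, 1, 2, 3, 0xff };
 *	static const u16 rspq[] = { 0, 1, 2, 3, 0xffff };
 *
 *	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN,
 *		      cpus, rspq);
 */
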
2185/**
2186 * t3_read_rss - read the contents of the RSS tables
2187 * @adapter: the adapter
2188 * @lkup: holds the contents of the RSS lookup table
2189 * @map: holds the contents of the RSS map table
2190 *
2191 * Reads the contents of the receive packet steering tables.
2192 */
2193int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2194{
2195 int i;
2196 u32 val;
2197
2198 if (lkup)
2199 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2200 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2201 0xffff0000 | i);
2202 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2203 if (!(val & 0x80000000))
2204 return -EAGAIN;
2205 *lkup++ = val;
2206 *lkup++ = (val >> 8);
2207 }
2208
2209 if (map)
2210 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2211 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2212 0xffff0000 | i);
2213 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2214 if (!(val & 0x80000000))
2215 return -EAGAIN;
2216 *map++ = val;
2217 }
2218 return 0;
2219}
2220
2221/**
2222 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2223 * @adap: the adapter
2224 * @enable: 1 to select offload mode, 0 for regular NIC
2225 *
2226 * Switches TP to NIC/offload mode.
2227 */
2228void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2229{
2230 if (is_offload(adap) || !enable)
2231 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2232 V_NICMODE(!enable));
2233}
2234
2235/**
2236 * pm_num_pages - calculate the number of pages of the payload memory
2237 * @mem_size: the size of the payload memory
2238 * @pg_size: the size of each payload memory page
2239 *
2240 * Calculate the number of pages, each of the given size, that fit in a
2241 * memory of the specified size, respecting the HW requirement that the
2242 * number of pages must be a multiple of 24.
2243 */
2244static inline unsigned int pm_num_pages(unsigned int mem_size,
2245 unsigned int pg_size)
2246{
2247 unsigned int n = mem_size / pg_size;
2248
2249 return n - n % 24;
2250}
2251
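/*
 * Worked example: a 64 MB channel with 16 KB pages gives n = 4096;
 * 4096 % 24 = 16, so pm_num_pages() returns 4080 pages.
 *
 * Note that mem_region() below expands to two statements, so it must
 * not be used as the unbraced body of a conditional.
 */
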
2252#define mem_region(adap, start, size, reg) \
2253 t3_write_reg((adap), A_ ## reg, (start)); \
2254 start += size
2255
2256/*
2257 * partition_mem - partition memory and configure TP memory settings
2258 * @adap: the adapter
2259 * @p: the TP parameters
2260 *
2261 * Partitions context and payload memory and configures TP's memory
2262 * registers.
2263 */
2264static void partition_mem(struct adapter *adap, const struct tp_params *p)
2265{
2266 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2267 unsigned int timers = 0, timers_shift = 22;
2268
2269 if (adap->params.rev > 0) {
2270 if (tids <= 16 * 1024) {
2271 timers = 1;
2272 timers_shift = 16;
2273 } else if (tids <= 64 * 1024) {
2274 timers = 2;
2275 timers_shift = 18;
2276 } else if (tids <= 256 * 1024) {
2277 timers = 3;
2278 timers_shift = 20;
2279 }
2280 }
2281
2282 t3_write_reg(adap, A_TP_PMM_SIZE,
2283 p->chan_rx_size | (p->chan_tx_size >> 16));
2284
2285 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2286 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2287 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2288 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2289 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2290
2291 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2292 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2293 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2294
2295 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2296	/* Add a bit of headroom and make it a multiple of 24 */
2297 pstructs += 48;
2298 pstructs -= pstructs % 24;
2299 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2300
2301 m = tids * TCB_SIZE;
2302 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2303 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2304 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2305 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2306 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2307 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2308 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2309 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2310
2311 m = (m + 4095) & ~0xfff;
2312 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2313 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2314
2315 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2316 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2317 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2318 if (tids < m)
2319 adap->params.mc5.nservers += m - tids;
2320}
2321
2322static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2323 u32 val)
2324{
2325 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2326 t3_write_reg(adap, A_TP_PIO_DATA, val);
2327}
2328
2329static void tp_config(struct adapter *adap, const struct tp_params *p)
2330{
2331	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2332 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2333 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2334 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2335 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2336 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2337 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2338 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2339 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2340 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2341 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
2342 F_IPV6ENABLE | F_NICMODE);
2343 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2344 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2345 t3_set_reg_field(adap, A_TP_PARA_REG6,
2346 adap->params.rev > 0 ? F_ENABLEESND : F_T3A_ENABLEESND,
2347 0);
2348
2349	t3_set_reg_field(adap, A_TP_PC_CONFIG,
2350 F_ENABLEEPCMDAFULL | F_ENABLEOCSPIFULL,
2351 F_TXDEFERENABLE | F_HEARBEATDACK | F_TXCONGESTIONMODE |
2352 F_RXCONGESTIONMODE);
2353 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
2354
2355 if (adap->params.rev > 0) {
2356 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2357 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2358 F_TXPACEAUTO);
2359 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2360 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2361 } else
2362 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2363
2364 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0x12121212);
2365 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0x12121212);
2366 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0x1212);
2367}
2368
2369/* Desired TP timer resolution in usec */
2370#define TP_TMR_RES 50
2371
2372/* TCP timer values in ms */
2373#define TP_DACK_TIMER 50
2374#define TP_RTO_MIN 250
2375
2376/**
2377 * tp_set_timers - set TP timing parameters
2378 * @adap: the adapter to set
2379 * @core_clk: the core clock frequency in Hz
2380 *
2381 * Set TP's timing parameters, such as the various timer resolutions and
2382 * the TCP timer values.
2383 */
2384static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2385{
2386 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2387 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2388 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2389 unsigned int tps = core_clk >> tre;
2390
2391 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2392 V_DELAYEDACKRESOLUTION(dack_re) |
2393 V_TIMESTAMPRESOLUTION(tstamp_re));
2394 t3_write_reg(adap, A_TP_DACK_TIMER,
2395 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2396 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2397 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2398 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2399 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2400 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2401 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2402 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2403 V_KEEPALIVEMAX(9));
2404
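/*
 * The macro below makes "n SECONDS" expand to "n * tps", i.e., n
 * seconds expressed in timer ticks at the resolution programmed above.
 */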
2405#define SECONDS * tps
2406
2407 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2408 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2409 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2410 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2411 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2412 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2413 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2414 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2415 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2416
2417#undef SECONDS
2418}
2419
2420/**
2421 * t3_tp_set_coalescing_size - set receive coalescing size
2422 * @adap: the adapter
2423 * @size: the receive coalescing size
2424 * @psh: whether a set PSH bit should deliver coalesced data
2425 *
2426 * Set the receive coalescing size and PSH bit handling.
2427 */
2428int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2429{
2430 u32 val;
2431
2432 if (size > MAX_RX_COALESCING_LEN)
2433 return -EINVAL;
2434
2435 val = t3_read_reg(adap, A_TP_PARA_REG3);
2436 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2437
2438 if (size) {
2439 val |= F_RXCOALESCEENABLE;
2440 if (psh)
2441 val |= F_RXCOALESCEPSHEN;
2442 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2443 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2444 }
2445 t3_write_reg(adap, A_TP_PARA_REG3, val);
2446 return 0;
2447}
2448
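/*
 * Illustrative call, mirroring what t3_init_hw() below does: enable RX
 * coalescing up to the adapter's maximum packet size and deliver
 * coalesced data when PSH is set.
 *
 *	t3_tp_set_coalescing_size(adap,
 *			min(adap->params.sge.max_pkt_size,
 *			    MAX_RX_COALESCING_LEN), 1);
 */
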
2449/**
2450 * t3_tp_set_max_rxsize - set the max receive size
2451 * @adap: the adapter
2452 * @size: the max receive size
2453 *
2454 * Set TP's max receive size. This is the limit that applies when
2455 * receive coalescing is disabled.
2456 */
2457void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2458{
2459 t3_write_reg(adap, A_TP_PARA_REG7,
2460 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2461}
2462
2463static void __devinit init_mtus(unsigned short mtus[])
2464{
2465 /*
2466 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2467 * it can accommodate max size TCP/IP headers when SACK and timestamps
2468 * are enabled and still have at least 8 bytes of payload.
2469 */
2470 mtus[0] = 88;
2471 mtus[1] = 256;
2472 mtus[2] = 512;
2473 mtus[3] = 576;
2474 mtus[4] = 808;
2475 mtus[5] = 1024;
2476 mtus[6] = 1280;
2477 mtus[7] = 1492;
2478 mtus[8] = 1500;
2479 mtus[9] = 2002;
2480 mtus[10] = 2048;
2481 mtus[11] = 4096;
2482 mtus[12] = 4352;
2483 mtus[13] = 8192;
2484 mtus[14] = 9000;
2485 mtus[15] = 9600;
2486}
2487
2488/*
2489 * Initial congestion control parameters.
2490 */
2491static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2492{
2493 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2494 a[9] = 2;
2495 a[10] = 3;
2496 a[11] = 4;
2497 a[12] = 5;
2498 a[13] = 6;
2499 a[14] = 7;
2500 a[15] = 8;
2501 a[16] = 9;
2502 a[17] = 10;
2503 a[18] = 14;
2504 a[19] = 17;
2505 a[20] = 21;
2506 a[21] = 25;
2507 a[22] = 30;
2508 a[23] = 35;
2509 a[24] = 45;
2510 a[25] = 60;
2511 a[26] = 80;
2512 a[27] = 100;
2513 a[28] = 200;
2514 a[29] = 300;
2515 a[30] = 400;
2516 a[31] = 500;
2517
2518 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2519 b[9] = b[10] = 1;
2520 b[11] = b[12] = 2;
2521 b[13] = b[14] = b[15] = b[16] = 3;
2522 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2523 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2524 b[28] = b[29] = 6;
2525 b[30] = b[31] = 7;
2526}
2527
2528/* The minimum additive increment value for the congestion control table */
2529#define CC_MIN_INCR 2U
2530
2531/**
2532 * t3_load_mtus - write the MTU and congestion control HW tables
2533 * @adap: the adapter
2534 * @mtus: the unrestricted values for the MTU table
2535 * @alpha: the values for the congestion control alpha parameter
2536 * @beta: the values for the congestion control beta parameter
2537 * @mtu_cap: the maximum permitted effective MTU
2538 *
2539 * Write the MTU table with the supplied MTUs capping each at @mtu_cap.
2540 * Update the high-speed congestion control table with the supplied alpha,
2541 * beta, and MTUs.
2542 */
2543void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2544 unsigned short alpha[NCCTRL_WIN],
2545 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2546{
2547 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2548 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2549 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2550 28672, 40960, 57344, 81920, 114688, 163840, 229376
2551 };
2552
2553 unsigned int i, w;
2554
2555 for (i = 0; i < NMTUS; ++i) {
2556 unsigned int mtu = min(mtus[i], mtu_cap);
2557 unsigned int log2 = fls(mtu);
2558
2559 if (!(mtu & ((1 << log2) >> 2))) /* round */
2560 log2--;
2561 t3_write_reg(adap, A_TP_MTU_TABLE,
2562 (i << 24) | (log2 << 16) | mtu);
2563
2564 for (w = 0; w < NCCTRL_WIN; ++w) {
2565 unsigned int inc;
2566
2567 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2568 CC_MIN_INCR);
2569
2570 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2571 (w << 16) | (beta[w] << 13) | inc);
2572 }
2573 }
2574}
2575
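/*
 * Worked example (illustrative numbers): for mtu = 1500 with
 * alpha[w] = 2 and avg_pkts[w] = 2 (the smallest window),
 * inc = max((1500 - 40) * 2 / 2, CC_MIN_INCR) = 1460, i.e., roughly one
 * data segment of additive increase per window.
 */
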
2576/**
2577 * t3_read_hw_mtus - returns the values in the HW MTU table
2578 * @adap: the adapter
2579 * @mtus: where to store the HW MTU values
2580 *
2581 * Reads the HW MTU table.
2582 */
2583void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2584{
2585 int i;
2586
2587 for (i = 0; i < NMTUS; ++i) {
2588 unsigned int val;
2589
2590 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2591 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2592 mtus[i] = val & 0x3fff;
2593 }
2594}
2595
2596/**
2597 * t3_get_cong_cntl_tab - reads the congestion control table
2598 * @adap: the adapter
2599 * @incr: where to store the additive increments
2600 *
2601 * Reads the additive increments programmed into the HW congestion
2602 * control table.
2603 */
2604void t3_get_cong_cntl_tab(struct adapter *adap,
2605 unsigned short incr[NMTUS][NCCTRL_WIN])
2606{
2607 unsigned int mtu, w;
2608
2609 for (mtu = 0; mtu < NMTUS; ++mtu)
2610 for (w = 0; w < NCCTRL_WIN; ++w) {
2611 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2612 0xffff0000 | (mtu << 5) | w);
2613 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2614 0x1fff;
2615 }
2616}
2617
2618/**
2619 * t3_tp_get_mib_stats - read TP's MIB counters
2620 * @adap: the adapter
2621 * @tps: holds the returned counter values
2622 *
2623 * Returns the values of TP's MIB counters.
2624 */
2625void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2626{
2627 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2628 sizeof(*tps) / sizeof(u32), 0);
2629}
2630
2631#define ulp_region(adap, name, start, len) \
2632 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2633 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2634 (start) + (len) - 1); \
2635 start += len
2636
2637#define ulptx_region(adap, name, start, len) \
2638 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2639 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2640 (start) + (len) - 1)
2641
2642static void ulp_config(struct adapter *adap, const struct tp_params *p)
2643{
2644 unsigned int m = p->chan_rx_size;
2645
2646 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2647 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2648 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2649 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2650 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2651 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2652 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2653 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2654}
2655
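/**
 *	t3_config_trace_filter - configure one of the tracing filters
 *	@adapter: the adapter
 *	@tp: the desired trace filter parameters
 *	@filter_index: which filter to configure (0 selects TX, nonzero RX)
 *	@invert: if set, non-matching packets are traced
 *	@enable: whether to enable or disable the filter
 *
 *	Configures one of the tracing filters available in HW.
 */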
2656void t3_config_trace_filter(struct adapter *adapter,
2657 const struct trace_params *tp, int filter_index,
2658 int invert, int enable)
2659{
2660 u32 addr, key[4], mask[4];
2661
2662 key[0] = tp->sport | (tp->sip << 16);
2663 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2664 key[2] = tp->dip;
2665 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2666
2667 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2668 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2669 mask[2] = tp->dip_mask;
2670 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2671
2672 if (invert)
2673 key[3] |= (1 << 29);
2674 if (enable)
2675 key[3] |= (1 << 28);
2676
2677 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2678 tp_wr_indirect(adapter, addr++, key[0]);
2679 tp_wr_indirect(adapter, addr++, mask[0]);
2680 tp_wr_indirect(adapter, addr++, key[1]);
2681 tp_wr_indirect(adapter, addr++, mask[1]);
2682 tp_wr_indirect(adapter, addr++, key[2]);
2683 tp_wr_indirect(adapter, addr++, mask[2]);
2684 tp_wr_indirect(adapter, addr++, key[3]);
2685 tp_wr_indirect(adapter, addr, mask[3]);
2686 t3_read_reg(adapter, A_TP_PIO_DATA);
2687}
2688
2689/**
2690 * t3_config_sched - configure a HW traffic scheduler
2691 * @adap: the adapter
2692 * @kbps: target rate in Kbps
2693 * @sched: the scheduler index
2694 *
2695 * Configure a HW scheduler for the target rate
2696 */
2697int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2698{
2699 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2700 unsigned int clk = adap->params.vpd.cclk * 1000;
2701 unsigned int selected_cpt = 0, selected_bpt = 0;
2702
2703 if (kbps > 0) {
2704 kbps *= 125; /* -> bytes */
2705 for (cpt = 1; cpt <= 255; cpt++) {
2706 tps = clk / cpt;
2707 bpt = (kbps + tps / 2) / tps;
2708 if (bpt > 0 && bpt <= 255) {
2709 v = bpt * tps;
2710 delta = v >= kbps ? v - kbps : kbps - v;
2711 if (delta <= mindelta) {
2712 mindelta = delta;
2713 selected_cpt = cpt;
2714 selected_bpt = bpt;
2715 }
2716 } else if (selected_cpt)
2717 break;
2718 }
2719 if (!selected_cpt)
2720 return -EINVAL;
2721 }
2722 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2723 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2724 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2725 if (sched & 1)
2726 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2727 else
2728 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2729 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
2730 return 0;
2731}
2732
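/*
 * Worked example (illustrative numbers): with a 200 MHz core clock and
 * a 10000 kbps target, kbps * 125 = 1,250,000 bytes/s. The search
 * settles on cpt = 160 (tps = 1,250,000 ticks/s) and bpt = 1 byte per
 * tick, an exact match for the requested rate.
 */
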
2733static int tp_init(struct adapter *adap, const struct tp_params *p)
2734{
2735 int busy = 0;
2736
2737 tp_config(adap, p);
2738 t3_set_vlan_accel(adap, 3, 0);
2739
2740 if (is_offload(adap)) {
2741 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
2742 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2743 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2744 0, 1000, 5);
2745 if (busy)
2746 CH_ERR(adap, "TP initialization timed out\n");
2747 }
2748
2749 if (!busy)
2750 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
2751 return busy;
2752}
2753
2754int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2755{
2756 if (port_mask & ~((1 << adap->params.nports) - 1))
2757 return -EINVAL;
2758 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
2759 port_mask << S_PORT0ACTIVE);
2760 return 0;
2761}
2762
2763/*
2764 * Perform the bits of HW initialization that are dependent on the number
2765 * of available ports.
2766 */
2767static void init_hw_for_avail_ports(struct adapter *adap, int nports)
2768{
2769 int i;
2770
2771 if (nports == 1) {
2772 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
2773 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
2774 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
2775 F_PORT0ACTIVE | F_ENFORCEPKT);
2776 t3_write_reg(adap, A_PM1_TX_CFG, 0xc000c000);
2777 } else {
2778 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
2779 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
2780 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
2781 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
2782 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
2783 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
2784 F_ENFORCEPKT);
2785 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
2786 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
2787 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
2788 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
2789 for (i = 0; i < 16; i++)
2790 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
2791 (i << 16) | 0x1010);
2792 }
2793}
2794
2795static int calibrate_xgm(struct adapter *adapter)
2796{
2797 if (uses_xaui(adapter)) {
2798 unsigned int v, i;
2799
2800 for (i = 0; i < 5; ++i) {
2801 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
2802 t3_read_reg(adapter, A_XGM_XAUI_IMP);
2803 msleep(1);
2804 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
2805 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
2806 t3_write_reg(adapter, A_XGM_XAUI_IMP,
2807 V_XAUIIMP(G_CALIMP(v) >> 2));
2808 return 0;
2809 }
2810 }
2811 CH_ERR(adapter, "MAC calibration failed\n");
2812 return -1;
2813 } else {
2814 t3_write_reg(adapter, A_XGM_RGMII_IMP,
2815 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2816 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2817 F_XGM_IMPSETUPDATE);
2818 }
2819 return 0;
2820}
2821
2822static void calibrate_xgm_t3b(struct adapter *adapter)
2823{
2824 if (!uses_xaui(adapter)) {
2825 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
2826 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2827 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
2828 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
2829 F_XGM_IMPSETUPDATE);
2830 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2831 0);
2832 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
2833 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
2834 }
2835}
2836
2837struct mc7_timing_params {
2838 unsigned char ActToPreDly;
2839 unsigned char ActToRdWrDly;
2840 unsigned char PreCyc;
2841 unsigned char RefCyc[5];
2842 unsigned char BkCyc;
2843 unsigned char WrToRdDly;
2844 unsigned char RdToWrDly;
2845};
2846
2847/*
2848 * Write a value to a register and check that the write completed. These
2849 * writes normally complete in a cycle or two, so one read should suffice.
2850 * The very first read exists to flush the posted write to the device.
2851 */
2852static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
2853{
2854 t3_write_reg(adapter, addr, val);
2855 t3_read_reg(adapter, addr); /* flush */
2856 if (!(t3_read_reg(adapter, addr) & F_BUSY))
2857 return 0;
2858 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
2859 return -EIO;
2860}
2861
2862static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
2863{
2864 static const unsigned int mc7_mode[] = {
2865 0x632, 0x642, 0x652, 0x432, 0x442
2866 };
2867 static const struct mc7_timing_params mc7_timings[] = {
2868 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
2869 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
2870 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
2871 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
2872 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
2873 };
2874
2875 u32 val;
2876 unsigned int width, density, slow, attempts;
2877 struct adapter *adapter = mc7->adapter;
2878 const struct mc7_timing_params *p = &mc7_timings[mem_type];
2879
2880 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
2881 slow = val & F_SLOW;
2882 width = G_WIDTH(val);
2883 density = G_DEN(val);
2884
2885 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
2886 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2887 msleep(1);
2888
2889 if (!slow) {
2890 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
2891 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
2892 msleep(1);
2893 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
2894 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
2895 CH_ERR(adapter, "%s MC7 calibration timed out\n",
2896 mc7->name);
2897 goto out_fail;
2898 }
2899 }
2900
2901 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
2902 V_ACTTOPREDLY(p->ActToPreDly) |
2903 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
2904 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
2905 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
2906
2907 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
2908 val | F_CLKEN | F_TERM150);
2909 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2910
2911 if (!slow)
2912 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
2913 F_DLLENB);
2914 udelay(1);
2915
2916 val = slow ? 3 : 6;
2917 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2918 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
2919 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
2920 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2921 goto out_fail;
2922
2923 if (!slow) {
2924 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
2925 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
2926 udelay(5);
2927 }
2928
2929 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2930 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2931 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2932 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
2933 mc7_mode[mem_type]) ||
2934 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
2935 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2936 goto out_fail;
2937
2938 /* clock value is in KHz */
2939 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
2940 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
2941
2942 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
2943 F_PERREFEN | V_PREREFDIV(mc7_clock));
2944 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
2945
2946 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
2947 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
2948 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
2949 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
2950 (mc7->size << width) - 1);
2951 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
2952 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
2953
2954 attempts = 50;
2955 do {
2956 msleep(250);
2957 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
2958 } while ((val & F_BUSY) && --attempts);
2959 if (val & F_BUSY) {
2960 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
2961 goto out_fail;
2962 }
2963
2964 /* Enable normal memory accesses. */
2965 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
2966 return 0;
2967
2968out_fail:
2969 return -1;
2970}
2971
2972static void config_pcie(struct adapter *adap)
2973{
2974 static const u16 ack_lat[4][6] = {
2975 {237, 416, 559, 1071, 2095, 4143},
2976 {128, 217, 289, 545, 1057, 2081},
2977 {73, 118, 154, 282, 538, 1050},
2978 {67, 107, 86, 150, 278, 534}
2979 };
2980 static const u16 rpl_tmr[4][6] = {
2981 {711, 1248, 1677, 3213, 6285, 12429},
2982 {384, 651, 867, 1635, 3171, 6243},
2983 {219, 354, 462, 846, 1614, 3150},
2984 {201, 321, 258, 450, 834, 1602}
2985 };
2986
2987 u16 val;
2988 unsigned int log2_width, pldsize;
2989 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
2990
2991 pci_read_config_word(adap->pdev,
2992 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
2993 &val);
2994 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
2995 pci_read_config_word(adap->pdev,
2996 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
2997 &val);
2998
2999 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3000 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3001 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3002 log2_width = fls(adap->params.pci.width) - 1;
3003 acklat = ack_lat[log2_width][pldsize];
3004 if (val & 1) /* check LOsEnable */
3005 acklat += fst_trn_tx * 4;
3006 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3007
3008 if (adap->params.rev == 0)
3009 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3010 V_T3A_ACKLAT(M_T3A_ACKLAT),
3011 V_T3A_ACKLAT(acklat));
3012 else
3013 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3014 V_ACKLAT(acklat));
3015
3016 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3017 V_REPLAYLMT(rpllmt));
3018
3019 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3020 t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
3021}
3022
3023/*
3024 * Initialize and configure T3 HW modules. This performs the
3025 * initialization steps that need to be done once after a card is reset.
3026 * MAC and PHY initialization is handled separately whenever a port is enabled.
3027 *
3028 * fw_params are passed to FW and their value is platform dependent. Only the
3029 * top 8 bits are available for use, the rest must be 0.
3030 */
3031int t3_init_hw(struct adapter *adapter, u32 fw_params)
3032{
3033 int err = -EIO, attempts = 100;
3034 const struct vpd_params *vpd = &adapter->params.vpd;
3035
3036 if (adapter->params.rev > 0)
3037 calibrate_xgm_t3b(adapter);
3038 else if (calibrate_xgm(adapter))
3039 goto out_err;
3040
3041 if (vpd->mclk) {
3042 partition_mem(adapter, &adapter->params.tp);
3043
3044 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3045 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3046 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3047 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3048 adapter->params.mc5.nfilters,
3049 adapter->params.mc5.nroutes))
3050 goto out_err;
3051 }
3052
3053 if (tp_init(adapter, &adapter->params.tp))
3054 goto out_err;
3055
3056 t3_tp_set_coalescing_size(adapter,
3057 min(adapter->params.sge.max_pkt_size,
3058 MAX_RX_COALESCING_LEN), 1);
3059 t3_tp_set_max_rxsize(adapter,
3060 min(adapter->params.sge.max_pkt_size, 16384U));
3061 ulp_config(adapter, &adapter->params.tp);
3062
3063 if (is_pcie(adapter))
3064 config_pcie(adapter);
3065 else
3066 t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
3067
3068 t3_write_reg(adapter, A_PM1_RX_CFG, 0xf000f000);
3069 init_hw_for_avail_ports(adapter, adapter->params.nports);
3070 t3_sge_init(adapter, &adapter->params.sge);
3071
3072 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3073 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3074 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3075 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3076
3077 do { /* wait for uP to initialize */
3078 msleep(20);
3079 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3080 if (!attempts)
3081 goto out_err;
3082
3083 err = 0;
3084out_err:
3085 return err;
3086}
3087
3088/**
3089 * get_pci_mode - determine a card's PCI mode
3090 * @adapter: the adapter
3091 * @p: where to store the PCI settings
3092 *
3093 * Determines a card's PCI mode and associated parameters, such as speed
3094 * and width.
3095 */
3096static void __devinit get_pci_mode(struct adapter *adapter,
3097 struct pci_params *p)
3098{
3099 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3100 u32 pci_mode, pcie_cap;
3101
3102 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3103 if (pcie_cap) {
3104 u16 val;
3105
3106 p->variant = PCI_VARIANT_PCIE;
3107 p->pcie_cap_addr = pcie_cap;
3108 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3109 &val);
3110 p->width = (val >> 4) & 0x3f;
3111 return;
3112 }
3113
3114 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3115 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3116 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3117 pci_mode = G_PCIXINITPAT(pci_mode);
3118 if (pci_mode == 0)
3119 p->variant = PCI_VARIANT_PCI;
3120 else if (pci_mode < 4)
3121 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3122 else if (pci_mode < 8)
3123 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3124 else
3125 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3126}
3127
3128/**
3129 * init_link_config - initialize a link's SW state
3130 * @lc: structure holding the link state
3131 * @caps: the card's link capabilities
3132 *
3133 * Initializes the SW state maintained for each link, including the link's
3134 * capabilities and default speed/duplex/flow-control/autonegotiation
3135 * settings.
3136 */
3137static void __devinit init_link_config(struct link_config *lc,
3138 unsigned int caps)
3139{
3140 lc->supported = caps;
3141 lc->requested_speed = lc->speed = SPEED_INVALID;
3142 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3143 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3144 if (lc->supported & SUPPORTED_Autoneg) {
3145 lc->advertising = lc->supported;
3146 lc->autoneg = AUTONEG_ENABLE;
3147 lc->requested_fc |= PAUSE_AUTONEG;
3148 } else {
3149 lc->advertising = 0;
3150 lc->autoneg = AUTONEG_DISABLE;
3151 }
3152}
3153
3154/**
3155 * mc7_calc_size - calculate MC7 memory size
3156 * @cfg: the MC7 configuration
3157 *
3158 * Calculates the size of an MC7 memory in bytes from the value of its
3159 * configuration register.
3160 */
3161static unsigned int __devinit mc7_calc_size(u32 cfg)
3162{
3163 unsigned int width = G_WIDTH(cfg);
3164 unsigned int banks = !!(cfg & F_BKS) + 1;
3165 unsigned int org = !!(cfg & F_ORG) + 1;
3166 unsigned int density = G_DEN(cfg);
3167 unsigned int MBs = ((256 << density) * banks) / (org << width);
3168
3169 return MBs << 20;
3170}
3171
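/*
 * Worked example: a configuration with density 0, two banks, org 1, and
 * width 2 yields ((256 << 0) * 2) / (1 << 2) = 128, i.e., a 128 MB
 * memory.
 */
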
3172static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3173 unsigned int base_addr, const char *name)
3174{
3175 u32 cfg;
3176
3177 mc7->adapter = adapter;
3178 mc7->name = name;
3179 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3180 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3181 mc7->size = mc7_calc_size(cfg);
3182 mc7->width = G_WIDTH(cfg);
3183}
3184
3185void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3186{
3187 mac->adapter = adapter;
3188 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3189 mac->nucast = 1;
3190
3191 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3192 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3193 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3194 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3195 F_ENRGMII, 0);
3196 }
3197}
3198
3199void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3200{
3201 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3202
3203 mi1_init(adapter, ai);
3204 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3205 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3206 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3207 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3208
3209 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3210 val |= F_ENRGMII;
3211
3212 /* Enable MAC clocks so we can access the registers */
3213 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3214 t3_read_reg(adapter, A_XGM_PORT_CFG);
3215
3216 val |= F_CLKDIVRESET_;
3217 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3218 t3_read_reg(adapter, A_XGM_PORT_CFG);
3219 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3220 t3_read_reg(adapter, A_XGM_PORT_CFG);
3221}
3222
3223/*
3224 * Reset the adapter. PCIe cards lose their config space during reset, PCI-X
3225 * ones don't.
3226 */
3227int t3_reset_adapter(struct adapter *adapter)
3228{
3229 int i;
3230 uint16_t devid = 0;
3231
3232 if (is_pcie(adapter))
3233 pci_save_state(adapter->pdev);
3234 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3235
3236 /*
3237	 * Delay to give the device some time to reset fully.
3238	 * XXX The delay time should be tuned.
3239 */
3240 for (i = 0; i < 10; i++) {
3241 msleep(50);
3242 pci_read_config_word(adapter->pdev, 0x00, &devid);
3243 if (devid == 0x1425)
3244 break;
3245 }
3246
3247 if (devid != 0x1425)
3248 return -1;
3249
3250 if (is_pcie(adapter))
3251 pci_restore_state(adapter->pdev);
3252 return 0;
3253}
3254
3255/*
3256 * Initialize adapter SW state for the various HW modules, set initial values
3257 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3258 * interface.
3259 */
3260int __devinit t3_prep_adapter(struct adapter *adapter,
3261 const struct adapter_info *ai, int reset)
3262{
3263 int ret;
3264 unsigned int i, j = 0;
3265
3266 get_pci_mode(adapter, &adapter->params.pci);
3267
3268 adapter->params.info = ai;
3269 adapter->params.nports = ai->nports;
3270 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3271 adapter->params.linkpoll_period = 0;
3272 adapter->params.stats_update_period = is_10G(adapter) ?
3273 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3274 adapter->params.pci.vpd_cap_addr =
3275 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3276 ret = get_vpd_params(adapter, &adapter->params.vpd);
3277 if (ret < 0)
3278 return ret;
3279
3280 if (reset && t3_reset_adapter(adapter))
3281 return -1;
3282
3283 t3_sge_prep(adapter, &adapter->params.sge);
3284
3285 if (adapter->params.vpd.mclk) {
3286 struct tp_params *p = &adapter->params.tp;
3287
3288 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3289 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3290 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3291
3292 p->nchan = ai->nports;
3293 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3294 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3295 p->cm_size = t3_mc7_size(&adapter->cm);
3296 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3297 p->chan_tx_size = p->pmtx_size / p->nchan;
3298 p->rx_pg_size = 64 * 1024;
3299 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3300 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3301 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3302 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3303 adapter->params.rev > 0 ? 12 : 6;
3304
3305 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3306 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3307 DEFAULT_NFILTERS : 0;
3308 adapter->params.mc5.nroutes = 0;
3309 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3310
3311 init_mtus(adapter->params.mtus);
3312 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3313 }
3314
3315 early_hw_init(adapter, ai);
3316
3317 for_each_port(adapter, i) {
3318 u8 hw_addr[6];
3319 struct port_info *p = adap2pinfo(adapter, i);
3320
3321 while (!adapter->params.vpd.port_type[j])
3322 ++j;
3323
3324 p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3325 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3326 ai->mdio_ops);
3327 mac_prep(&p->mac, adapter, j);
3328 ++j;
3329
3330 /*
3331 * The VPD EEPROM stores the base Ethernet address for the
3332 * card. A port's address is derived from the base by adding
3333 * the port's index to the base's low octet.
3334 */
3335 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3336 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3337
3338 memcpy(adapter->port[i]->dev_addr, hw_addr,
3339 ETH_ALEN);
3340 memcpy(adapter->port[i]->perm_addr, hw_addr,
3341 ETH_ALEN);
3342 init_link_config(&p->link_config, p->port_type->caps);
3343 p->phy.ops->power_down(&p->phy, 1);
3344 if (!(p->port_type->caps & SUPPORTED_IRQ))
3345 adapter->params.linkpoll_period = 10;
3346 }
3347
3348 return 0;
3349}
3350
3351void t3_led_ready(struct adapter *adapter)
3352{
3353 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3354 F_GPIO0_OUT_VAL);
3355}