blob: d7b991b7ed7883688df25d19556c679b6b7212ef [file] [log] [blame]
Divy Le Ray4d22de32007-01-18 22:04:14 -05001/*
Divy Le Ray1d68e932007-01-30 19:44:35 -08002 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
Divy Le Ray4d22de32007-01-18 22:04:14 -05003 *
Divy Le Ray1d68e932007-01-30 19:44:35 -08004 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
Divy Le Ray4d22de32007-01-18 22:04:14 -05009 *
Divy Le Ray1d68e932007-01-30 19:44:35 -080010 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
Divy Le Ray4d22de32007-01-18 22:04:14 -050031 */
Divy Le Ray4d22de32007-01-18 22:04:14 -050032#include "common.h"
33#include "regs.h"
34#include "sge_defs.h"
35#include "firmware_exports.h"
36
Divy Le Rayf2c68792007-01-30 19:44:13 -080037/**
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
46 *
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
51 */
Divy Le Ray4d22de32007-01-18 22:04:14 -050052
53int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
55{
56 while (1) {
57 u32 val = t3_read_reg(adapter, reg);
58
59 if (!!(val & mask) == polarity) {
60 if (valp)
61 *valp = val;
62 return 0;
63 }
64 if (--attempts == 0)
65 return -EAGAIN;
66 if (delay)
67 udelay(delay);
68 }
69}
70
71/**
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
77 *
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
81 */
82void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
84{
85 while (n--) {
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
87 p++;
88 }
89}
90
91/**
92 * t3_set_reg_field - set a register field to a value
93 * @adapter: the adapter to program
94 * @addr: the register address
95 * @mask: specifies the portion of the register to modify
96 * @val: the new value for the register field
97 *
98 * Sets a register field specified by the supplied mask to the
99 * given value.
100 */
101void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
102 u32 val)
103{
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
105
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr); /* flush */
108}
109
110/**
111 * t3_read_indirect - read indirectly addressed registers
112 * @adap: the adapter
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @start_idx: index of first indirect register to read
117 * @nregs: how many indirect registers to read
118 *
119 * Reads registers that are accessed indirectly through an address/data
120 * register pair.
121 */
Stephen Hemminger9265fab2007-10-08 16:22:29 -0700122static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals,
124 unsigned int nregs, unsigned int start_idx)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500125{
126 while (nregs--) {
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
129 start_idx++;
130 }
131}
132
133/**
134 * t3_mc7_bd_read - read from MC7 through backdoor accesses
135 * @mc7: identifies MC7 to read from
136 * @start: index of first 64-bit word to read
137 * @n: number of 64-bit words to read
138 * @buf: where to store the read result
139 *
140 * Read n 64-bit words from MC7 starting at word start, using backdoor
141 * accesses.
142 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	/* per-width shift/step tables, indexed by mc7->width */
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	/* reject reads that start or end beyond the memory */
	if (start >= size64 || start + n > size64)
		return -EINVAL;

	/* convert word index to a byte address scaled by interface width */
	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		/* each 64-bit word needs (1 << width) backdoor accesses */
		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			/* latch address, issue a backdoor read (op 0) */
			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			/* poll BUSY up to 10 times before giving up */
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				/* full-width part: DATA0/DATA1 hold the
				 * complete 64-bit word in one access */
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				/* narrow part: extract this access's slice
				 * and merge it into its position in val64 */
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
190
/*
 * Initialize MI1 (the MDIO management interface).  The MDIO clock divider
 * is derived from the VPD-supplied core clock and MDC frequency.  For
 * adapters without 10G capability V_ST(1) is also set -- presumably
 * selecting the directly-addressed (ST) MDIO framing; confirm against the
 * hardware documentation.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
	    V_CLKDIV(clkdiv);

	if (!(ai->caps & SUPPORTED_10000baseT_Full))
		val |= V_ST(1);
	t3_write_reg(adap, A_MI1_CFG, val);
}
204
205#define MDIO_ATTEMPTS 10
206
207/*
208 * MI1 read/write operations for direct-addressed PHYs.
209 */
/*
 * Read a register of a directly-addressed PHY.  @mmd_addr must be 0 for
 * this addressing mode.  Returns 0 on success or a negative errno.
 */
static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
		    int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	/* serialize access to the shared MI1 registers */
	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));	/* start read op */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
228
/*
 * Write a register of a directly-addressed PHY.  @mmd_addr must be 0 for
 * this addressing mode.  Returns 0 on success or a negative errno.
 */
static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
		     int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	/* serialize access to the shared MI1 registers */
	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));	/* start write op */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
246
/* MDIO operation table for direct-addressed PHYs */
static const struct mdio_ops mi1_mdio_ops = {
	mi1_read,
	mi1_write
};
251
252/*
253 * MI1 read/write operations for indirect-addressed PHYs.
254 */
/*
 * Read a register of an indirect-addressed PHY: first latch the target
 * register address (op 0) into the device/MMD, then issue the read (op 3).
 * Returns 0 on success or a negative errno.
 */
static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));	/* address cycle */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));	/* read cycle */
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
276
/*
 * Write a register of an indirect-addressed PHY: first latch the target
 * register address (op 0), then issue the data write (op 1).
 * Returns 0 on success or a negative errno.
 */
static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));	/* address cycle */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));	/* write cycle */
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
297
/* MDIO operation table for indirect-addressed PHYs */
static const struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};
302
303/**
304 * t3_mdio_change_bits - modify the value of a PHY register
305 * @phy: the PHY to operate on
306 * @mmd: the device address
307 * @reg: the register address
308 * @clear: what part of the register value to mask off
309 * @set: what part of the register value to set
310 *
311 * Changes the value of a PHY register by applying a mask to its current
312 * value and ORing the result with a new value.
313 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	unsigned int v;
	int err;

	/* read-modify-write: drop @clear bits, then OR in @set bits */
	err = mdio_read(phy, mmd, reg, &v);
	if (err)
		return err;

	v = (v & ~clear) | set;
	return mdio_write(phy, mmd, reg, v);
}
327
328/**
329 * t3_phy_reset - reset a PHY block
330 * @phy: the PHY to operate on
331 * @mmd: the device address of the PHY block to reset
332 * @wait: how long to wait for the reset to complete in 1ms increments
333 *
334 * Resets a PHY block and optionally waits for the reset to complete.
335 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
336 * for 10G PHYs.
337 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	/* assert the reset bit while clearing power-down in one RMW */
	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
	if (err || !wait)
		return err;

	/* poll the self-clearing BMCR_RESET bit, 1 ms per attempt */
	do {
		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
		if (err)
			return err;
		ctl &= BMCR_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	/* non-zero ctl here means the reset never completed */
	return ctl ? -1 : 0;
}
358
359/**
360 * t3_phy_advertise - set the PHY advertisement registers for autoneg
361 * @phy: the PHY to operate on
362 * @advert: bitmap of capabilities the PHY should advertise
363 *
364 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
365 * requested capabilities.
366 */
367int t3_phy_advertise(struct cphy *phy, unsigned int advert)
368{
369 int err;
370 unsigned int val = 0;
371
372 err = mdio_read(phy, 0, MII_CTRL1000, &val);
373 if (err)
374 return err;
375
376 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
377 if (advert & ADVERTISED_1000baseT_Half)
378 val |= ADVERTISE_1000HALF;
379 if (advert & ADVERTISED_1000baseT_Full)
380 val |= ADVERTISE_1000FULL;
381
382 err = mdio_write(phy, 0, MII_CTRL1000, val);
383 if (err)
384 return err;
385
386 val = 1;
387 if (advert & ADVERTISED_10baseT_Half)
388 val |= ADVERTISE_10HALF;
389 if (advert & ADVERTISED_10baseT_Full)
390 val |= ADVERTISE_10FULL;
391 if (advert & ADVERTISED_100baseT_Half)
392 val |= ADVERTISE_100HALF;
393 if (advert & ADVERTISED_100baseT_Full)
394 val |= ADVERTISE_100FULL;
395 if (advert & ADVERTISED_Pause)
396 val |= ADVERTISE_PAUSE_CAP;
397 if (advert & ADVERTISED_Asym_Pause)
398 val |= ADVERTISE_PAUSE_ASYM;
399 return mdio_write(phy, 0, MII_ADVERTISE, val);
400}
401
402/**
403 * t3_set_phy_speed_duplex - force PHY speed and duplex
404 * @phy: the PHY to operate on
405 * @speed: requested PHY speed
406 * @duplex: requested PHY duplex
407 *
408 * Force a 10/100/1000 PHY's speed and duplex. This also disables
409 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
410 */
411int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
412{
413 int err;
414 unsigned int ctl;
415
416 err = mdio_read(phy, 0, MII_BMCR, &ctl);
417 if (err)
418 return err;
419
420 if (speed >= 0) {
421 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
422 if (speed == SPEED_100)
423 ctl |= BMCR_SPEED100;
424 else if (speed == SPEED_1000)
425 ctl |= BMCR_SPEED1000;
426 }
427 if (duplex >= 0) {
428 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
429 if (duplex == DUPLEX_FULL)
430 ctl |= BMCR_FULLDPLX;
431 }
432 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
433 ctl |= BMCR_ANENABLE;
434 return mdio_write(phy, 0, MII_BMCR, ctl);
435}
436
/*
 * Per-board personality table, indexed by adapter id (see
 * t3_get_adapter_info).  Field order follows struct adapter_info in
 * common.h; the GPIO masks select output-enables/values and interrupt
 * pins for each board.
 */
static const struct adapter_info t3_adap_info[] = {
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 0, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{2, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
};
460
461/*
462 * Return the adapter_info structure with a given index. Out-of-range indices
463 * return NULL.
464 */
465const struct adapter_info *t3_get_adapter_info(unsigned int id)
466{
467 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
468}
469
470#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
471 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
472#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)
473
/*
 * Port personality table, indexed by the port type code read from VPD
 * (see get_vpd_params).  Entry 0 and the NULL-prep entries are
 * unsupported/placeholder port types.
 */
static const struct port_type_info port_types[] = {
	{NULL},
	{t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-XR"},
	{t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{NULL, CAPS_10G, "10GBASE-KX4"},
	{t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-SR"},
	{NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
};
489
490#undef CAPS_1G
491#undef CAPS_10G
492
/* Expands to one VPD keyword entry: 2-byte keyword, length, payload. */
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.  The layout mirrors the on-EEPROM byte format, so field
 * order and sizes must not change.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};
524
525#define EEPROM_MAX_POLL 4
526#define EEPROM_STAT_ADDR 0x4000
527#define VPD_BASE 0xc00
528
/**
 *	t3_seeprom_read - read a VPD EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.  A zero is written to the flag bit when the
 *	address is written to the control register.  The hardware device will
 *	set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	/* address must be word-aligned and within the EEPROM (or the
	 * special write-protect status location) */
	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	/* poll the flag bit; hardware sets it when the data is ready */
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);	/* EEPROM data is little-endian */
	return 0;
}
563
/**
 *	t3_seeprom_write - write a VPD EEPROM location
 *	@adapter: adapter to write
 *	@addr: EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.  The flag bit is set along with the address and
 *	cleared by the hardware once the write completes.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	/* same address validation as t3_seeprom_read */
	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
			       cpu_to_le32(data));
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
			      addr | PCI_VPD_ADDR_F);
	/* poll until hardware clears the flag bit */
	do {
		msleep(1);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}
597
598/**
599 * t3_seeprom_wp - enable/disable EEPROM write protection
600 * @adapter: the adapter
601 * @enable: 1 to enable write protection, 0 to disable it
602 *
603 * Enables or disables write protection on the serial EEPROM.
604 */
605int t3_seeprom_wp(struct adapter *adapter, int enable)
606{
607 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
608}
609
610/*
611 * Convert a character holding a hex digit to a number.
612 */
/* Convert one hex digit ('0'-'9', 'a'-'f', 'A'-'F') to its value. */
static unsigned int hex2int(unsigned char c)
{
	if (isdigit(c))
		return c - '0';
	return toupper(c) - 'A' + 10;
}
617
618/**
619 * get_vpd_params - read VPD parameters from VPD EEPROM
620 * @adapter: adapter to read
621 * @p: where to store the parameters
622 *
623 * Reads card parameters stored in VPD EEPROM.
624 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.  Probe VPD_BASE for the 0x82 ID tag to decide.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	/* read the whole structure, one 32-bit word at a time */
	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (u32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	/* clocks and timing are stored as ASCII decimal strings */
	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		/* port types and XAUI configs are ASCII hex */
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	/* MAC address base is 12 ASCII hex digits, 2 per byte */
	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}
669
670/* serial flash and firmware constants */
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes (standard SPI-flash command set) */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x77ffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};
688
689/**
690 * sf1_read - read data from the serial flash
691 * @adapter: the adapter
692 * @byte_cnt: number of bytes to read
693 * @cont: whether another operation will be chained
694 * @valp: where to store the read data
695 *
696 * Reads up to 4 bytes of data from the serial flash. The location of
697 * the read needs to be specified prior to calling this by issuing the
698 * appropriate commands to the serial flash.
699 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	/* hardware transfers 1-4 bytes per operation */
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	/* refuse to start while a previous flash op is still in flight */
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}
715
716/**
717 * sf1_write - write data to the serial flash
718 * @adapter: the adapter
719 * @byte_cnt: number of bytes to write
720 * @cont: whether another operation will be chained
721 * @val: value to write
722 *
723 * Writes up to 4 bytes of data to the serial flash. The location of
724 * the write needs to be specified prior to calling this by issuing the
725 * appropriate commands to the serial flash.
726 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	/* hardware transfers 1-4 bytes per operation */
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	/* refuse to start while a previous flash op is still in flight */
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}
739
740/**
741 * flash_wait_op - wait for a flash operation to complete
742 * @adapter: the adapter
743 * @attempts: max number of polls of the status register
744 * @delay: delay between polls in ms
745 *
746 * Wait for a flash operation to complete by polling the status register.
747 */
748static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
749{
750 int ret;
751 u32 status;
752
753 while (1) {
754 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
755 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
756 return ret;
757 if (!(status & 1))
758 return 0;
759 if (--attempts == 0)
760 return -EAGAIN;
761 if (delay)
762 msleep(delay);
763 }
764}
765
766/**
767 * t3_read_flash - read words from serial flash
768 * @adapter: the adapter
769 * @addr: the start address for the read
770 * @nwords: how many 32-bit words to read
771 * @data: where to store the read data
772 * @byte_oriented: whether to store data as bytes or as words
773 *
774 * Read the specified number of 32-bit words from the serial flash.
775 * If @byte_oriented is set the read data is stored as a byte array
776 * (i.e., big-endian), otherwise as 32-bit words in the platform's
777 * natural endianess.
778 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	/* read must be word-aligned and entirely within the flash */
	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	/* command word: 24-bit address (byte-swapped) + FAST READ opcode */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)	/* dummy byte */
		return ret;

	/* keep CONT set on all but the last word to hold chip-select */
	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);	/* store big-endian bytes */
	}
	return 0;
}
802
803/**
804 * t3_write_flash - write up to a page of data to the serial flash
805 * @adapter: the adapter
806 * @addr: the start address to write
807 * @n: length of data to write
808 * @data: the data to write
809 *
810 * Writes up to a page of data (256 bytes) to the serial flash starting
811 * at the given address.
812 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* the write must fit within the flash and within one 256-byte page */
	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	/* command word: 24-bit address (byte-swapped) + PAGE PROGRAM opcode */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	/* stream the data 4 bytes at a time, big-endian within each word;
	 * CONT stays set until the final chunk to hold chip-select */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page back to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* data was advanced past the payload above, hence data - n */
	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}
850
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700851/**
Divy Le Ray47330072007-08-29 19:15:52 -0700852 * t3_get_tp_version - read the tp sram version
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700853 * @adapter: the adapter
Divy Le Ray47330072007-08-29 19:15:52 -0700854 * @vers: where to place the version
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700855 *
Divy Le Ray47330072007-08-29 19:15:52 -0700856 * Reads the protocol sram version from sram.
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700857 */
Divy Le Ray47330072007-08-29 19:15:52 -0700858int t3_get_tp_version(struct adapter *adapter, u32 *vers)
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700859{
860 int ret;
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700861
862 /* Get version loaded in SRAM */
863 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
864 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
865 1, 1, 5, 1);
866 if (ret)
867 return ret;
868
Divy Le Ray47330072007-08-29 19:15:52 -0700869 *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
870
871 return 0;
872}
873
/**
 *	t3_check_tpsram_version - read the tp sram version
 *	@adapter: the adapter
 *	@must_load: set to 1 if loading a new microcode image is required
 *
 *	Checks the protocol SRAM version against the version this driver
 *	was built for.  Returns 0 when compatible, -EINVAL otherwise.
 */
int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	/* rev A parts have no loadable protocol SRAM to check */
	if (adapter->params.rev == T3_REV_A)
		return 0;

	*must_load = 1;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;

	if (major != TP_VERSION_MAJOR)
		/* major mismatch: a new image must be loaded */
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver needs version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	else {
		/* minor-only mismatch: tolerated, no reload forced */
		*must_load = 0;
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver compiled for version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	}
	return -EINVAL;
}
914
915/**
916 * t3_check_tpsram - check if provided protocol SRAM
917 * is compatible with this driver
918 * @adapter: the adapter
919 * @tp_sram: the firmware image to write
920 * @size: image size
921 *
922 * Checks if an adapter's tp sram is compatible with the driver.
923 * Returns 0 if the versions are compatible, a negative error otherwise.
924 */
925int t3_check_tpsram(struct adapter *adapter, u8 *tp_sram, unsigned int size)
926{
927 u32 csum;
928 unsigned int i;
929 const u32 *p = (const u32 *)tp_sram;
930
931 /* Verify checksum */
932 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
933 csum += ntohl(p[i]);
934 if (csum != 0xffffffff) {
935 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
936 csum);
937 return -EINVAL;
938 }
939
940 return 0;
941}
942
Divy Le Ray4aac3892007-01-30 19:43:45 -0800943enum fw_version_type {
944 FW_VERSION_N3,
945 FW_VERSION_T3
946};
947
Divy Le Ray4d22de32007-01-18 22:04:14 -0500948/**
949 * t3_get_fw_version - read the firmware version
950 * @adapter: the adapter
951 * @vers: where to place the version
952 *
953 * Reads the FW version from flash.
954 */
955int t3_get_fw_version(struct adapter *adapter, u32 *vers)
956{
957 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
958}
959
/**
 *	t3_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *	@must_load: set to 1 if loading a new FW image is required
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter, int *must_load)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	*must_load = 1;
	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;

	if (major != FW_VERSION_MAJOR)
		/* major mismatch: a new image must be loaded */
		CH_ERR(adapter, "found wrong FW version(%u.%u), "
		       "driver needs version %u.%u\n", major, minor,
		       FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else if (minor < FW_VERSION_MINOR) {
		/* older minor: warn but do not force a reload */
		*must_load = 0;
		CH_WARN(adapter, "found old FW minor version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	} else {
		/* newer minor: warn and treat as compatible */
		CH_WARN(adapter, "found newer FW version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}
1004
1005/**
1006 * t3_flash_erase_sectors - erase a range of flash sectors
1007 * @adapter: the adapter
1008 * @start: the first sector to erase
1009 * @end: the last sector to erase
1010 *
1011 * Erases the sectors in the given range.
1012 */
1013static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1014{
1015 while (start <= end) {
1016 int ret;
1017
1018 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1019 (ret = sf1_write(adapter, 4, 0,
1020 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1021 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1022 return ret;
1023 start++;
1024 }
1025 return 0;
1026}
1027
/**
 *	t3_load_fw - download firmware
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The FW image has the following sections: @size - 8 bytes of code and
 *	data, followed by 4 bytes of FW version, followed by the 32-bit
 *	1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const u32 *p = (const u32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	/* Image must be word-aligned and at least the minimum size. */
	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	/* It must also fit in the flash region reserved for FW. */
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	/* 1's complement checksum: the word sum must be 0xffffffff. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	/* Write the version word last, after the body is fully in flash. */
	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
1082
1083#define CIM_CTL_BASE 0x2000
1084
1085/**
1086 * t3_cim_ctl_blk_read - read a block from CIM control region
1087 *
1088 * @adap: the adapter
1089 * @addr: the start address within the CIM control region
1090 * @n: number of words to read
1091 * @valp: where to store the result
1092 *
1093 * Reads a block of 4-byte words from the CIM control region.
1094 */
1095int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
1096 unsigned int n, unsigned int *valp)
1097{
1098 int ret = 0;
1099
1100 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1101 return -EBUSY;
1102
1103 for ( ; !ret && n--; addr += 4) {
1104 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1105 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1106 0, 5, 2);
1107 if (!ret)
1108 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1109 }
1110 return ret;
1111}
1112
1113
/**
 *	t3_link_changed - handle interface link changes
 *	@adapter: the adapter
 *	@port_id: the port index that changed link state
 *
 *	Called when a port's link settings change to propagate the new values
 *	to the associated PHY and MAC.  After performing the common tasks it
 *	invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	/* On rev > 0 XAUI parts, resync the PCS and gate the XAUI TX/RX
	 * lanes whenever the link state actually toggles. */
	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
	/* Use negotiated pause only if the user asked for autoneg'd FC;
	 * otherwise force the requested RX/TX pause settings. */
	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}
1156
1157/**
1158 * t3_link_start - apply link configuration to MAC/PHY
1159 * @phy: the PHY to setup
1160 * @mac: the MAC to setup
1161 * @lc: the requested link configuration
1162 *
1163 * Set up a port's MAC and PHY according to a desired link configuration.
1164 * - If the PHY can auto-negotiate first decide what to advertise, then
1165 * enable/disable auto-negotiation as desired, and reset.
1166 * - If the PHY does not auto-negotiate just reset it.
1167 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1168 * otherwise do it later based on the outcome of auto-negotiation.
1169 */
1170int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1171{
1172 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1173
1174 lc->link_ok = 0;
1175 if (lc->supported & SUPPORTED_Autoneg) {
1176 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1177 if (fc) {
1178 lc->advertising |= ADVERTISED_Asym_Pause;
1179 if (fc & PAUSE_RX)
1180 lc->advertising |= ADVERTISED_Pause;
1181 }
1182 phy->ops->advertise(phy, lc->advertising);
1183
1184 if (lc->autoneg == AUTONEG_DISABLE) {
1185 lc->speed = lc->requested_speed;
1186 lc->duplex = lc->requested_duplex;
1187 lc->fc = (unsigned char)fc;
1188 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1189 fc);
1190 /* Also disables autoneg */
1191 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1192 phy->ops->reset(phy, 0);
1193 } else
1194 phy->ops->autoneg_enable(phy);
1195 } else {
1196 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1197 lc->fc = (unsigned char)fc;
1198 phy->ops->reset(phy, 0);
1199 }
1200 return 0;
1201}
1202
1203/**
1204 * t3_set_vlan_accel - control HW VLAN extraction
1205 * @adapter: the adapter
1206 * @ports: bitmap of adapter ports to operate on
1207 * @on: enable (1) or disable (0) HW VLAN extraction
1208 *
1209 * Enables or disables HW extraction of VLAN tags for the given port.
1210 */
1211void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1212{
1213 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1214 ports << S_VLANEXTRACTIONENABLE,
1215 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1216}
1217
/* One entry in the cause tables consumed by t3_handle_intr_status(). */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal:1;	/* whether the condition reported is fatal */
};
1224
1225/**
1226 * t3_handle_intr_status - table driven interrupt handler
1227 * @adapter: the adapter that generated the interrupt
1228 * @reg: the interrupt status register to process
1229 * @mask: a mask to apply to the interrupt status
1230 * @acts: table of interrupt actions
1231 * @stats: statistics counters tracking interrupt occurences
1232 *
1233 * A table driven interrupt handler that applies a set of masks to an
1234 * interrupt status word and performs the corresponding actions if the
1235 * interrupts described by the mask have occured. The actions include
1236 * optionally printing a warning or alert message, and optionally
1237 * incrementing a stat counter. The table is terminated by an entry
1238 * specifying mask 0. Returns the number of fatal interrupt conditions.
1239 */
1240static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1241 unsigned int mask,
1242 const struct intr_info *acts,
1243 unsigned long *stats)
1244{
1245 int fatal = 0;
1246 unsigned int status = t3_read_reg(adapter, reg) & mask;
1247
1248 for (; acts->mask; ++acts) {
1249 if (!(status & acts->mask))
1250 continue;
1251 if (acts->fatal) {
1252 fatal++;
1253 CH_ALERT(adapter, "%s (0x%x)\n",
1254 acts->msg, status & acts->mask);
1255 } else if (acts->msg)
1256 CH_WARN(adapter, "%s (0x%x)\n",
1257 acts->msg, status & acts->mask);
1258 if (acts->stat_idx >= 0)
1259 stats[acts->stat_idx]++;
1260 }
1261 if (status) /* clear processed interrupts */
1262 t3_write_reg(adapter, reg, status);
1263 return fatal;
1264}
1265
/*
 * Per-module interrupt enable masks used by t3_intr_enable() and the
 * module interrupt handlers.  Causes commented out inside a mask are
 * deliberately left disabled.
 */
#define SGE_INTR_MASK (F_RSPQDISABLED)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			V_BISTERR(M_BISTERR) | F_PEXERR)
#define ULPRX_INTR_MASK F_PARERR
#define ULPTX_INTR_MASK 0
#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
/* NOTE(review): PMTX/PMRX masks reference the *SPI_FRM_ERR macros defined
 * further below; that is fine because macros expand at their point of use. */
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
1307
/*
 * Interrupt handler for the PCIX1 module.  All causes except the
 * correctable ECC counter are treated as fatal.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1343
/*
 * Interrupt handler for the PCIE module.  On a PEX error the detailed
 * error code is logged before the table-driven processing.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1373
/*
 * TP interrupt handler.  The masks below are raw bit positions in
 * A_TP_INT_CAUSE; all listed conditions are fatal.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{0xffffff, "TP parity error", -1, 1},
		{0x1000000, "TP out of Rx pages", -1, 1},
		{0x2000000, "TP out of Tx pages", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  tp_intr_info, NULL))
		t3_fatal_err(adapter);
}
1390
/*
 * CIM interrupt handler.  All listed access violations are fatal.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}
1416
/*
 * ULP RX interrupt handler.  The only cause handled is a fatal
 * parity error.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERR, "ULP RX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1431
/*
 * ULP TX interrupt handler.  PBL out-of-bounds conditions are counted
 * per channel but are not fatal.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1449
/* Aggregate PM TX framing-error causes (ingress-command and
 * output-egress SPI interfaces), used in pmtx_intr_handler's table
 * and in PMTX_INTR_MASK. */
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1458
/*
 * PM TX interrupt handler.  All listed causes are fatal.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1479
/* Aggregate PM RX framing-error causes (ingress-egress and
 * output-command SPI interfaces), used in pmrx_intr_handler's table
 * and in PMRX_INTR_MASK. */
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1488
/*
 * PM RX interrupt handler.  All listed causes are fatal.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1509
/*
 * CPL switch interrupt handler.  All listed causes are fatal; the CIM
 * overflow cause is deliberately left unhandled here.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
/*	{ F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, */
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}
1528
/*
 * MPS interrupt handler.  Any parity error (bits 0-8 of the cause
 * register) is fatal.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}
1543
/* MC7 causes that bring the adapter down: uncorrectable, parity, and
 * address errors.  Correctable errors are only counted. */
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.  Logs and counts each error class, escalates
 * fatal causes, then clears the cause register.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		/* The error-address register is only present on rev > 0. */
		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
1596
/* XGMAC causes that bring the adapter down: TX/RX FIFO parity errors. */
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
/*
 * XGMAC interrupt handler.  Counts each cause, clears the cause
 * register, escalates fatal FIFO parity errors, and returns whether
 * any cause bit was set.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;

	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);
	return cause != 0;
}
1631
/*
 * Interrupt handler for PHY events.  Each port consumes one GPIO
 * interrupt line, taken from the low-order set bits of the adapter's
 * gpio_intr mask in port order.
 */
int t3_phy_intr_handler(struct adapter *adapter)
{
	u32 mask, gpi = adapter_info(adapter)->gpio_intr;
	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);

	for_each_port(adapter, i) {
		struct port_info *p = adap2pinfo(adapter, i);

		/* Peel off the lowest set bit of gpi: this port's GPIO
		 * interrupt line (x & (x - 1) clears the lowest set bit). */
		mask = gpi - (gpi & (gpi - 1));
		gpi -= mask;

		if (!(p->port_type->caps & SUPPORTED_IRQ))
			continue;

		if (cause & mask) {
			int phy_cause = p->phy.ops->intr_handler(&p->phy);

			if (phy_cause & cphy_cause_link_change)
				t3_link_changed(adapter, i);
			if (phy_cause & cphy_cause_fifo_error)
				p->phy.fifo_errors++;
		}
	}

	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
	return 0;
}
1662
/*
 * T3 slow path (non-data) interrupt handler.  Reads the top-level cause
 * register and dispatches to the per-module handlers for every enabled
 * cause that is set.  Returns 0 if no enabled cause was pending, 1
 * otherwise.
 */
int t3_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

	/* Only act on causes this driver enabled. */
	cause &= adapter->slow_intr_mask;
	if (!cause)
		return 0;
	if (cause & F_PCIM0) {
		/* Same top-level bit covers both bus flavors. */
		if (is_pcie(adapter))
			pcie_intr_handler(adapter);
		else
			pci_intr_handler(adapter);
	}
	if (cause & F_SGE3)
		t3_sge_err_intr_handler(adapter);
	if (cause & F_MC7_PMRX)
		mc7_intr_handler(&adapter->pmrx);
	if (cause & F_MC7_PMTX)
		mc7_intr_handler(&adapter->pmtx);
	if (cause & F_MC7_CM)
		mc7_intr_handler(&adapter->cm);
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_TP1)
		tp_intr_handler(adapter);
	if (cause & F_ULP2_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_ULP2_TX)
		ulptx_intr_handler(adapter);
	if (cause & F_PM1_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_PM1_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_MPS0)
		mps_intr_handler(adapter);
	if (cause & F_MC5A)
		t3_mc5_intr_handler(&adapter->mc5);
	if (cause & F_XGMAC0_0)
		mac_intr_handler(adapter, 0);
	if (cause & F_XGMAC0_1)
		mac_intr_handler(adapter, 1);
	if (cause & F_T3DBG)
		t3_os_ext_intr_handler(adapter);

	/* Clear the interrupts just processed. */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
	return 1;
}
1717
/**
 *	t3_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable interrupts by setting the interrupt enable registers of the
 *	various HW modules and then enabling the top-level interrupt
 *	concentrator.
 */
void t3_intr_enable(struct adapter *adapter)
{
	static const struct addr_val_pair intr_en_avp[] = {
		{A_SG_INT_ENABLE, SGE_INTR_MASK},
		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
		{A_TP_INT_ENABLE, 0x3bfffff},
		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
	};

	adapter->slow_intr_mask = PL_INTR_MASK;

	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);

	/* rev > 0 parts get additional CPL/ULPTX causes enabled. */
	if (adapter->params.rev > 0) {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
			     F_PBL_BOUND_ERR_CH1);
	} else {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
	}

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
		     adapter_info(adapter)->gpio_intr);
	t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
		     adapter_info(adapter)->gpio_intr);
	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
	else
		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
}
1770
/**
 *	t3_intr_disable - disable a card's interrupts
 *	@adapter: the adapter whose interrupts should be disabled
 *
 *	Disable interrupts.  We only disable the top-level interrupt
 *	concentrator and the SGE data interrupts; the per-module enables
 *	are left untouched.
 */
void t3_intr_disable(struct adapter *adapter)
{
	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
	adapter->slow_intr_mask = 0;
}
1784
/**
 *	t3_intr_clear - clear all interrupts
 *	@adapter: the adapter whose interrupts should be cleared
 *
 *	Clears all interrupts: per-port MAC/PHY causes first, then every
 *	module cause register, then the top-level concentrator.
 */
void t3_intr_clear(struct adapter *adapter)
{
	static const unsigned int cause_reg_addr[] = {
		A_SG_INT_CAUSE,
		A_SG_RSPQ_FL_STATUS,
		A_PCIX_INT_CAUSE,
		A_MC7_INT_CAUSE,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		A_CIM_HOST_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_MC5_DB_INT_CAUSE,
		A_ULPRX_INT_CAUSE,
		A_ULPTX_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		A_PM1_TX_INT_CAUSE,
		A_PM1_RX_INT_CAUSE,
		A_MPS_INT_CAUSE,
		A_T3DBG_INT_CAUSE,
	};
	unsigned int i;

	/* Clear PHY and MAC interrupts for each port. */
	for_each_port(adapter, i)
		t3_port_intr_clear(adapter, i);

	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

	/* The PEX error register only exists on PCIe parts. */
	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
}
1825
1826/**
1827 * t3_port_intr_enable - enable port-specific interrupts
1828 * @adapter: associated adapter
1829 * @idx: index of port whose interrupts should be enabled
1830 *
1831 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1832 * adapter port.
1833 */
1834void t3_port_intr_enable(struct adapter *adapter, int idx)
1835{
1836 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1837
1838 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1839 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1840 phy->ops->intr_enable(phy);
1841}
1842
1843/**
1844 * t3_port_intr_disable - disable port-specific interrupts
1845 * @adapter: associated adapter
1846 * @idx: index of port whose interrupts should be disabled
1847 *
1848 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1849 * adapter port.
1850 */
1851void t3_port_intr_disable(struct adapter *adapter, int idx)
1852{
1853 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1854
1855 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1856 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1857 phy->ops->intr_disable(phy);
1858}
1859
1860/**
1861 * t3_port_intr_clear - clear port-specific interrupts
1862 * @adapter: associated adapter
1863 * @idx: index of port whose interrupts to clear
1864 *
1865 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1866 * adapter port.
1867 */
void t3_port_intr_clear(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	/* Writing all-ones to the cause register clears every pending bit. */
	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
	/* Clear any latched PHY interrupt as well. */
	phy->ops->intr_clear(phy);
}
1876
Divy Le Raybb9366a2007-09-05 15:58:30 -07001877#define SG_CONTEXT_CMD_ATTEMPTS 100
1878
Divy Le Ray4d22de32007-01-18 22:04:14 -05001879/**
1880 * t3_sge_write_context - write an SGE context
1881 * @adapter: the adapter
1882 * @id: the context id
1883 * @type: the context type
1884 *
1885 * Program an SGE context with the values already loaded in the
1886 * CONTEXT_DATA? registers.
1887 */
static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
				unsigned int type)
{
	/* All-ones masks: every bit of CONTEXT_DATA0..3 is written through. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	/* Opcode 1 = write context; then poll until BUSY deasserts. */
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
1900
1901/**
1902 * t3_sge_init_ecntxt - initialize an SGE egress context
1903 * @adapter: the adapter to configure
1904 * @id: the context id
1905 * @gts_enable: whether to enable GTS for the context
1906 * @type: the egress context type
1907 * @respq: associated response queue
1908 * @base_addr: base address of queue
1909 * @size: number of queue entries
1910 * @token: uP token
1911 * @gen: initial generation value for the context
1912 * @cidx: consumer pointer
1913 *
1914 * Initialize an SGE egress context and make it ready for use. If the
1915 * platform allows concurrent context operations, the caller is
1916 * responsible for appropriate locking.
1917 */
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
		       enum sge_context_type type, int respq, u64 base_addr,
		       unsigned int size, unsigned int token, int gen,
		       unsigned int cidx)
{
	/* Offload queues start without FW write-request credits. */
	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Pack the 4K-page address across DATA1..3 in 16/32/4-bit pieces. */
	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
		     V_EC_BASE_LO(base_addr & 0xffff));
	base_addr >>= 16;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
		     F_EC_VALID);
	/* Commit the staged DATA registers to the egress context. */
	return t3_sge_write_context(adapter, id, F_EGRESS);
}
1944
1945/**
1946 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1947 * @adapter: the adapter to configure
1948 * @id: the context id
1949 * @gts_enable: whether to enable GTS for the context
1950 * @base_addr: base address of queue
1951 * @size: number of queue entries
1952 * @bsize: size of each buffer for this queue
1953 * @cong_thres: threshold to signal congestion to upstream producers
1954 * @gen: initial generation value for the context
1955 * @cidx: consumer pointer
1956 *
1957 * Initialize an SGE free list context and make it ready for use. The
1958 * caller is responsible for ensuring only one context operation occurs
1959 * at a time.
1960 */
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
			int gts_enable, u64 base_addr, unsigned int size,
			unsigned int bsize, unsigned int cong_thres, int gen,
			unsigned int cidx)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Low 32 bits of the page-aligned address go in DATA0, rest in DATA1. */
	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
		     V_FL_BASE_HI((u32) base_addr) |
		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
	/* cidx and bsize are each split across two registers (LO/HI halves). */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
	return t3_sge_write_context(adapter, id, F_FREELIST);
}
1985
1986/**
1987 * t3_sge_init_rspcntxt - initialize an SGE response queue context
1988 * @adapter: the adapter to configure
1989 * @id: the context id
1990 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
1991 * @base_addr: base address of queue
1992 * @size: number of queue entries
1993 * @fl_thres: threshold for selecting the normal or jumbo free list
1994 * @gen: initial generation value for the context
1995 * @cidx: consumer pointer
1996 *
1997 * Initialize an SGE response queue context and make it ready for use.
1998 * The caller is responsible for ensuring only one context operation
1999 * occurs at a time.
2000 */
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
			 int irq_vec_idx, u64 base_addr, unsigned int size,
			 unsigned int fl_thres, int gen, unsigned int cidx)
{
	unsigned int intr = 0;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
		     V_CQ_INDEX(cidx));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	/* irq_vec_idx < 0 means no interrupt: leave the enable bit clear. */
	if (irq_vec_idx >= 0)
		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
}
2024
2025/**
2026 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2027 * @adapter: the adapter to configure
2028 * @id: the context id
2029 * @base_addr: base address of queue
2030 * @size: number of queue entries
2031 * @rspq: response queue for async notifications
2032 * @ovfl_mode: CQ overflow mode
2033 * @credits: completion queue credits
2034 * @credit_thres: the credit threshold
2035 *
2036 * Initialize an SGE completion queue context and make it ready for use.
2037 * The caller is responsible for ensuring only one context operation
2038 * occurs at a time.
2039 */
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
			unsigned int size, int rspq, int ovfl_mode,
			unsigned int credits, unsigned int credit_thres)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	/* Generation starts at 1; ovfl_mode also seeds the CQ error field. */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
		     V_CQ_ERR(ovfl_mode));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
		     V_CQ_CREDIT_THRES(credit_thres));
	return t3_sge_write_context(adapter, id, F_CQ);
}
2061
2062/**
2063 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2064 * @adapter: the adapter
2065 * @id: the egress context id
2066 * @enable: enable (1) or disable (0) the context
2067 *
2068 * Enable or disable an SGE egress context. The caller is responsible for
2069 * ensuring only one context operation occurs at a time.
2070 */
int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Masks select only the EC_VALID bit, so nothing else is modified. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2086
2087/**
2088 * t3_sge_disable_fl - disable an SGE free-buffer list
2089 * @adapter: the adapter
2090 * @id: the free list context id
2091 *
2092 * Disable an SGE free-buffer list. The caller is responsible for
2093 * ensuring only one context operation occurs at a time.
2094 */
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Only touch the FL_SIZE field; writing 0 there disables the list. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2110
2111/**
2112 * t3_sge_disable_rspcntxt - disable an SGE response queue
2113 * @adapter: the adapter
2114 * @id: the response queue context id
2115 *
2116 * Disable an SGE response queue. The caller is responsible for
2117 * ensuring only one context operation occurs at a time.
2118 */
int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Only touch the CQ_SIZE field; writing 0 there disables the queue. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2134
2135/**
2136 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2137 * @adapter: the adapter
2138 * @id: the completion queue context id
2139 *
2140 * Disable an SGE completion queue. The caller is responsible for
2141 * ensuring only one context operation occurs at a time.
2142 */
int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Only touch the CQ_SIZE field; writing 0 there disables the CQ. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2158
2159/**
2160 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2161 * @adapter: the adapter
2162 * @id: the context id
2163 * @op: the operation to perform
2164 *
2165 * Perform the selected operation on an SGE completion queue context.
2166 * The caller is responsible for ensuring only one context operation
2167 * occurs at a time.
2168 */
int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
		      unsigned int credits)
{
	u32 val;

	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Credits ride in the upper half of DATA0 for credit-return ops. */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
		     V_CONTEXT(id) | F_CQ);
	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
		return -EIO;

	/* Opcodes 2..6 return the current CQ index to the caller. */
	if (op >= 2 && op < 7) {
		/* Rev > 0 parts report the index directly in the cmd result. */
		if (adapter->params.rev > 0)
			return G_CQ_INDEX(val);

		/*
		 * On rev 0 (T3A) issue an explicit context read (opcode 0)
		 * and pull the index out of DATA0 instead.
		 */
		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
				    F_CONTEXT_CMD_BUSY, 0,
				    SG_CONTEXT_CMD_ATTEMPTS, 1))
			return -EIO;
		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
	}
	return 0;
}
2198
2199/**
2200 * t3_sge_read_context - read an SGE context
2201 * @type: the context type
2202 * @adapter: the adapter
2203 * @id: the context id
2204 * @data: holds the retrieved context
2205 *
2206 * Read an SGE egress context. The caller is responsible for ensuring
2207 * only one context operation occurs at a time.
2208 */
static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
			       unsigned int id, u32 data[4])
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Opcode 0 = read context; results land in CONTEXT_DATA0..3. */
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
			    SG_CONTEXT_CMD_ATTEMPTS, 1))
		return -EIO;
	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
	return 0;
}
2226
2227/**
2228 * t3_sge_read_ecntxt - read an SGE egress context
2229 * @adapter: the adapter
2230 * @id: the context id
2231 * @data: holds the retrieved context
2232 *
2233 * Read an SGE egress context. The caller is responsible for ensuring
2234 * only one context operation occurs at a time.
2235 */
2236int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2237{
2238 if (id >= 65536)
2239 return -EINVAL;
2240 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2241}
2242
2243/**
2244 * t3_sge_read_cq - read an SGE CQ context
2245 * @adapter: the adapter
2246 * @id: the context id
2247 * @data: holds the retrieved context
2248 *
2249 * Read an SGE CQ context. The caller is responsible for ensuring
2250 * only one context operation occurs at a time.
2251 */
2252int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2253{
2254 if (id >= 65536)
2255 return -EINVAL;
2256 return t3_sge_read_context(F_CQ, adapter, id, data);
2257}
2258
2259/**
2260 * t3_sge_read_fl - read an SGE free-list context
2261 * @adapter: the adapter
2262 * @id: the context id
2263 * @data: holds the retrieved context
2264 *
2265 * Read an SGE free-list context. The caller is responsible for ensuring
2266 * only one context operation occurs at a time.
2267 */
2268int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2269{
2270 if (id >= SGE_QSETS * 2)
2271 return -EINVAL;
2272 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2273}
2274
2275/**
2276 * t3_sge_read_rspq - read an SGE response queue context
2277 * @adapter: the adapter
2278 * @id: the context id
2279 * @data: holds the retrieved context
2280 *
2281 * Read an SGE response queue context. The caller is responsible for
2282 * ensuring only one context operation occurs at a time.
2283 */
2284int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2285{
2286 if (id >= SGE_QSETS)
2287 return -EINVAL;
2288 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2289}
2290
2291/**
2292 * t3_config_rss - configure Rx packet steering
2293 * @adapter: the adapter
2294 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2295 * @cpus: values for the CPU lookup table (0xff terminated)
2296 * @rspq: values for the response queue lookup table (0xffff terminated)
2297 *
2298 * Programs the receive packet steering logic. @cpus and @rspq provide
2299 * the values for the CPU and response queue lookup tables. If they
2300 * provide fewer values than the size of the tables the supplied values
2301 * are used repeatedly until the tables are fully populated.
2302 */
void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
		   const u8 * cpus, const u16 *rspq)
{
	int i, j, cpu_idx = 0, q_idx = 0;

	/*
	 * Fill the CPU lookup table, two 6-bit CPU values per entry.
	 * The source array is 0xff-terminated and is reused cyclically
	 * if it is shorter than the table.
	 */
	if (cpus)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			u32 val = i << 16;	/* entry index in bits 16+ */

			for (j = 0; j < 2; ++j) {
				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
				if (cpus[cpu_idx] == 0xff)	/* wrap at terminator */
					cpu_idx = 0;
			}
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
		}

	/* Same scheme for the response queue map (0xffff-terminated). */
	if (rspq)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     (i << 16) | rspq[q_idx++]);
			if (rspq[q_idx] == 0xffff)
				q_idx = 0;
		}

	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
}
2330
2331/**
2332 * t3_read_rss - read the contents of the RSS tables
2333 * @adapter: the adapter
2334 * @lkup: holds the contents of the RSS lookup table
2335 * @map: holds the contents of the RSS map table
2336 *
2337 * Reads the contents of the receive packet steering tables.
2338 */
int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
{
	int i;
	u32 val;

	if (lkup)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			/* Select entry i for reading (all-ones upper half). */
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
			/* Top bit flags a completed read — TODO confirm */
			if (!(val & 0x80000000))
				return -EAGAIN;
			/* Each entry carries two CPU values in its low bytes. */
			*lkup++ = val;
			*lkup++ = (val >> 8);
		}

	if (map)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
			if (!(val & 0x80000000))
				return -EAGAIN;
			*map++ = val;
		}
	return 0;
}
2366
2367/**
2368 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2369 * @adap: the adapter
2370 * @enable: 1 to select offload mode, 0 for regular NIC
2371 *
2372 * Switches TP to NIC/offload mode.
2373 */
2374void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2375{
2376 if (is_offload(adap) || !enable)
2377 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2378 V_NICMODE(!enable));
2379}
2380
2381/**
2382 * pm_num_pages - calculate the number of pages of the payload memory
2383 * @mem_size: the size of the payload memory
2384 * @pg_size: the size of each payload memory page
2385 *
2386 * Calculate the number of pages, each of the given size, that fit in a
2387 * memory of the specified size, respecting the HW requirement that the
2388 * number of pages must be a multiple of 24.
2389 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
					unsigned int pg_size)
{
	/* Round the page count down to the nearest multiple of 24. */
	unsigned int pages = mem_size / pg_size;

	pages -= pages % 24;
	return pages;
}
2397
2398#define mem_region(adap, start, size, reg) \
2399 t3_write_reg((adap), A_ ## reg, (start)); \
2400 start += size
2401
2402/*
2403 * partition_mem - partition memory and configure TP memory settings
2404 * @adap: the adapter
2405 * @p: the TP parameters
2406 *
2407 * Partitions context and payload memory and configures TP's memory
2408 * registers.
2409 */
static void partition_mem(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
	unsigned int timers = 0, timers_shift = 22;

	/* Pick a timer-queue region size based on the number of TIDs. */
	if (adap->params.rev > 0) {
		if (tids <= 16 * 1024) {
			timers = 1;
			timers_shift = 16;
		} else if (tids <= 64 * 1024) {
			timers = 2;
			timers_shift = 18;
		} else if (tids <= 256 * 1024) {
			timers = 3;
			timers_shift = 20;
		}
	}

	t3_write_reg(adap, A_TP_PMM_SIZE,
		     p->chan_rx_size | (p->chan_tx_size >> 16));

	/* Tx payload memory layout. */
	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));

	/* Rx payload memory layout. */
	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);

	pstructs = p->rx_num_pgs + p->tx_num_pgs;
	/* Add a bit of headroom and make multiple of 24 */
	pstructs += 48;
	pstructs -= pstructs % 24;
	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);

	/*
	 * Carve up context memory: TCBs, egress/CQ contexts, timer queues,
	 * pstructs and the free-list regions, in order.  mem_region()
	 * advances m past each region.
	 */
	m = tids * TCB_SIZE;
	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);

	/* The CIM region starts at the next 4K boundary and takes the rest. */
	m = (m + 4095) & ~0xfff;
	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);

	/*
	 * If the remaining memory supports fewer connections than the MC5
	 * has non-server entries for, grow the server region to compensate.
	 */
	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
	if (tids < m)
		adap->params.mc5.nservers += m - tids;
}
2467
/* Write a TP register through the indirect PIO interface; the address
 * register must be written before the data register.
 */
static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
				  u32 val)
{
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	t3_write_reg(adap, A_TP_PIO_DATA, val);
}
2474
/* One-time static configuration of the TP (TCP processor) block. */
static void tp_config(struct adapter *adap, const struct tp_params *p)
{
	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
	/* TCP options: window scaling and SACK on, timestamps off. */
	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
		     V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
		     V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
			 F_IPV6ENABLE | F_NICMODE);
	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
	/* The early-send enable bit moved between T3A (rev 0) and later revs. */
	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
			 adap->params.rev > 0 ? F_ENABLEESND :
			 F_T3A_ENABLEESND);

	t3_set_reg_field(adap, A_TP_PC_CONFIG,
			 F_ENABLEEPCMDAFULL,
			 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
	/* NOTE(review): written twice back-to-back; the 1000 value is what
	 * sticks — presumably intentional, confirm against hardware docs. */
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);

	if (adap->params.rev > 0) {
		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
				 F_TXPACEAUTO);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
	} else
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

	/* Zero the Tx modulation weights and set the rate limiter. */
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
}
2517
2518/* Desired TP timer resolution in usec */
2519#define TP_TMR_RES 50
2520
2521/* TCP timer values in ms */
2522#define TP_DACK_TIMER 50
2523#define TP_RTO_MIN 250
2524
2525/**
2526 * tp_set_timers - set TP timing parameters
2527 * @adap: the adapter to set
2528 * @core_clk: the core clock frequency in Hz
2529 *
2530 * Set TP's timing parameters, such as the various timer resolutions and
2531 * the TCP timer values.
2532 */
static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
{
	/* Timer resolutions are log2 clock-divider exponents. */
	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
	unsigned int tstamp_re = fls(core_clk / 1000);     /* 1ms, at least */
	unsigned int tps = core_clk >> tre;	/* timer ticks per second */

	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
		     V_DELAYEDACKRESOLUTION(dack_re) |
		     V_TIMESTAMPRESOLUTION(tstamp_re));
	t3_write_reg(adap, A_TP_DACK_TIMER,
		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
	/* Exponential backoff multipliers 0..15, packed four per register. */
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
		     V_KEEPALIVEMAX(9));

/* Readability helper: "N SECONDS" converts seconds to timer ticks. */
#define SECONDS * tps

	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);

#undef SECONDS
}
2568
2569/**
2570 * t3_tp_set_coalescing_size - set receive coalescing size
2571 * @adap: the adapter
2572 * @size: the receive coalescing size
2573 * @psh: whether a set PSH bit should deliver coalesced data
2574 *
2575 * Set the receive coalescing size and PSH bit handling.
2576 */
int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
{
	u32 val;

	if (size > MAX_RX_COALESCING_LEN)
		return -EINVAL;

	val = t3_read_reg(adap, A_TP_PARA_REG3);
	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);

	/* size == 0 leaves coalescing disabled. */
	if (size) {
		val |= F_RXCOALESCEENABLE;
		if (psh)
			val |= F_RXCOALESCEPSHEN;
		/* Redundant clamp: size was already bounds-checked above. */
		size = min(MAX_RX_COALESCING_LEN, size);
		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
	}
	t3_write_reg(adap, A_TP_PARA_REG3, val);
	return 0;
}
2598
2599/**
2600 * t3_tp_set_max_rxsize - set the max receive size
2601 * @adap: the adapter
2602 * @size: the max receive size
2603 *
2604 * Set TP's max receive size. This is the limit that applies when
2605 * receive coalescing is disabled.
2606 */
void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
{
	/* Apply the same limit to both PM transfer-length fields. */
	t3_write_reg(adap, A_TP_PARA_REG7,
		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
}
2612
/* Populate the default MTU table (NMTUS = 16 entries, ascending). */
static void __devinit init_mtus(unsigned short mtus[])
{
	/*
	 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
	 * it can accommodate max size TCP/IP headers when SACK and timestamps
	 * are enabled and still have at least 8 bytes of payload.
	 */
	mtus[0] = 88;
	mtus[1] = 88;
	mtus[2] = 256;
	mtus[3] = 512;
	mtus[4] = 576;
	mtus[5] = 1024;
	mtus[6] = 1280;
	mtus[7] = 1492;
	mtus[8] = 1500;
	mtus[9] = 2002;
	mtus[10] = 2048;
	mtus[11] = 4096;
	mtus[12] = 4352;
	mtus[13] = 8192;
	mtus[14] = 9000;
	mtus[15] = 9600;
}
2637
2638/*
2639 * Initial congestion control parameters.
2640 */
2641static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2642{
2643 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2644 a[9] = 2;
2645 a[10] = 3;
2646 a[11] = 4;
2647 a[12] = 5;
2648 a[13] = 6;
2649 a[14] = 7;
2650 a[15] = 8;
2651 a[16] = 9;
2652 a[17] = 10;
2653 a[18] = 14;
2654 a[19] = 17;
2655 a[20] = 21;
2656 a[21] = 25;
2657 a[22] = 30;
2658 a[23] = 35;
2659 a[24] = 45;
2660 a[25] = 60;
2661 a[26] = 80;
2662 a[27] = 100;
2663 a[28] = 200;
2664 a[29] = 300;
2665 a[30] = 400;
2666 a[31] = 500;
2667
2668 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2669 b[9] = b[10] = 1;
2670 b[11] = b[12] = 2;
2671 b[13] = b[14] = b[15] = b[16] = 3;
2672 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2673 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2674 b[28] = b[29] = 6;
2675 b[30] = b[31] = 7;
2676}
2677
2678/* The minimum additive increment value for the congestion control table */
2679#define CC_MIN_INCR 2U
2680
2681/**
2682 * t3_load_mtus - write the MTU and congestion control HW tables
2683 * @adap: the adapter
2684 * @mtus: the unrestricted values for the MTU table
 * @alpha: the values for the congestion control alpha parameter
2686 * @beta: the values for the congestion control beta parameter
2687 * @mtu_cap: the maximum permitted effective MTU
2688 *
2689 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2690 * Update the high-speed congestion control table with the supplied alpha,
2691 * beta, and MTUs.
2692 */
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
		  unsigned short alpha[NCCTRL_WIN],
		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
{
	/* Expected average packets outstanding per congestion window. */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = min(mtus[i], mtu_cap);
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))	/* round */
			log2--;
		/* MTU entry: index | log2(mtu) | capped MTU value. */
		t3_write_reg(adap, A_TP_MTU_TABLE,
			     (i << 24) | (log2 << 16) | mtu);

		/* One congestion-control entry per (mtu, window) pair. */
		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* 40 = TCP/IP header overhead; enforce a minimum
			 * additive increment. */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
2725
2726/**
2727 * t3_read_hw_mtus - returns the values in the HW MTU table
2728 * @adap: the adapter
2729 * @mtus: where to store the HW MTU values
2730 *
2731 * Reads the HW MTU table.
2732 */
2733void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2734{
2735 int i;
2736
2737 for (i = 0; i < NMTUS; ++i) {
2738 unsigned int val;
2739
2740 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2741 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2742 mtus[i] = val & 0x3fff;
2743 }
2744}
2745
2746/**
2747 * t3_get_cong_cntl_tab - reads the congestion control table
2748 * @adap: the adapter
2749 * @incr: where to store the alpha values
2750 *
2751 * Reads the additive increments programmed into the HW congestion
2752 * control table.
2753 */
2754void t3_get_cong_cntl_tab(struct adapter *adap,
2755 unsigned short incr[NMTUS][NCCTRL_WIN])
2756{
2757 unsigned int mtu, w;
2758
2759 for (mtu = 0; mtu < NMTUS; ++mtu)
2760 for (w = 0; w < NCCTRL_WIN; ++w) {
2761 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2762 0xffff0000 | (mtu << 5) | w);
2763 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2764 0x1fff;
2765 }
2766}
2767
2768/**
2769 * t3_tp_get_mib_stats - read TP's MIB counters
2770 * @adap: the adapter
2771 * @tps: holds the returned counter values
2772 *
2773 * Returns the values of TP's MIB counters.
2774 */
2775void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2776{
2777 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2778 sizeof(*tps) / sizeof(u32), 0);
2779}
2780
/*
 * Program the [start, start + len) address range of a ULP RX region and
 * advance @start past it.  Wrapped in do { } while (0) so the multi-
 * statement expansion behaves as a single statement (safe inside an
 * unbraced if/else body); all arguments are parenthesized.
 */
#define ulp_region(adap, name, start, len) \
	do { \
		t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
		t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
			     (start) + (len) - 1); \
		(start) += (len); \
	} while (0)

/* Same as ulp_region() but for a ULP TX region; does NOT advance @start. */
#define ulptx_region(adap, name, start, len) \
	do { \
		t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
		t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
			     (start) + (len) - 1); \
	} while (0)
2791
/*
 * Partition the Rx channel memory among the ULP regions.  Each ulp_region()
 * call advances @m past the region it lays out; the ulptx_region() calls do
 * not advance @m, so each ULP TX region (TPT, PBL) shares its address range
 * with the ULP RX region programmed immediately after it -- presumably
 * intentional, both sides addressing the same backing memory; confirm
 * against the ULP documentation.
 */
static void ulp_config(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m = p->chan_rx_size;

	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
	/* Match all tag bits for TDDP lookups. */
	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
2805
Divy Le Ray480fe1a2007-05-30 21:10:58 -07002806/**
2807 * t3_set_proto_sram - set the contents of the protocol sram
2808 * @adapter: the adapter
2809 * @data: the protocol image
2810 *
2811 * Write the contents of the protocol SRAM.
2812 */
2813int t3_set_proto_sram(struct adapter *adap, u8 *data)
2814{
2815 int i;
2816 u32 *buf = (u32 *)data;
2817
2818 for (i = 0; i < PROTO_SRAM_LINES; i++) {
2819 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
2820 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
2821 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
2822 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
2823 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
2824
2825 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2826 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2827 return -EIO;
2828 }
2829 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2830
2831 return 0;
2832}
2833
Divy Le Ray4d22de32007-01-18 22:04:14 -05002834void t3_config_trace_filter(struct adapter *adapter,
2835 const struct trace_params *tp, int filter_index,
2836 int invert, int enable)
2837{
2838 u32 addr, key[4], mask[4];
2839
2840 key[0] = tp->sport | (tp->sip << 16);
2841 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2842 key[2] = tp->dip;
2843 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2844
2845 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2846 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2847 mask[2] = tp->dip_mask;
2848 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2849
2850 if (invert)
2851 key[3] |= (1 << 29);
2852 if (enable)
2853 key[3] |= (1 << 28);
2854
2855 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2856 tp_wr_indirect(adapter, addr++, key[0]);
2857 tp_wr_indirect(adapter, addr++, mask[0]);
2858 tp_wr_indirect(adapter, addr++, key[1]);
2859 tp_wr_indirect(adapter, addr++, mask[1]);
2860 tp_wr_indirect(adapter, addr++, key[2]);
2861 tp_wr_indirect(adapter, addr++, mask[2]);
2862 tp_wr_indirect(adapter, addr++, key[3]);
2863 tp_wr_indirect(adapter, addr, mask[3]);
2864 t3_read_reg(adapter, A_TP_PIO_DATA);
2865}
2866
2867/**
2868 * t3_config_sched - configure a HW traffic scheduler
2869 * @adap: the adapter
2870 * @kbps: target rate in Kbps
2871 * @sched: the scheduler index
2872 *
2873 * Configure a HW scheduler for the target rate
2874 */
2875int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2876{
2877 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2878 unsigned int clk = adap->params.vpd.cclk * 1000;
2879 unsigned int selected_cpt = 0, selected_bpt = 0;
2880
2881 if (kbps > 0) {
2882 kbps *= 125; /* -> bytes */
2883 for (cpt = 1; cpt <= 255; cpt++) {
2884 tps = clk / cpt;
2885 bpt = (kbps + tps / 2) / tps;
2886 if (bpt > 0 && bpt <= 255) {
2887 v = bpt * tps;
2888 delta = v >= kbps ? v - kbps : kbps - v;
2889 if (delta <= mindelta) {
2890 mindelta = delta;
2891 selected_cpt = cpt;
2892 selected_bpt = bpt;
2893 }
2894 } else if (selected_cpt)
2895 break;
2896 }
2897 if (!selected_cpt)
2898 return -EINVAL;
2899 }
2900 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2901 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2902 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2903 if (sched & 1)
2904 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2905 else
2906 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2907 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
2908 return 0;
2909}
2910
2911static int tp_init(struct adapter *adap, const struct tp_params *p)
2912{
2913 int busy = 0;
2914
2915 tp_config(adap, p);
2916 t3_set_vlan_accel(adap, 3, 0);
2917
2918 if (is_offload(adap)) {
2919 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
2920 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2921 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2922 0, 1000, 5);
2923 if (busy)
2924 CH_ERR(adap, "TP initialization timed out\n");
2925 }
2926
2927 if (!busy)
2928 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
2929 return busy;
2930}
2931
2932int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2933{
2934 if (port_mask & ~((1 << adap->params.nports) - 1))
2935 return -EINVAL;
2936 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
2937 port_mask << S_PORT0ACTIVE);
2938 return 0;
2939}
2940
/*
 * Perform the bits of HW initialization that are dependent on the number
 * of available ports.
 */
static void init_hw_for_avail_ports(struct adapter *adap, int nports)
{
	int i;

	if (nports == 1) {
		/* Single port: no round-robin between the two channels,
		 * only port 0 active. */
		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
			     F_PORT0ACTIVE | F_ENFORCEPKT);
		t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
	} else {
		/* Two ports: round-robin arbitration, equal ULP TX DMA
		 * weights, both ports active. */
		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
			     F_ENFORCEPKT);
		/* PM1 TX memory split evenly between the two channels --
		 * presumably 0x8000 pages each; confirm against PM1 spec. */
		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
		/* Program all 16 entries of the TX modulation queue table. */
		for (i = 0; i < 16; i++)
			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
				     (i << 16) | 0x1010);
	}
}
2972
/*
 * Calibrate the MAC interface impedance.  For XAUI the HW calibration state
 * machine is retried up to 5 times, 1 ms apart; for RGMII the pull-up/down
 * impedance values are programmed directly.  Returns 0 on success, -1 if
 * XAUI calibration never completes cleanly.
 */
static int calibrate_xgm(struct adapter *adapter)
{
	if (uses_xaui(adapter)) {
		unsigned int v, i;

		for (i = 0; i < 5; ++i) {
			/* Restart calibration and give it 1 ms to finish. */
			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
			t3_read_reg(adapter, A_XGM_XAUI_IMP);	/* flush */
			msleep(1);
			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
				/* Done without fault: latch the result. */
				t3_write_reg(adapter, A_XGM_XAUI_IMP,
					     V_XAUIIMP(G_CALIMP(v) >> 2));
				return 0;
			}
		}
		CH_ERR(adapter, "MAC calibration failed\n");
		return -1;
	} else {
		/* RGMII: program impedance directly, then latch it. */
		t3_write_reg(adapter, A_XGM_RGMII_IMP,
			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 F_XGM_IMPSETUPDATE);
	}
	return 0;
}
2999
/*
 * T3B variant of MAC impedance calibration.  XAUI needs no SW assistance on
 * T3B; RGMII is calibrated by pulsing CALRESET, then IMPSETUPDATE, then
 * CALUPDATE in sequence.
 */
static void calibrate_xgm_t3b(struct adapter *adapter)
{
	if (!uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
				 F_XGM_IMPSETUPDATE);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
	}
}
3014
/*
 * MC7 (external memory controller) timing parameters.  Field names suggest
 * standard DRAM timing values in memory-clock cycles; RefCyc is indexed by
 * the density code read from the MC7 CFG register -- confirm details
 * against the MC7 register specification.
 */
struct mc7_timing_params {
	unsigned char ActToPreDly;	/* active-to-precharge delay */
	unsigned char ActToRdWrDly;	/* active-to-read/write delay */
	unsigned char PreCyc;		/* precharge cycles */
	unsigned char RefCyc[5];	/* refresh cycles, per density code */
	unsigned char BkCyc;		/* bank cycles */
	unsigned char WrToRdDly;	/* write-to-read delay */
	unsigned char RdToWrDly;	/* read-to-write delay */
};
3024
3025/*
3026 * Write a value to a register and check that the write completed. These
3027 * writes normally complete in a cycle or two, so one read should suffice.
3028 * The very first read exists to flush the posted write to the device.
3029 */
3030static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3031{
3032 t3_write_reg(adapter, addr, val);
3033 t3_read_reg(adapter, addr); /* flush */
3034 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3035 return 0;
3036 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3037 return -EIO;
3038}
3039
/*
 * Bring up one MC7 memory controller and its attached memory: impedance
 * calibration (skipped for "slow" parts), timing-parameter programming,
 * the DRAM mode-register init sequence, periodic refresh setup, ECC
 * enable, and a full BIST pass before declaring the memory ready.
 * @mc7_clock is the memory clock in KHz (see the refresh computation
 * below); @mem_type indexes the mc7_mode/mc7_timings tables.  Returns 0 on
 * success or when the memory is absent, -1 on any failure.
 */
static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
	/* DRAM mode-register values, one per supported memory type. */
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	/* Timing parameter sets, one per supported memory type. */
	static const struct mc7_timing_params mc7_timings[] = {
		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	struct adapter *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	/* Nothing to do if this MC7 has no memory attached. */
	if (!mc7->size)
		return 0;

	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	/* Enable the memory interface before touching the DRAM. */
	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
	msleep(1);

	if (!slow) {
		/* Run a single HW calibration cycle and check the result. */
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
		msleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	/* Program the timing parameters for this memory type/density. */
	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

	/* DRAM init: precharge, extended mode registers, mode register.
	 * The order and values follow the memory's power-up sequence. */
	val = slow ? 3 : 6;
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
		udelay(5);
	}

	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	/* clock value is in KHz */
	/* Derive the periodic refresh divider from the clock; the 7812
	 * scale factor presumably comes from the DRAM refresh interval --
	 * TODO confirm against the MC7 documentation. */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* ns */
	mc7_clock /= 1000000;	/* KHz->MHz, ns->us */

	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */

	/* Enable ECC generation/checking, then BIST the whole memory. */
	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */

	/* Poll for BIST completion, up to 50 * 250 ms. */
	attempts = 50;
	do {
		msleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}

	/* Enable normal memory accesses. */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

out_fail:
	return -1;
}
3152
/*
 * Configure the PCIe ack latency and replay timer limits from the
 * negotiated link width and the maximum payload size, then clear pending
 * PEX errors and enable CLIDECEN.
 */
static void config_pcie(struct adapter *adap)
{
	/* Ack latency, indexed by [log2(link width)][payload size code];
	 * values presumably from the PCIe spec tables -- TODO confirm. */
	static const u16 ack_lat[4][6] = {
		{237, 416, 559, 1071, 2095, 4143},
		{128, 217, 289, 545, 1057, 2081},
		{73, 118, 154, 282, 538, 1050},
		{67, 107, 86, 150, 278, 534}
	};
	/* Replay timer limits, same indexing as ack_lat. */
	static const u16 rpl_tmr[4][6] = {
		{711, 1248, 1677, 3213, 6285, 12429},
		{384, 651, 867, 1635, 3171, 6243},
		{219, 354, 462, 846, 1614, 3150},
		{201, 321, 258, 450, 834, 1602}
	};

	u16 val;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	/* Max payload size code from Device Control (bits 7:5). */
	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
			     &val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
			     &val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	/* Rev 0 parts have no separate RX fast-training count. */
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & 1)		/* check LOsEnable */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	/* Rev 0 uses a different ACKLAT field layout. */
	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	/* Clear any latched PEX errors. */
	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
	t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
}
3203
/*
 * Initialize and configure T3 HW modules.  This performs the
 * initialization steps that need to be done once after a card is reset.
 * MAC and PHY initialization is handled separately whenever a port is
 * enabled.
 *
 * fw_params are passed to FW and their value is platform dependent.  Only
 * the top 8 bits are available for use, the rest must be 0.
 *
 * Returns 0 on success, -EIO (or the first sub-init's failure) otherwise.
 */
int t3_init_hw(struct adapter *adapter, u32 fw_params)
{
	int err = -EIO, attempts = 100;
	const struct vpd_params *vpd = &adapter->params.vpd;

	/* MAC impedance calibration differs between rev 0 and later. */
	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	/* A zero memory clock means no external memory is present. */
	if (vpd->mclk) {
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
				adapter->params.mc5.nfilters,
				adapter->params.mc5.nroutes))
			goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);

	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
	init_hw_for_avail_ports(adapter, adapter->params.nports);
	t3_sge_init(adapter, &adapter->params.sge);

	/* Hand fw_params to the firmware and point it at its boot image. */
	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */

	do {			/* wait for uP to initialize */
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts) {
		CH_ERR(adapter, "uP initialization timed out\n");
		goto out_err;
	}

	err = 0;
out_err:
	return err;
}
3272
3273/**
3274 * get_pci_mode - determine a card's PCI mode
3275 * @adapter: the adapter
3276 * @p: where to store the PCI settings
3277 *
3278 * Determines a card's PCI mode and associated parameters, such as speed
3279 * and width.
3280 */
3281static void __devinit get_pci_mode(struct adapter *adapter,
3282 struct pci_params *p)
3283{
3284 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3285 u32 pci_mode, pcie_cap;
3286
3287 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3288 if (pcie_cap) {
3289 u16 val;
3290
3291 p->variant = PCI_VARIANT_PCIE;
3292 p->pcie_cap_addr = pcie_cap;
3293 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3294 &val);
3295 p->width = (val >> 4) & 0x3f;
3296 return;
3297 }
3298
3299 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3300 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3301 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3302 pci_mode = G_PCIXINITPAT(pci_mode);
3303 if (pci_mode == 0)
3304 p->variant = PCI_VARIANT_PCI;
3305 else if (pci_mode < 4)
3306 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3307 else if (pci_mode < 8)
3308 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3309 else
3310 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3311}
3312
/**
 *	init_link_config - initialize a link's SW state
 *	@lc: structure holding the link state
 *	@caps: link capability bitmap (SUPPORTED_* flags) of the port
 *
 *	Initializes the SW state maintained for each link, including the
 *	link's capabilities and default speed/duplex/flow-control/
 *	autonegotiation settings.
 */
static void __devinit init_link_config(struct link_config *lc,
				       unsigned int caps)
{
	lc->supported = caps;
	/* Speed/duplex are unknown until autonegotiation completes. */
	lc->requested_speed = lc->speed = SPEED_INVALID;
	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & SUPPORTED_Autoneg) {
		/* Advertise everything we support; let pause resolution
		 * be negotiated too. */
		lc->advertising = lc->supported;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}
3338
3339/**
3340 * mc7_calc_size - calculate MC7 memory size
3341 * @cfg: the MC7 configuration
3342 *
3343 * Calculates the size of an MC7 memory in bytes from the value of its
3344 * configuration register.
3345 */
3346static unsigned int __devinit mc7_calc_size(u32 cfg)
3347{
3348 unsigned int width = G_WIDTH(cfg);
3349 unsigned int banks = !!(cfg & F_BKS) + 1;
3350 unsigned int org = !!(cfg & F_ORG) + 1;
3351 unsigned int density = G_DEN(cfg);
3352 unsigned int MBs = ((256 << density) * banks) / (org << width);
3353
3354 return MBs << 20;
3355}
3356
3357static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3358 unsigned int base_addr, const char *name)
3359{
3360 u32 cfg;
3361
3362 mc7->adapter = adapter;
3363 mc7->name = name;
3364 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3365 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
Divy Le Ray8ac3ba62007-03-31 00:23:19 -07003366 mc7->size = mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
Divy Le Ray4d22de32007-01-18 22:04:14 -05003367 mc7->width = G_WIDTH(cfg);
3368}
3369
/*
 * Initialize the SW state of one MAC instance.  On rev-0 XAUI adapters
 * this also programs the SERDES control value and turns off RGMII mode
 * for the port.
 */
void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
{
	mac->adapter = adapter;
	/* Register offset of MAC @index relative to MAC 0. */
	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
	mac->nucast = 1;	/* presumably # of unicast addresses in use;
				 * confirm against struct cmac */

	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
		/* SERDES control value differs for 10G vs 1G links. */
		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
				 F_ENRGMII, 0);
	}
}
3383
/*
 * Early, port-independent HW setup: MDIO (MI1) init, I2C clock divider,
 * GPIO outputs, MC5 server index reset, and enabling/resetting the MAC
 * clocks so MAC registers become accessible.  The t3_read_reg() calls
 * after each port-config write flush the posted writes.
 */
void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
{
	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);

	mi1_init(adapter, ai);
	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);

	/* RGMII is needed on rev 0 and on non-XAUI boards. */
	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* Enable MAC clocks so we can access the registers */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);

	/* Pulse the clock-divider reset on both MACs. */
	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
}
3408
/*
 * Reset the adapter.
 * Older PCIe cards lose their config space during reset, PCI-X
 * ones don't.
 */
static int t3_reset_adapter(struct adapter *adapter)
{
	int i, save_and_restore_pcie =
	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
	uint16_t devid = 0;

	if (save_and_restore_pcie)
		pci_save_state(adapter->pdev);
	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);

	/*
	 * Delay. Give Some time to device to reset fully.
	 * XXX The delay time should be modified.
	 */
	for (i = 0; i < 10; i++) {
		msleep(50);
		/* The device is back when its vendor ID (0x1425, Chelsio)
		 * reads correctly from config space again. */
		pci_read_config_word(adapter->pdev, 0x00, &devid);
		if (devid == 0x1425)
			break;
	}

	if (devid != 0x1425)
		return -1;

	if (save_and_restore_pcie)
		pci_restore_state(adapter->pdev);
	return 0;
}
3442
/*
 * Initialize adapter SW state for the various HW modules, set initial values
 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
 * interface.
 *
 * Returns 0 on success, a negative errno from get_vpd_params(), or -1 if
 * the optional adapter reset fails.
 */
int __devinit t3_prep_adapter(struct adapter *adapter,
			      const struct adapter_info *ai, int reset)
{
	int ret;
	unsigned int i, j = 0;

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports;
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
	adapter->params.linkpoll_period = 0;
	/* 1G MACs accumulate stats less often than 10G ones. */
	adapter->params.stats_update_period = is_10G(adapter) ?
	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	t3_sge_prep(adapter, &adapter->params.sge);

	/* A zero memory clock means no external memory. */
	if (adapter->params.vpd.mclk) {
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

		/* Derive TP memory partitioning from the MC7 sizes. */
		p->nchan = ai->nports;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
		    adapter->params.rev > 0 ? 12 : 6;
	}

	/* Offload is possible only if all three memories are present. */
	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
				  t3_mc7_size(&adapter->pmtx) &&
				  t3_mc7_size(&adapter->cm);

	if (is_offload(adapter)) {
		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
		    DEFAULT_NFILTERS : 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

		init_mtus(adapter->params.mtus);
		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
	}

	early_hw_init(adapter, ai);

	for_each_port(adapter, i) {
		u8 hw_addr[6];
		struct port_info *p = adap2pinfo(adapter, i);

		/* Skip VPD slots with no port attached. */
		while (!adapter->params.vpd.port_type[j])
			++j;

		p->port_type = &port_types[adapter->params.vpd.port_type[j]];
		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
				       ai->mdio_ops);
		mac_prep(&p->mac, adapter, j);
		++j;

		/*
		 * The VPD EEPROM stores the base Ethernet address for the
		 * card.  A port's address is derived from the base by adding
		 * the port's index to the base's low octet.
		 */
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		memcpy(adapter->port[i]->dev_addr, hw_addr,
		       ETH_ALEN);
		memcpy(adapter->port[i]->perm_addr, hw_addr,
		       ETH_ALEN);
		init_link_config(&p->link_config, p->port_type->caps);
		p->phy.ops->power_down(&p->phy, 1);
		/* Ports without link interrupts are polled every 10 ticks. */
		if (!(p->port_type->caps & SUPPORTED_IRQ))
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}
3544
/*
 * Drive GPIO0's output value high -- presumably lights the adapter's
 * "ready" LED, per the function name; confirm against board wiring.
 */
void t3_led_ready(struct adapter *adapter)
{
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
}