blob: dd3149d94ba8ed7115e9bd5149d7a26afe5b09f9 [file] [log] [blame]
Divy Le Ray4d22de32007-01-18 22:04:14 -05001/*
Divy Le Ray1d68e932007-01-30 19:44:35 -08002 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
Divy Le Ray4d22de32007-01-18 22:04:14 -05003 *
Divy Le Ray1d68e932007-01-30 19:44:35 -08004 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
Divy Le Ray4d22de32007-01-18 22:04:14 -05009 *
Divy Le Ray1d68e932007-01-30 19:44:35 -080010 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
Divy Le Ray4d22de32007-01-18 22:04:14 -050031 */
Divy Le Ray4d22de32007-01-18 22:04:14 -050032#include "common.h"
33#include "regs.h"
34#include "sge_defs.h"
35#include "firmware_exports.h"
36
Divy Le Rayf2c68792007-01-30 19:44:13 -080037/**
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
46 *
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
51 */
Divy Le Ray4d22de32007-01-18 22:04:14 -050052
53int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
55{
56 while (1) {
57 u32 val = t3_read_reg(adapter, reg);
58
59 if (!!(val & mask) == polarity) {
60 if (valp)
61 *valp = val;
62 return 0;
63 }
64 if (--attempts == 0)
65 return -EAGAIN;
66 if (delay)
67 udelay(delay);
68 }
69}
70
71/**
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
77 *
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
81 */
82void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
84{
85 while (n--) {
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
87 p++;
88 }
89}
90
/**
 *	t3_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.  Bits outside @mask are preserved; callers are
 *	expected to pass a @val that lies within @mask.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;	/* clear the field */

	t3_write_reg(adapter, addr, v | val);
	t3_read_reg(adapter, addr);	/* flush */
}
109
/**
 *	t3_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		/* select the indirect register, then fetch its value */
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}
132
/**
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies MC7 to read from
 *	@start: index of first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Read n 64-bit words from MC7 starting at word start, using backdoor
 *	accesses.  Returns 0 on success, -EINVAL for an out-of-range request,
 *	or -EIO if the backdoor engine stays busy.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	/* per-memory-width shift/step tables, indexed by mc7->width */
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);	/* word index -> device address */
	while (n--) {
		int i;
		u64 val64 = 0;

		/* a 64-bit word takes (1 << width) backdoor accesses */
		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			/* kick off a backdoor read and poll until not busy */
			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				/* full 64 bits come back in one access */
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				/* narrower memories return partial words */
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
190
/*
 * Initialize MI1 (the MDIO interface).  The clock divider is derived from
 * the core clock and the VPD-supplied MDIO clock.  Non-10G adapters
 * additionally set the ST field (presumably selecting clause-22 style
 * framing — confirm against the MI1_CFG register spec).
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
	    V_CLKDIV(clkdiv);

	if (!(ai->caps & SUPPORTED_10000baseT_Full))
		val |= V_ST(1);
	t3_write_reg(adap, A_MI1_CFG, val);
}
204
205#define MDIO_ATTEMPTS 10
206
/*
 * MI1 read/write operations for direct-addressed PHYs.
 */
static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
		    int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	/* direct addressing has no MMDs */
	if (mmd_addr)
		return -EINVAL;

	/* serialize access to the shared MI1 address/op/data registers */
	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));	/* issue the read */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
228
/* Direct-addressed counterpart of mi1_read: write @val to a PHY register. */
static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
		     int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	/* direct addressing has no MMDs */
	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));	/* issue the write */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
246
/* MDIO operation table for PHYs that are addressed directly (no MMD) */
static const struct mdio_ops mi1_mdio_ops = {
	mi1_read,
	mi1_write
};
251
/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	/* first load the target register address into the MMD ... */
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));	/* address cycle */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		/* ... then perform the actual read */
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
276
/* Indirect-addressed counterpart of mi1_ext_read: address cycle + write. */
static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	/* load the target register address into the MMD ... */
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));	/* address cycle */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		/* ... then write the value */
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
297
/* MDIO operation table for PHYs that require indirect (MMD) addressing */
static const struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};
302
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.  Returns the first
 *	error from the underlying read/write, or 0 on success.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	unsigned int val;
	int ret;

	ret = mdio_read(phy, mmd, reg, &val);
	if (ret)
		return ret;

	return mdio_write(phy, mmd, reg, (val & ~clear) | set);
}
327
/**
 *	t3_phy_reset - reset a PHY block
 *	@phy: the PHY to operate on
 *	@mmd: the device address of the PHY block to reset
 *	@wait: how long to wait for the reset to complete in 1ms increments
 *
 *	Resets a PHY block and optionally waits for the reset to complete.
 *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 *	for 10G PHYs.  Returns 0 on success, a negative mdio error, or -1 if
 *	the reset did not complete within @wait milliseconds.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	/* clear power-down and set the (self-clearing) reset bit */
	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
	if (err || !wait)
		return err;

	/* poll BMCR until the PHY clears the reset bit */
	do {
		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
		if (err)
			return err;
		ctl &= BMCR_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}
358
/**
 *	t3_phy_advertise - set the PHY advertisement registers for autoneg
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = mdio_read(phy, 0, MII_CTRL1000, &val);
	if (err)
		return err;

	/* update only the GigE bits, preserving the rest of CTRL1000 */
	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = mdio_write(phy, 0, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;	/* presumably the 802.3 selector field — confirm */
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}
401
/**
 *	t3_set_phy_speed_duplex - force PHY speed and duplex
 *	@phy: the PHY to operate on
 *	@speed: requested PHY speed (a negative value leaves speed unchanged)
 *	@duplex: requested PHY duplex (a negative value leaves duplex unchanged)
 *
 *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
 *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	/* read-modify-write BMCR so unrelated control bits are kept */
	err = mdio_read(phy, 0, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return mdio_write(phy, 0, MII_BMCR, ctl);
}
436
/*
 * Per-board adapter information, indexed by board type (see
 * t3_get_adapter_info).  Each entry selects GPIO setup, link capabilities,
 * the MDIO operation table, and the board name.
 */
static const struct adapter_info t3_adap_info[] = {
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{2, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
};
460
461/*
462 * Return the adapter_info structure with a given index. Out-of-range indices
463 * return NULL.
464 */
465const struct adapter_info *t3_get_adapter_info(unsigned int id)
466{
467 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
468}
469
#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
		 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)

/*
 * Per port-type information: PHY setup routine, capabilities, and a
 * human-readable description.  Indexed by the port type from VPD; the
 * first entry is a placeholder for type 0, and a NULL setup routine
 * means no PHY driver is available here for that type.
 */
static const struct port_type_info port_types[] = {
	{NULL},
	{t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-XR"},
	{t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{NULL, CAPS_10G, "10GBASE-KX4"},
	{t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-SR"},
	{NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
};

#undef CAPS_1G
#undef CAPS_10G
492
/* Expands to the three fields of a VPD-R keyword entry: 2-byte keyword,
 * length byte, and the data bytes themselves. */
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.  The layout must match the on-EEPROM format exactly:
 * get_vpd_params() fills it by reading raw 32-bit words straight into it.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, 16);	/* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};
524
#define EEPROM_MAX_POLL 4	/* max polls of the VPD flag bit */
#define EEPROM_STAT_ADDR 0x4000	/* status word; allowed beyond EEPROMSIZE */
#define VPD_BASE 0xc00		/* usual location of the VPD structure */

/**
 *	t3_seeprom_read - read a VPD EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.  A zero is written to the flag bit when the
 *	address is written to the control register.  The hardware device will
 *	set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	/* only 4-byte-aligned accesses within the EEPROM (or the special
	 * status address) are supported */
	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);	/* VPD data is little-endian */
	return 0;
}
563
564/**
565 * t3_seeprom_write - write a VPD EEPROM location
566 * @adapter: adapter to write
567 * @addr: EEPROM address
568 * @data: value to write
569 *
570 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
571 * VPD ROM capability.
572 */
573int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
574{
575 u16 val;
576 int attempts = EEPROM_MAX_POLL;
577 unsigned int base = adapter->params.pci.vpd_cap_addr;
578
579 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
580 return -EINVAL;
581
582 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
583 cpu_to_le32(data));
584 pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
585 addr | PCI_VPD_ADDR_F);
586 do {
587 msleep(1);
588 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
589 } while ((val & PCI_VPD_ADDR_F) && --attempts);
590
591 if (val & PCI_VPD_ADDR_F) {
592 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
593 return -EIO;
594 }
595 return 0;
596}
597
598/**
599 * t3_seeprom_wp - enable/disable EEPROM write protection
600 * @adapter: the adapter
601 * @enable: 1 to enable write protection, 0 to disable it
602 *
603 * Enables or disables write protection on the serial EEPROM.
604 */
605int t3_seeprom_wp(struct adapter *adapter, int enable)
606{
607 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
608}
609
/*
 * Convert a character holding a hex digit to a number.  Accepts
 * '0'-'9', 'a'-'f', and 'A'-'F'.
 */
static unsigned int hex2int(unsigned char c)
{
	if (isdigit(c))
		return c - '0';
	return toupper(c) - 'A' + 10;
}
617
/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.  Returns 0 on success
 *	or the first error from t3_seeprom_read.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
	if (ret)
		return ret;
	/* 0x82 is the VPD identifier-string tag; if it isn't present at
	 * VPD_BASE this must be an early card with VPD at address 0 */
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	/* fill the whole structure 4 bytes at a time */
	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (u32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	/* clocks and timings are stored as decimal ASCII strings */
	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	/* the MAC address base is stored as a 12-digit hex string */
	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}
668
/* serial flash and firmware constants (opcodes follow the common SPI
 * serial-flash command set — confirm against the flash part's datasheet) */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x77ffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};
687
/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read (1-4)
 *	@cont: whether another operation will be chained
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;	/* a previous SF operation is still running */
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}
714
/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write (1-4)
 *	@cont: whether another operation will be chained
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;	/* a previous SF operation is still running */
	t3_write_reg(adapter, A_SF_DATA, val);
	/* V_OP(1) distinguishes a write from the read issued by sf1_read */
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}
738
739/**
740 * flash_wait_op - wait for a flash operation to complete
741 * @adapter: the adapter
742 * @attempts: max number of polls of the status register
743 * @delay: delay between polls in ms
744 *
745 * Wait for a flash operation to complete by polling the status register.
746 */
747static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
748{
749 int ret;
750 u32 status;
751
752 while (1) {
753 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
754 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
755 return ret;
756 if (!(status & 1))
757 return 0;
758 if (--attempts == 0)
759 return -EAGAIN;
760 if (delay)
761 msleep(delay);
762 }
763}
764
/**
 *	t3_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianess.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	/* build the command word: opcode in the low byte, byte-swapped
	 * 24-bit address above it */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	/* issue the command, then a 1-byte read (presumably the fast-read
	 * dummy byte — confirm against the flash datasheet) */
	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		/* keep the chain going (cont) until the last word */
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
801
/**
 *	t3_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address, then reads the page back to verify the write.
 *	Returns -EINVAL for a write crossing a page boundary, -EIO if the
 *	verify fails.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* the write must stay within one 256-byte flash page */
	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	/* opcode in the low byte, byte-swapped 24-bit address above it */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	/* stream the payload up to 4 bytes at a time, MSB first */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* data was advanced past the written bytes; back up by n */
	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}
849
/**
 *	t3_check_tpsram_version - read the tp sram version
 *	@adapter: the adapter
 *
 *	Reads the protocol SRAM version via the TP embedded-op registers and
 *	compares it against the version this driver was built for.  Returns 0
 *	on a match, -EINVAL on a mismatch, or a polling error.
 */
int t3_check_tpsram_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	/* wait for bit 0 of FIELD0 to be set, after which FIELD1 holds the
	 * version word (inferred from the read below — confirm) */
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;

	return -EINVAL;
}
879
880/**
881 * t3_check_tpsram - check if provided protocol SRAM
882 * is compatible with this driver
883 * @adapter: the adapter
884 * @tp_sram: the firmware image to write
885 * @size: image size
886 *
887 * Checks if an adapter's tp sram is compatible with the driver.
888 * Returns 0 if the versions are compatible, a negative error otherwise.
889 */
890int t3_check_tpsram(struct adapter *adapter, u8 *tp_sram, unsigned int size)
891{
892 u32 csum;
893 unsigned int i;
894 const u32 *p = (const u32 *)tp_sram;
895
896 /* Verify checksum */
897 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
898 csum += ntohl(p[i]);
899 if (csum != 0xffffffff) {
900 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
901 csum);
902 return -EINVAL;
903 }
904
905 return 0;
906}
907
/* firmware flavor encoded in the version word's type field */
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};
912
/**
 *	t3_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version word from its fixed location (FW_VERS_ADDR)
 *	in serial flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}
924
/**
 *	t3_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	/* unpack the type/major/minor fields of the version word */
	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	/* only T3 firmware with an exact major.minor match is accepted */
	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;

	CH_ERR(adapter, "found wrong FW version(%u.%u), "
	       "driver needs version %u.%u\n", major, minor,
	       FW_VERSION_MAJOR, FW_VERSION_MINOR);
	return -EINVAL;
}
955
956/**
957 * t3_flash_erase_sectors - erase a range of flash sectors
958 * @adapter: the adapter
959 * @start: the first sector to erase
960 * @end: the last sector to erase
961 *
962 * Erases the sectors in the given range.
963 */
964static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
965{
966 while (start <= end) {
967 int ret;
968
969 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
970 (ret = sf1_write(adapter, 4, 0,
971 SF_ERASE_SECTOR | (start << 8))) != 0 ||
972 (ret = flash_wait_op(adapter, 5, 500)) != 0)
973 return ret;
974 start++;
975 }
976 return 0;
977}
978
/**
 *	t3_load_fw - download firmware
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The FW image has the following sections: @size - 8 bytes of code and
 *	data, followed by 4 bytes of FW version, followed by the 32-bit
 *	1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const u32 *p = (const u32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	/* the image must fit between the boot address and the version word */
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	/* verify the 1's-complement checksum over the whole image */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);	/* flash page */

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	/* write the version word last; fw_data now points at it */
	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
1033
1034#define CIM_CTL_BASE 0x2000
1035
1036/**
1037 * t3_cim_ctl_blk_read - read a block from CIM control region
1038 *
1039 * @adap: the adapter
1040 * @addr: the start address within the CIM control region
1041 * @n: number of words to read
1042 * @valp: where to store the result
1043 *
1044 * Reads a block of 4-byte words from the CIM control region.
1045 */
1046int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
1047 unsigned int n, unsigned int *valp)
1048{
1049 int ret = 0;
1050
1051 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1052 return -EBUSY;
1053
1054 for ( ; !ret && n--; addr += 4) {
1055 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1056 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1057 0, 5, 2);
1058 if (!ret)
1059 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1060 }
1061 return ret;
1062}
1063
1064
/**
 *	t3_link_changed - handle interface link changes
 *	@adapter: the adapter
 *	@port_id: the port index that changed link state
 *
 *	Called when a port's link settings change to propagate the new values
 *	to the associated PHY and MAC.  After performing the common tasks it
 *	invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	/* On rev > 0 XAUI adapters, reset the PCS on link-up and gate the
	 * XAUI TX/RX activity on the new link state.
	 */
	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	/* Negative speed/duplex from the PHY means "unknown". */
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;	/* limit to what was requested */
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}
1107
/**
 *	t3_link_start - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 *
 *	Always returns 0.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		/* Rebuild the pause advertisement from the requested FC. */
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			/* Forced mode: program MAC and PHY now. */
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
			phy->ops->reset(phy, 0);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		/* PHY cannot autonegotiate: -1 leaves MAC speed/duplex as-is. */
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}
1153
1154/**
1155 * t3_set_vlan_accel - control HW VLAN extraction
1156 * @adapter: the adapter
1157 * @ports: bitmap of adapter ports to operate on
1158 * @on: enable (1) or disable (0) HW VLAN extraction
1159 *
1160 * Enables or disables HW extraction of VLAN tags for the given port.
1161 */
1162void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1163{
1164 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1165 ports << S_VLANEXTRACTIONENABLE,
1166 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1167}
1168
/*
 * Describes one interrupt condition for the table-driven handler
 * (t3_handle_intr_status): which status bits to test and what to do
 * when they are set.
 */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal:1;	/* whether the condition reported is fatal */
};
1175
/**
 *	t3_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@mask: a mask to apply to the interrupt status
 *	@acts: table of interrupt actions
 *	@stats: statistics counters tracking interrupt occurrences
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally printing a warning or alert message, and optionally
 *	incrementing a stat counter.  The table is terminated by an entry
 *	specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		/* stat_idx < 0 means "no counter for this condition" */
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)		/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}
1216
/*
 * Per-module interrupt masks used when enabling and dispatching interrupts.
 * Conditions commented out inside a mask are deliberately excluded.
 */
#define SGE_INTR_MASK (F_RSPQDISABLED)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			V_BISTERR(M_BISTERR) | F_PEXERR)
#define ULPRX_INTR_MASK F_PARERR
#define ULPTX_INTR_MASK 0
#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
1258
/*
 * Interrupt handler for the PCIX1 module.  Every condition except the
 * correctable ECC error is treated as fatal and brings the adapter down.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1294
/*
 * Interrupt handler for the PCIE module.  All reported conditions are
 * fatal.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1320
/*
 * TP interrupt handler.  The raw hex masks select bit ranges of
 * A_TP_INT_CAUSE; all conditions are fatal.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{0xffffff, "TP parity error", -1, 1},
		{0x1000000, "TP out of Rx pages", -1, 1},
		{0x2000000, "TP out of Tx pages", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  tp_intr_info, NULL))
		t3_fatal_err(adapter);
}
1337
/*
 * CIM interrupt handler.  All reported conditions are fatal.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}
1363
/*
 * ULP RX interrupt handler.  The only reported condition, a parity error,
 * is fatal.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERR, "ULP RX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1378
/*
 * ULP TX interrupt handler.  PBL out-of-bounds conditions are counted but
 * non-fatal; t3_fatal_err is only reached if the table reports a fatal
 * condition (currently none are marked fatal).
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1396
/* Aggregate framing-error bits for the PM TX SPI interfaces. */
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM TX interrupt handler.  All reported conditions are fatal.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1426
/* Aggregate framing-error bits for the PM RX SPI interfaces. */
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM RX interrupt handler.  All reported conditions are fatal.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1456
/*
 * CPL switch interrupt handler.  All reported conditions are fatal; the
 * CIM overflow entry is intentionally disabled.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
/*		{ F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, */
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}
1475
/*
 * MPS interrupt handler.  Any of the low 9 parity-error bits is fatal.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}
1490
/* MC7 conditions that take the adapter down. */
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.  Logs and counts correctable, uncorrectable,
 * parity, and address errors; fatal conditions (MC7_INTR_FATAL) bring the
 * adapter down.  The cause register is cleared on exit.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		/* The error-address register only exists on rev > 0 parts. */
		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
1543
/* XGMAC conditions that take the adapter down. */
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
/*
 * XGMAC interrupt handler.  Updates the per-MAC statistics for each
 * reported condition, clears the cause register, and declares a fatal
 * error on FIFO parity problems.  Returns nonzero if any condition was
 * pending.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;

	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);
	return cause != 0;
}
1578
/*
 * Interrupt handler for PHY events.  Each port owns one bit of the GPIO
 * interrupt bitmap (gpio_intr); ports are matched to bits in ascending
 * order.  For each interrupting PHY the per-PHY handler is invoked and
 * link changes / FIFO errors are propagated.  Always returns 0.
 */
int t3_phy_intr_handler(struct adapter *adapter)
{
	u32 mask, gpi = adapter_info(adapter)->gpio_intr;
	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);

	for_each_port(adapter, i) {
		struct port_info *p = adap2pinfo(adapter, i);

		/* Isolate the lowest set bit of gpi as this port's GPIO
		 * interrupt bit, then consume it.
		 */
		mask = gpi - (gpi & (gpi - 1));
		gpi -= mask;

		/* Ports whose PHYs have no interrupt capability are skipped
		 * (their GPIO bit has already been consumed above).
		 */
		if (!(p->port_type->caps & SUPPORTED_IRQ))
			continue;

		if (cause & mask) {
			int phy_cause = p->phy.ops->intr_handler(&p->phy);

			if (phy_cause & cphy_cause_link_change)
				t3_link_changed(adapter, i);
			if (phy_cause & cphy_cause_fifo_error)
				p->phy.fifo_errors++;
		}
	}

	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
	return 0;
}
1609
/*
 * T3 slow path (non-data) interrupt handler.  Reads the top-level cause
 * register, dispatches to the per-module handlers for every pending
 * source, then clears the processed bits.  Returns 0 if no enabled
 * interrupt was pending, 1 otherwise.
 */
int t3_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

	/* Only consider sources we enabled. */
	cause &= adapter->slow_intr_mask;
	if (!cause)
		return 0;
	if (cause & F_PCIM0) {
		/* The same top-level bit covers PCI-X and PCIe variants. */
		if (is_pcie(adapter))
			pcie_intr_handler(adapter);
		else
			pci_intr_handler(adapter);
	}
	if (cause & F_SGE3)
		t3_sge_err_intr_handler(adapter);
	if (cause & F_MC7_PMRX)
		mc7_intr_handler(&adapter->pmrx);
	if (cause & F_MC7_PMTX)
		mc7_intr_handler(&adapter->pmtx);
	if (cause & F_MC7_CM)
		mc7_intr_handler(&adapter->cm);
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_TP1)
		tp_intr_handler(adapter);
	if (cause & F_ULP2_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_ULP2_TX)
		ulptx_intr_handler(adapter);
	if (cause & F_PM1_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_PM1_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_MPS0)
		mps_intr_handler(adapter);
	if (cause & F_MC5A)
		t3_mc5_intr_handler(&adapter->mc5);
	if (cause & F_XGMAC0_0)
		mac_intr_handler(adapter, 0);
	if (cause & F_XGMAC0_1)
		mac_intr_handler(adapter, 1);
	if (cause & F_T3DBG)
		t3_os_ext_intr_handler(adapter);

	/* Clear the interrupts just processed. */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
	return 1;
}
1664
/**
 *	t3_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable interrupts by setting the interrupt enable registers of the
 *	various HW modules and then enabling the top-level interrupt
 *	concentrator.
 */
void t3_intr_enable(struct adapter *adapter)
{
	static const struct addr_val_pair intr_en_avp[] = {
		{A_SG_INT_ENABLE, SGE_INTR_MASK},
		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
		/* Same MC7 enable register for the PMTX and CM instances,
		 * addressed relative to the PMRX base.
		 */
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
		{A_TP_INT_ENABLE, 0x3bfffff},
		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
	};

	adapter->slow_intr_mask = PL_INTR_MASK;

	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);

	/* rev > 0 parts support additional CPL/ULPTX conditions. */
	if (adapter->params.rev > 0) {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
			     F_PBL_BOUND_ERR_CH1);
	} else {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
	}

	/* Route the board's PHY GPIO interrupt bits. */
	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
		     adapter_info(adapter)->gpio_intr);
	t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
		     adapter_info(adapter)->gpio_intr);
	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
	else
		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
}
1717
/**
 *	t3_intr_disable - disable a card's interrupts
 *	@adapter: the adapter whose interrupts should be disabled
 *
 *	Disable interrupts.  We only disable the top-level interrupt
 *	concentrator and the SGE data interrupts.
 */
void t3_intr_disable(struct adapter *adapter)
{
	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
	/* Nothing dispatches from t3_slow_intr_handler after this. */
	adapter->slow_intr_mask = 0;
}
1731
/**
 *	t3_intr_clear - clear all interrupts
 *	@adapter: the adapter whose interrupts should be cleared
 *
 *	Clears all interrupts: per-port PHY/MAC interrupts first, then every
 *	per-module cause register, then the top-level concentrator.
 */
void t3_intr_clear(struct adapter *adapter)
{
	static const unsigned int cause_reg_addr[] = {
		A_SG_INT_CAUSE,
		A_SG_RSPQ_FL_STATUS,
		A_PCIX_INT_CAUSE,
		A_MC7_INT_CAUSE,
		/* MC7 PMTX and CM instances, relative to the PMRX base. */
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		A_CIM_HOST_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_MC5_DB_INT_CAUSE,
		A_ULPRX_INT_CAUSE,
		A_ULPTX_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		A_PM1_TX_INT_CAUSE,
		A_PM1_RX_INT_CAUSE,
		A_MPS_INT_CAUSE,
		A_T3DBG_INT_CAUSE,
	};
	unsigned int i;

	/* Clear PHY and MAC interrupts for each port. */
	for_each_port(adapter, i)
		t3_port_intr_clear(adapter, i);

	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
}
1770
1771/**
1772 * t3_port_intr_enable - enable port-specific interrupts
1773 * @adapter: associated adapter
1774 * @idx: index of port whose interrupts should be enabled
1775 *
1776 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1777 * adapter port.
1778 */
1779void t3_port_intr_enable(struct adapter *adapter, int idx)
1780{
1781 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1782
1783 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1784 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1785 phy->ops->intr_enable(phy);
1786}
1787
1788/**
1789 * t3_port_intr_disable - disable port-specific interrupts
1790 * @adapter: associated adapter
1791 * @idx: index of port whose interrupts should be disabled
1792 *
1793 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1794 * adapter port.
1795 */
1796void t3_port_intr_disable(struct adapter *adapter, int idx)
1797{
1798 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1799
1800 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1801 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1802 phy->ops->intr_disable(phy);
1803}
1804
1805/**
1806 * t3_port_intr_clear - clear port-specific interrupts
1807 * @adapter: associated adapter
1808 * @idx: index of port whose interrupts to clear
1809 *
1810 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1811 * adapter port.
1812 */
1813void t3_port_intr_clear(struct adapter *adapter, int idx)
1814{
1815 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1816
1817 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1818 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1819 phy->ops->intr_clear(phy);
1820}
1821
/**
 *	t3_sge_write_context - write an SGE context
 *	@adapter: the adapter
 *	@id: the context id
 *	@type: the context type
 *
 *	Program an SGE context with the values already loaded in the
 *	CONTEXT_DATA? registers.  All four mask registers are set to
 *	all-ones so every context field is written.  Returns the result of
 *	waiting for the command to complete.
 */
static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
				unsigned int type)
{
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, 5, 1);
}
1843
/**
 *	t3_sge_init_ecntxt - initialize an SGE egress context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@gts_enable: whether to enable GTS for the context
 *	@type: the egress context type
 *	@respq: associated response queue
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@token: uP token
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE egress context and make it ready for use.  If the
 *	platform allows concurrent context operations, the caller is
 *	responsible for appropriate locking.
 *
 *	Returns -EINVAL if @base_addr is not 4K aligned, -EBUSY if a context
 *	command is already in flight, otherwise the result of writing the
 *	context.
 */
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
		       enum sge_context_type type, int respq, u64 base_addr,
		       unsigned int size, unsigned int token, int gen,
		       unsigned int cidx)
{
	/* Offload contexts start with no FW write-request credits. */
	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* The 4K-aligned base address is split across DATA1..DATA3. */
	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
		     V_EC_BASE_LO(base_addr & 0xffff));
	base_addr >>= 16;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
		     F_EC_VALID);
	return t3_sge_write_context(adapter, id, F_EGRESS);
}
1887
/**
 *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@gts_enable: whether to enable GTS for the context
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@bsize: size of each buffer for this queue
 *	@cong_thres: threshold to signal congestion to upstream producers
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE free list context and make it ready for use.  The
 *	caller is responsible for ensuring only one context operation occurs
 *	at a time.
 *
 *	Returns -EINVAL if @base_addr is not 4K aligned, -EBUSY if a context
 *	command is already in flight, otherwise the result of writing the
 *	context.
 */
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
			int gts_enable, u64 base_addr, unsigned int size,
			unsigned int bsize, unsigned int cong_thres, int gen,
			unsigned int cidx)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Base address, index, and buffer size are each split across
	 * adjacent DATA registers.
	 */
	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
		     V_FL_BASE_HI((u32) base_addr) |
		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
	return t3_sge_write_context(adapter, id, F_FREELIST);
}
1928
/**
 *	t3_sge_init_rspcntxt - initialize an SGE response queue context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
 *	@base_addr: base address of queue (must be 4K aligned)
 *	@size: number of queue entries
 *	@fl_thres: threshold for selecting the normal or jumbo free list
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE response queue context and make it ready for use.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 */
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
			 int irq_vec_idx, u64 base_addr, unsigned int size,
			 unsigned int fl_thres, int gen, unsigned int cidx)
{
	unsigned int intr = 0;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;	/* programmed as a 4K page number */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
		     V_CQ_INDEX(cidx));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	/* irq_vec_idx < 0 leaves interrupts disabled for this queue. */
	if (irq_vec_idx >= 0)
		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
}
1967
/**
 *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@base_addr: base address of queue (must be 4K aligned)
 *	@size: number of queue entries
 *	@rspq: response queue for async notifications
 *	@ovfl_mode: CQ overflow mode
 *	@credits: completion queue credits
 *	@credit_thres: the credit threshold
 *
 *	Initialize an SGE completion queue context and make it ready for use.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 */
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
			unsigned int size, int rspq, int ovfl_mode,
			unsigned int credits, unsigned int credit_thres)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;	/* programmed as a 4K page number */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	/* Generation always starts at 1 for a fresh CQ. */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
		     V_CQ_CREDIT_THRES(credit_thres));
	return t3_sge_write_context(adapter, id, F_CQ);
}
2003
/**
 *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
 *	@adapter: the adapter
 *	@id: the egress context id
 *	@enable: enable (1) or disable (0) the context
 *
 *	Enable or disable an SGE egress context.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Mask registers select which context bits the write modifies:
	 * here only the EC_VALID bit in word 3 is touched. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
	/* Opcode 1 = masked context write. */
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, 5, 1);
}
2028
/**
 *	t3_sge_disable_fl - disable an SGE free-buffer list
 *	@adapter: the adapter
 *	@id: the free list context id
 *
 *	Disable an SGE free-buffer list.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Masked write of FL_SIZE only: setting the size to 0 disables
	 * the free list. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, 5, 1);
}
2052
/**
 *	t3_sge_disable_rspcntxt - disable an SGE response queue
 *	@adapter: the adapter
 *	@id: the response queue context id
 *
 *	Disable an SGE response queue.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Masked write zeroing CQ_SIZE disables the queue. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, 5, 1);
}
2076
/**
 *	t3_sge_disable_cqcntxt - disable an SGE completion queue
 *	@adapter: the adapter
 *	@id: the completion queue context id
 *
 *	Disable an SGE completion queue.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Masked write zeroing CQ_SIZE disables the queue. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, 5, 1);
}
2100
/**
 *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
 *	@adapter: the adapter
 *	@id: the context id
 *	@op: the operation to perform
 *	@credits: credit value written to the context (shifted into bits 31:16)
 *
 *	Perform the selected operation on an SGE completion queue context.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.  For ops in the range [2, 7) the current CQ index
 *	is returned on success; otherwise 0 is returned.
 */
int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
		      unsigned int credits)
{
	u32 val;

	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
		     V_CONTEXT(id) | F_CQ);
	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
				0, 5, 1, &val))
		return -EIO;

	if (op >= 2 && op < 7) {
		/* Rev > 0 silicon returns the CQ index directly in the
		 * command register's readback value. */
		if (adapter->params.rev > 0)
			return G_CQ_INDEX(val);

		/* Rev 0 workaround: read the context back (opcode 0) to
		 * fetch the CQ index from DATA0. */
		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
				    F_CONTEXT_CMD_BUSY, 0, 5, 1))
			return -EIO;
		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
	}
	return 0;
}
2139
/**
 *	t3_sge_read_context - read an SGE context
 *	@type: the context type (F_EGRESS, F_RESPONSEQ, F_FREELIST, or F_CQ)
 *	@adapter: the adapter
 *	@id: the context id
 *	@data: holds the retrieved context
 *
 *	Read an SGE context of the given type into @data[0..3].  The caller
 *	is responsible for ensuring only one context operation occurs at a
 *	time.
 */
static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
			       unsigned int id, u32 data[4])
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Opcode 0 = context read; result appears in the DATA registers. */
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
			    5, 1))
		return -EIO;
	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
	return 0;
}
2167
2168/**
2169 * t3_sge_read_ecntxt - read an SGE egress context
2170 * @adapter: the adapter
2171 * @id: the context id
2172 * @data: holds the retrieved context
2173 *
2174 * Read an SGE egress context. The caller is responsible for ensuring
2175 * only one context operation occurs at a time.
2176 */
2177int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2178{
2179 if (id >= 65536)
2180 return -EINVAL;
2181 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2182}
2183
2184/**
2185 * t3_sge_read_cq - read an SGE CQ context
2186 * @adapter: the adapter
2187 * @id: the context id
2188 * @data: holds the retrieved context
2189 *
2190 * Read an SGE CQ context. The caller is responsible for ensuring
2191 * only one context operation occurs at a time.
2192 */
2193int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2194{
2195 if (id >= 65536)
2196 return -EINVAL;
2197 return t3_sge_read_context(F_CQ, adapter, id, data);
2198}
2199
2200/**
2201 * t3_sge_read_fl - read an SGE free-list context
2202 * @adapter: the adapter
2203 * @id: the context id
2204 * @data: holds the retrieved context
2205 *
2206 * Read an SGE free-list context. The caller is responsible for ensuring
2207 * only one context operation occurs at a time.
2208 */
2209int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2210{
2211 if (id >= SGE_QSETS * 2)
2212 return -EINVAL;
2213 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2214}
2215
2216/**
2217 * t3_sge_read_rspq - read an SGE response queue context
2218 * @adapter: the adapter
2219 * @id: the context id
2220 * @data: holds the retrieved context
2221 *
2222 * Read an SGE response queue context. The caller is responsible for
2223 * ensuring only one context operation occurs at a time.
2224 */
2225int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2226{
2227 if (id >= SGE_QSETS)
2228 return -EINVAL;
2229 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2230}
2231
/**
 *	t3_config_rss - configure Rx packet steering
 *	@adapter: the adapter
 *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
 *	@cpus: values for the CPU lookup table (0xff terminated)
 *	@rspq: values for the response queue lookup table (0xffff terminated)
 *
 *	Programs the receive packet steering logic.  @cpus and @rspq provide
 *	the values for the CPU and response queue lookup tables.  If they
 *	provide fewer values than the size of the tables the supplied values
 *	are used repeatedly until the tables are fully populated.
 */
void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
		   const u8 * cpus, const u16 *rspq)
{
	int i, j, cpu_idx = 0, q_idx = 0;

	if (cpus)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			u32 val = i << 16;	/* table index in bits 31:16 */

			/* Pack two 6-bit CPU values per table entry.  The
			 * terminator check happens after consuming, so the
			 * index wraps before the 0xff sentinel is used. */
			for (j = 0; j < 2; ++j) {
				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
				if (cpus[cpu_idx] == 0xff)
					cpu_idx = 0;
			}
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
		}

	if (rspq)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     (i << 16) | rspq[q_idx++]);
			/* Wrap before the 0xffff terminator is consumed. */
			if (rspq[q_idx] == 0xffff)
				q_idx = 0;
		}

	/* Enable/commit the configuration last. */
	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
}
2271
/**
 *	t3_read_rss - read the contents of the RSS tables
 *	@adapter: the adapter
 *	@lkup: holds the contents of the RSS lookup table
 *	@map: holds the contents of the RSS map table
 *
 *	Reads the contents of the receive packet steering tables.  Returns
 *	0 on success or -EAGAIN if a table entry reads back without its
 *	valid bit (bit 31) set.
 */
int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
{
	int i;
	u32 val;

	if (lkup)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			/* Writing 0xffff0000 | i requests a read-back of
			 * entry i. */
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
			if (!(val & 0x80000000))
				return -EAGAIN;
			/* Each entry holds two packed CPU values. */
			*lkup++ = val;
			*lkup++ = (val >> 8);
		}

	if (map)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
			if (!(val & 0x80000000))
				return -EAGAIN;
			*map++ = val;
		}
	return 0;
}
2307
2308/**
2309 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2310 * @adap: the adapter
2311 * @enable: 1 to select offload mode, 0 for regular NIC
2312 *
2313 * Switches TP to NIC/offload mode.
2314 */
2315void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2316{
2317 if (is_offload(adap) || !enable)
2318 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2319 V_NICMODE(!enable));
2320}
2321
/**
 *	pm_num_pages - calculate the number of pages of the payload memory
 *	@mem_size: the size of the payload memory
 *	@pg_size: the size of each payload memory page
 *
 *	Calculate how many pages of the given size fit in the memory,
 *	rounded down to a multiple of 24 as required by the HW.
 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
					unsigned int pg_size)
{
	/* Integer division truncates, so this rounds down to 24's. */
	return mem_size / pg_size / 24 * 24;
}
2338
/*
 * Program a TP memory-region base register and advance the running start
 * offset.  Wrapped in do { } while (0) so both statements stay together
 * if the macro is ever used in an unbraced conditional, and the arguments
 * are parenthesized against operator-precedence surprises.
 */
#define mem_region(adap, start, size, reg) \
	do { \
		t3_write_reg((adap), A_ ## reg, (start)); \
		(start) += (size); \
	} while (0)
2342
/*
 * partition_mem - partition memory and configure TP memory settings
 * @adap: the adapter
 * @p: the TP parameters
 *
 * Partitions context and payload memory and configures TP's memory
 * registers.
 */
static void partition_mem(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
	unsigned int timers = 0, timers_shift = 22;

	/* Rev > 0 parts size the timer region to the number of TIDs;
	 * rev 0 always uses the largest (1 << 22) region. */
	if (adap->params.rev > 0) {
		if (tids <= 16 * 1024) {
			timers = 1;
			timers_shift = 16;
		} else if (tids <= 64 * 1024) {
			timers = 2;
			timers_shift = 18;
		} else if (tids <= 256 * 1024) {
			timers = 3;
			timers_shift = 20;
		}
	}

	t3_write_reg(adap, A_TP_PMM_SIZE,
		     p->chan_rx_size | (p->chan_tx_size >> 16));

	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));

	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);

	pstructs = p->rx_num_pgs + p->tx_num_pgs;
	/* Add a bit of headroom and make multiple of 24 */
	pstructs += 48;
	pstructs -= pstructs % 24;
	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);

	/* Lay out the regions back to back; mem_region() advances m by
	 * the region size after programming each base register. */
	m = tids * TCB_SIZE;
	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);

	/* CIM gets the remainder of CM, starting at the next 4K boundary. */
	m = (m + 4095) & ~0xfff;
	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);

	/* Shrink the server region if fewer TIDs fit than MC5 was sized
	 * for, growing nservers to absorb the difference. */
	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
	if (tids < m)
		adap->params.mc5.nservers += m - tids;
}
2408
/* Write an indirect TP register: the address must be latched into
 * TP_PIO_ADDR before the value is written to TP_PIO_DATA. */
static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
				  u32 val)
{
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	t3_write_reg(adap, A_TP_PIO_DATA, val);
}
2415
/* One-time static TP configuration: checksum offloads, TCP options,
 * delayed-ACK behavior, congestion/pacing modes, and TX scheduler
 * weights/rate limit. */
static void tp_config(struct adapter *adap, const struct tp_params *p)
{
	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
		     V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
		     V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
			 F_IPV6ENABLE | F_NICMODE);
	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
	/* Rev A silicon has the early-send enable in a different bit. */
	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
			 adap->params.rev > 0 ? F_ENABLEESND :
			 F_T3A_ENABLEESND);

	t3_set_reg_field(adap, A_TP_PC_CONFIG,
			 F_ENABLEEPCMDAFULL,
			 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
	/* NOTE(review): two back-to-back writes with different values;
	 * presumably the HW latches both levels — confirm against the
	 * register spec before simplifying. */
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);

	if (adap->params.rev > 0) {
		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
				 F_TXPACEAUTO);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
	} else
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
}
2458
/* Desired TP timer resolution in usec */
#define TP_TMR_RES 50

/* TCP timer values in ms: delayed-ACK timer and minimum retransmit timeout */
#define TP_DACK_TIMER 50
#define TP_RTO_MIN 250
2465
/**
 *	tp_set_timers - set TP timing parameters
 *	@adap: the adapter to set
 *	@core_clk: the core clock frequency in Hz
 *
 *	Set TP's timing parameters, such as the various timer resolutions and
 *	the TCP timer values.
 */
static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
{
	/* Resolutions are expressed as a power-of-two divider of the core
	 * clock (fls picks the closest exponent). */
	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
	unsigned int tps = core_clk >> tre;	/* timer ticks per second */

	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
		     V_DELAYEDACKRESOLUTION(dack_re) |
		     V_TIMESTAMPRESOLUTION(tstamp_re));
	t3_write_reg(adap, A_TP_DACK_TIMER,
		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
	/* Exponential backoff multipliers, four 8-bit values per register. */
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
		     V_KEEPALIVEMAX(9));

/* Converts a value in seconds to timer ticks (reads as "N SECONDS"). */
#define SECONDS * tps

	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);

#undef SECONDS
}
2509
/**
 *	t3_tp_set_coalescing_size - set receive coalescing size
 *	@adap: the adapter
 *	@size: the receive coalescing size (0 disables coalescing)
 *	@psh: whether a set PSH bit should deliver coalesced data
 *
 *	Set the receive coalescing size and PSH bit handling.  Returns
 *	-EINVAL if @size exceeds MAX_RX_COALESCING_LEN.
 */
int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
{
	u32 val;

	if (size > MAX_RX_COALESCING_LEN)
		return -EINVAL;

	val = t3_read_reg(adap, A_TP_PARA_REG3);
	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);

	if (size) {
		val |= F_RXCOALESCEENABLE;
		if (psh)
			val |= F_RXCOALESCEPSHEN;
		/* Redundant with the range check above, kept as a belt-and-
		 * braces clamp. */
		size = min(MAX_RX_COALESCING_LEN, size);
		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
	}
	t3_write_reg(adap, A_TP_PARA_REG3, val);
	return 0;
}
2539
/**
 *	t3_tp_set_max_rxsize - set the max receive size
 *	@adap: the adapter
 *	@size: the max receive size
 *
 *	Set TP's max receive size.  This is the limit that applies when
 *	receive coalescing is disabled.  The same limit is programmed for
 *	both PM transfer-length channels.
 */
void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
{
	t3_write_reg(adap, A_TP_PARA_REG7,
		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
}
2553
2554static void __devinit init_mtus(unsigned short mtus[])
2555{
2556 /*
2557 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2558 * it can accomodate max size TCP/IP headers when SACK and timestamps
2559 * are enabled and still have at least 8 bytes of payload.
2560 */
Divy Le Ray8a9fab22007-05-30 21:10:52 -07002561 mtus[1] = 88;
2562 mtus[1] = 88;
2563 mtus[2] = 256;
2564 mtus[3] = 512;
2565 mtus[4] = 576;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002566 mtus[5] = 1024;
2567 mtus[6] = 1280;
2568 mtus[7] = 1492;
2569 mtus[8] = 1500;
2570 mtus[9] = 2002;
2571 mtus[10] = 2048;
2572 mtus[11] = 4096;
2573 mtus[12] = 4352;
2574 mtus[13] = 8192;
2575 mtus[14] = 9000;
2576 mtus[15] = 9600;
2577}
2578
2579/*
2580 * Initial congestion control parameters.
2581 */
2582static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2583{
2584 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2585 a[9] = 2;
2586 a[10] = 3;
2587 a[11] = 4;
2588 a[12] = 5;
2589 a[13] = 6;
2590 a[14] = 7;
2591 a[15] = 8;
2592 a[16] = 9;
2593 a[17] = 10;
2594 a[18] = 14;
2595 a[19] = 17;
2596 a[20] = 21;
2597 a[21] = 25;
2598 a[22] = 30;
2599 a[23] = 35;
2600 a[24] = 45;
2601 a[25] = 60;
2602 a[26] = 80;
2603 a[27] = 100;
2604 a[28] = 200;
2605 a[29] = 300;
2606 a[30] = 400;
2607 a[31] = 500;
2608
2609 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2610 b[9] = b[10] = 1;
2611 b[11] = b[12] = 2;
2612 b[13] = b[14] = b[15] = b[16] = 3;
2613 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2614 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2615 b[28] = b[29] = 6;
2616 b[30] = b[31] = 7;
2617}
2618
/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 *	t3_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the unrestricted values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *	@mtu_cap: the maximum permitted effective MTU
 *
 *	Write the MTU table with the supplied MTUs capping each at @mtu_cap.
 *	Update the high-speed congestion control table with the supplied alpha,
 *	beta, and MTUs.
 */
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
		  unsigned short alpha[NCCTRL_WIN],
		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
{
	/* Representative segments per congestion window for each bin. */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = min(mtus[i], mtu_cap);
		/* log2 rounded to nearest rather than truncated. */
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))	/* round */
			log2--;
		t3_write_reg(adap, A_TP_MTU_TABLE,
			     (i << 24) | (log2 << 16) | mtu);

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* mtu - 40 approximates the TCP payload per segment
			 * (IP + TCP header overhead removed). */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
2666
/**
 *	t3_read_hw_mtus - returns the values in the HW MTU table
 *	@adap: the adapter
 *	@mtus: where to store the HW MTU values
 *
 *	Reads the HW MTU table.
 */
void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
{
	int i;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int val;

		/* Writing 0xff000000 | i requests a read-back of entry i. */
		t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
		val = t3_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = val & 0x3fff;	/* MTU lives in the low 14 bits */
	}
}
2686
/**
 *	t3_get_cong_cntl_tab - reads the congestion control table
 *	@adap: the adapter
 *	@incr: where to store the additive increment values
 *
 *	Reads the additive increments programmed into the HW congestion
 *	control table.
 */
void t3_get_cong_cntl_tab(struct adapter *adap,
			  unsigned short incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			/* 0xffff0000 selects read-back of entry (mtu, w). */
			t3_write_reg(adap, A_TP_CCTRL_TABLE,
				     0xffff0000 | (mtu << 5) | w);
			/* The increment occupies the low 13 bits. */
			incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
				       0x1fff;
		}
}
2708
/**
 *	t3_tp_get_mib_stats - read TP's MIB counters
 *	@adap: the adapter
 *	@tps: holds the returned counter values
 *
 *	Returns the values of TP's MIB counters, filling @tps with one
 *	indirect read per 32-bit word of the structure.
 */
void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
{
	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
			 sizeof(*tps) / sizeof(u32), 0);
}
2721
/*
 * Program the lower/upper limit registers for a ULP RX memory region and
 * advance the running start offset past it.  Wrapped in do { } while (0)
 * so the statements stay together in unbraced conditionals.
 */
#define ulp_region(adap, name, start, len) \
	do { \
		t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
		t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
			     (start) + (len) - 1); \
		(start) += (len); \
	} while (0)

/*
 * Program the lower/upper limit registers for a ULP TX memory region.
 * Unlike ulp_region() this does NOT advance the start offset, so a TX
 * region can share the address range of the following RX region.
 */
#define ulptx_region(adap, name, start, len) \
	do { \
		t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
		t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
			     (start) + (len) - 1); \
	} while (0)
2732
/* Carve the per-channel RX payload memory into the ULP regions.  The
 * ulp_region() macro advances m by each region's length; ulptx_region()
 * programs a TX window over the same range without advancing m, so TPT
 * and PBL are visible to both ULP RX and ULP TX. */
static void ulp_config(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m = p->chan_rx_size;

	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
2746
Divy Le Ray480fe1a2007-05-30 21:10:58 -07002747/**
2748 * t3_set_proto_sram - set the contents of the protocol sram
2749 * @adapter: the adapter
2750 * @data: the protocol image
2751 *
2752 * Write the contents of the protocol SRAM.
2753 */
2754int t3_set_proto_sram(struct adapter *adap, u8 *data)
2755{
2756 int i;
2757 u32 *buf = (u32 *)data;
2758
2759 for (i = 0; i < PROTO_SRAM_LINES; i++) {
2760 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
2761 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
2762 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
2763 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
2764 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
2765
2766 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2767 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2768 return -EIO;
2769 }
2770 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2771
2772 return 0;
2773}
2774
/**
 *	t3_config_trace_filter - configure a packet trace filter
 *	@adapter: the adapter
 *	@tp: the 5-tuple (plus VLAN/interface) key and mask values
 *	@filter_index: 0 selects the TX trace filter, non-zero the RX filter
 *	@invert: match packets that do NOT match the key
 *	@enable: enable (1) or disable (0) the filter
 *
 *	Programs one of TP's packet trace filters via indirect registers.
 */
void t3_config_trace_filter(struct adapter *adapter,
			    const struct trace_params *tp, int filter_index,
			    int invert, int enable)
{
	u32 addr, key[4], mask[4];

	/* Pack the source/dest IP and port, protocol, VLAN and interface
	 * fields into four key words, and their masks likewise. */
	key[0] = tp->sport | (tp->sip << 16);
	key[1] = (tp->sip >> 16) | (tp->dport << 16);
	key[2] = tp->dip;
	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);

	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
	mask[2] = tp->dip_mask;
	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);

	/* Control bits live in the top of key word 3. */
	if (invert)
		key[3] |= (1 << 29);
	if (enable)
		key[3] |= (1 << 28);

	/* Key and mask words are written interleaved at consecutive
	 * indirect addresses. */
	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
	tp_wr_indirect(adapter, addr++, key[0]);
	tp_wr_indirect(adapter, addr++, mask[0]);
	tp_wr_indirect(adapter, addr++, key[1]);
	tp_wr_indirect(adapter, addr++, mask[1]);
	tp_wr_indirect(adapter, addr++, key[2]);
	tp_wr_indirect(adapter, addr++, mask[2]);
	tp_wr_indirect(adapter, addr++, key[3]);
	tp_wr_indirect(adapter, addr, mask[3]);
	t3_read_reg(adapter, A_TP_PIO_DATA);	/* flush the writes */
}
2807
2808/**
2809 * t3_config_sched - configure a HW traffic scheduler
2810 * @adap: the adapter
2811 * @kbps: target rate in Kbps
2812 * @sched: the scheduler index
2813 *
2814 * Configure a HW scheduler for the target rate
2815 */
2816int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2817{
2818 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2819 unsigned int clk = adap->params.vpd.cclk * 1000;
2820 unsigned int selected_cpt = 0, selected_bpt = 0;
2821
2822 if (kbps > 0) {
2823 kbps *= 125; /* -> bytes */
2824 for (cpt = 1; cpt <= 255; cpt++) {
2825 tps = clk / cpt;
2826 bpt = (kbps + tps / 2) / tps;
2827 if (bpt > 0 && bpt <= 255) {
2828 v = bpt * tps;
2829 delta = v >= kbps ? v - kbps : kbps - v;
2830 if (delta <= mindelta) {
2831 mindelta = delta;
2832 selected_cpt = cpt;
2833 selected_bpt = bpt;
2834 }
2835 } else if (selected_cpt)
2836 break;
2837 }
2838 if (!selected_cpt)
2839 return -EINVAL;
2840 }
2841 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2842 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2843 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2844 if (sched & 1)
2845 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2846 else
2847 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2848 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
2849 return 0;
2850}
2851
/*
 * Configure and bring the TP (transport processor) out of reset.
 * Returns non-zero if the free-list initialization times out.
 */
static int tp_init(struct adapter *adap, const struct tp_params *p)
{
	int busy = 0;

	tp_config(adap, p);
	t3_set_vlan_accel(adap, 3, 0);

	if (is_offload(adap)) {
		/* Offload needs TP timers and the free-list init sequence. */
		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
				       0, 1000, 5);
		if (busy)
			CH_ERR(adap, "TP initialization timed out\n");
	}

	/* Only release TP from reset if the init sequence completed. */
	if (!busy)
		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
	return busy;
}
2872
2873int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2874{
2875 if (port_mask & ~((1 << adap->params.nports) - 1))
2876 return -EINVAL;
2877 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
2878 port_mask << S_PORT0ACTIVE);
2879 return 0;
2880}
2881
/*
 * Perform the bits of HW initialization that are dependent on the number
 * of available ports.
 */
static void init_hw_for_avail_ports(struct adapter *adap, int nports)
{
	int i;

	if (nports == 1) {
		/* Single port: disable round-robin arbitration. */
		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
			     F_PORT0ACTIVE | F_ENFORCEPKT);
		t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
	} else {
		/* Two ports: round-robin between them with equal weights. */
		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
			     F_ENFORCEPKT);
		/* Split PM1 Tx between the two channels. */
		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
		/* Program the Tx mod-queue table for all 16 entries. */
		for (i = 0; i < 16; i++)
			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
				     (i << 16) | 0x1010);
	}
}
2913
/*
 * Calibrate the MAC's I/O impedance.  XAUI interfaces use automatic
 * calibration with up to 5 retries; RGMII uses fixed values.
 * Returns 0 on success, -1 if XAUI calibration never completes cleanly.
 */
static int calibrate_xgm(struct adapter *adapter)
{
	if (uses_xaui(adapter)) {
		unsigned int v, i;

		for (i = 0; i < 5; ++i) {
			/* Kick off a calibration cycle and wait ~1ms. */
			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
			t3_read_reg(adapter, A_XGM_XAUI_IMP); /* flush */
			msleep(1);
			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
				/* Lock in the measured impedance value. */
				t3_write_reg(adapter, A_XGM_XAUI_IMP,
					     V_XAUIIMP(G_CALIMP(v) >> 2));
				return 0;
			}
		}
		CH_ERR(adapter, "MAC calibration failed\n");
		return -1;
	} else {
		/* RGMII: program fixed pull-down/pull-up impedance. */
		t3_write_reg(adapter, A_XGM_RGMII_IMP,
			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 F_XGM_IMPSETUPDATE);
	}
	return 0;
}
2940
/*
 * T3B variant of MAC impedance calibration for RGMII interfaces.
 * The exact register write/clear ordering below follows the hardware's
 * required reset -> update -> latch sequence; do not reorder.
 */
static void calibrate_xgm_t3b(struct adapter *adapter)
{
	if (!uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
				 F_XGM_IMPSETUPDATE);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
	}
}
2955
/* MC7 (DDR controller) timing parameters, in controller clock cycles. */
struct mc7_timing_params {
	unsigned char ActToPreDly;	/* ACTIVE to PRECHARGE delay */
	unsigned char ActToRdWrDly;	/* ACTIVE to READ/WRITE delay */
	unsigned char PreCyc;		/* PRECHARGE cycle time */
	unsigned char RefCyc[5];	/* REFRESH cycle time, per density */
	unsigned char BkCyc;		/* bank cycle time */
	unsigned char WrToRdDly;	/* WRITE to READ delay */
	unsigned char RdToWrDly;	/* READ to WRITE delay */
};
2965
2966/*
2967 * Write a value to a register and check that the write completed. These
2968 * writes normally complete in a cycle or two, so one read should suffice.
2969 * The very first read exists to flush the posted write to the device.
2970 */
2971static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
2972{
2973 t3_write_reg(adapter, addr, val);
2974 t3_read_reg(adapter, addr); /* flush */
2975 if (!(t3_read_reg(adapter, addr) & F_BUSY))
2976 return 0;
2977 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
2978 return -EIO;
2979}
2980
/*
 * Bring up one MC7 memory controller: calibrate, program timing, run the
 * DRAM init sequence, enable refresh and ECC, and BIST the full array.
 * @mc7_clock is the memory clock in KHz; @mem_type indexes the timing and
 * mode tables.  Returns 0 on success, -1 on any failure.
 *
 * NOTE: the register write ordering below is a hardware-mandated init
 * sequence; do not reorder.
 */
static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
	/* DRAM mode-register values, indexed by mem_type. */
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	/* Timing sets, indexed by mem_type (see struct mc7_timing_params). */
	static const struct mc7_timing_params mc7_timings[] = {
		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	struct adapter *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	/* Nothing to do for absent/zero-sized memories. */
	if (!mc7->size)
		return 0;

	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	/* Enable the memory interface and let it settle. */
	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
	msleep(1);

	if (!slow) {
		/* Run single calibration and check it completed cleanly. */
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
		msleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	/* Program the timing parameters for this memory type/density. */
	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

	/* DRAM init: precharge, extended mode registers, mode register. */
	val = slow ? 3 : 6;
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
		udelay(5);
	}

	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	/* clock value is in KHz */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
	mc7_clock /= 1000000; /* KHz->MHz, ns->us */

	/* Enable periodic refresh with the derived divider. */
	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */

	/* Enable ECC and BIST the whole memory (write then verify zeros). */
	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */

	/* Poll BIST completion for up to 50 * 250ms. */
	attempts = 50;
	do {
		msleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}

	/* Enable normal memory accesses. */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

out_fail:
	return -1;
}
3093
/*
 * Tune PCIe ack latency and replay timer limits based on the negotiated
 * link width and max payload size, then clear any stale PEX errors.
 * Table rows are indexed by log2(link width), columns by payload size.
 */
static void config_pcie(struct adapter *adap)
{
	/* Ack latency values, [log2_width][pldsize]. */
	static const u16 ack_lat[4][6] = {
		{237, 416, 559, 1071, 2095, 4143},
		{128, 217, 289, 545, 1057, 2081},
		{73, 118, 154, 282, 538, 1050},
		{67, 107, 86, 150, 278, 534}
	};
	/* Replay timer values, same indexing. */
	static const u16 rpl_tmr[4][6] = {
		{711, 1248, 1677, 3213, 6285, 12429},
		{384, 651, 867, 1635, 3171, 6243},
		{219, 354, 462, 846, 1614, 3150},
		{201, 321, 258, 450, 834, 1602}
	};

	u16 val;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
			     &val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
			     &val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	/* Rev 0 silicon has no separate Rx value; reuse the Tx one. */
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & 1)		/* check LOsEnable */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	/* Rev 0 uses a different ACKLAT field layout. */
	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	/* Clear any latched PEX errors and enable CLI decoding. */
	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
	t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
}
3144
/*
 * Initialize and configure T3 HW modules.  This performs the
 * initialization steps that need to be done once after a card is reset.
 * MAC and PHY initialization is handled separately whenever a port is
 * enabled.
 *
 * fw_params are passed to FW and their value is platform dependent.  Only
 * the top 8 bits are available for use, the rest must be 0.
 *
 * Returns 0 on success, -EIO on any failure.
 */
int t3_init_hw(struct adapter *adapter, u32 fw_params)
{
	int err = -EIO, attempts = 100;
	const struct vpd_params *vpd = &adapter->params.vpd;

	/* MAC impedance calibration; method differs by silicon rev. */
	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	if (vpd->mclk) {
		/* External memory present: partition and init MC7s + MC5. */
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
				adapter->params.mc5.nfilters,
				adapter->params.mc5.nroutes))
			goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);

	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
	init_hw_for_avail_ports(adapter, adapter->params.nports);
	t3_sge_init(adapter, &adapter->params.sge);

	/* Boot the microprocessor from flash with the given parameters. */
	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */

	do {			/* wait for uP to initialize */
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts) {
		CH_ERR(adapter, "uP initialization timed out\n");
		goto out_err;
	}

	err = 0;
out_err:
	return err;
}
3211
3212/**
3213 * get_pci_mode - determine a card's PCI mode
3214 * @adapter: the adapter
3215 * @p: where to store the PCI settings
3216 *
3217 * Determines a card's PCI mode and associated parameters, such as speed
3218 * and width.
3219 */
3220static void __devinit get_pci_mode(struct adapter *adapter,
3221 struct pci_params *p)
3222{
3223 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3224 u32 pci_mode, pcie_cap;
3225
3226 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3227 if (pcie_cap) {
3228 u16 val;
3229
3230 p->variant = PCI_VARIANT_PCIE;
3231 p->pcie_cap_addr = pcie_cap;
3232 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3233 &val);
3234 p->width = (val >> 4) & 0x3f;
3235 return;
3236 }
3237
3238 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3239 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3240 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3241 pci_mode = G_PCIXINITPAT(pci_mode);
3242 if (pci_mode == 0)
3243 p->variant = PCI_VARIANT_PCI;
3244 else if (pci_mode < 4)
3245 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3246 else if (pci_mode < 8)
3247 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3248 else
3249 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3250}
3251
3252/**
3253 * init_link_config - initialize a link's SW state
3254 * @lc: structure holding the link state
3255 * @ai: information about the current card
3256 *
3257 * Initializes the SW state maintained for each link, including the link's
3258 * capabilities and default speed/duplex/flow-control/autonegotiation
3259 * settings.
3260 */
3261static void __devinit init_link_config(struct link_config *lc,
3262 unsigned int caps)
3263{
3264 lc->supported = caps;
3265 lc->requested_speed = lc->speed = SPEED_INVALID;
3266 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3267 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3268 if (lc->supported & SUPPORTED_Autoneg) {
3269 lc->advertising = lc->supported;
3270 lc->autoneg = AUTONEG_ENABLE;
3271 lc->requested_fc |= PAUSE_AUTONEG;
3272 } else {
3273 lc->advertising = 0;
3274 lc->autoneg = AUTONEG_DISABLE;
3275 }
3276}
3277
3278/**
3279 * mc7_calc_size - calculate MC7 memory size
3280 * @cfg: the MC7 configuration
3281 *
3282 * Calculates the size of an MC7 memory in bytes from the value of its
3283 * configuration register.
3284 */
3285static unsigned int __devinit mc7_calc_size(u32 cfg)
3286{
3287 unsigned int width = G_WIDTH(cfg);
3288 unsigned int banks = !!(cfg & F_BKS) + 1;
3289 unsigned int org = !!(cfg & F_ORG) + 1;
3290 unsigned int density = G_DEN(cfg);
3291 unsigned int MBs = ((256 << density) * banks) / (org << width);
3292
3293 return MBs << 20;
3294}
3295
3296static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3297 unsigned int base_addr, const char *name)
3298{
3299 u32 cfg;
3300
3301 mc7->adapter = adapter;
3302 mc7->name = name;
3303 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3304 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
Divy Le Ray8ac3ba62007-03-31 00:23:19 -07003305 mc7->size = mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
Divy Le Ray4d22de32007-01-18 22:04:14 -05003306 mc7->width = G_WIDTH(cfg);
3307}
3308
3309void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3310{
3311 mac->adapter = adapter;
3312 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3313 mac->nucast = 1;
3314
3315 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3316 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3317 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3318 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3319 F_ENRGMII, 0);
3320 }
3321}
3322
/*
 * Early one-time HW setup: MDIO, I2C clock, GPIOs, and MAC port clocks.
 * Runs before per-port initialization.
 */
void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
{
	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);

	mi1_init(adapter, ai);
	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);

	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* Enable MAC clocks so we can access the registers */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);	/* flush */

	/* Release the clock-divider reset on both MACs. */
	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
}
3347
/*
 * Reset the adapter.
 * Older PCIe cards lose their config space during reset, PCI-X
 * ones don't, so save/restore config space only where needed.
 * Returns 0 on success, -1 if the device does not reappear.
 */
int t3_reset_adapter(struct adapter *adapter)
{
	/* Pre-B2 PCIe parts lose config space across a reset. */
	int i, save_and_restore_pcie =
	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
	uint16_t devid = 0;

	if (save_and_restore_pcie)
		pci_save_state(adapter->pdev);
	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);

	/*
	 * Delay. Give Some time to device to reset fully.
	 * XXX The delay time should be modified.
	 */
	for (i = 0; i < 10; i++) {
		msleep(50);
		/* Poll the vendor ID until the device answers again. */
		pci_read_config_word(adapter->pdev, 0x00, &devid);
		if (devid == 0x1425)
			break;
	}

	if (devid != 0x1425)
		return -1;

	if (save_and_restore_pcie)
		pci_restore_state(adapter->pdev);
	return 0;
}
3381
/*
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
 * Returns 0 on success, a negative error on VPD or reset failure.
 */
int __devinit t3_prep_adapter(struct adapter *adapter,
			      const struct adapter_info *ai, int reset)
{
	int ret;
	unsigned int i, j = 0;

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports;
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
	adapter->params.linkpoll_period = 0;
	/* 10G MACs accumulate stats faster, so poll more often. */
	adapter->params.stats_update_period = is_10G(adapter) ?
	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	t3_sge_prep(adapter, &adapter->params.sge);

	if (adapter->params.vpd.mclk) {
		/* External memory present: size MC7s and derive TP layout. */
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

		p->nchan = ai->nports;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		/* More timer queues when CM is large or on newer silicon. */
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
		    adapter->params.rev > 0 ? 12 : 6;
	}

	/* Offload requires all three MC7 memories to be present. */
	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
				  t3_mc7_size(&adapter->pmtx) &&
				  t3_mc7_size(&adapter->cm);

	if (is_offload(adapter)) {
		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
		    DEFAULT_NFILTERS : 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

		init_mtus(adapter->params.mtus);
		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
	}

	early_hw_init(adapter, ai);

	for_each_port(adapter, i) {
		u8 hw_addr[6];
		struct port_info *p = adap2pinfo(adapter, i);

		/* Skip VPD slots with no port type (unpopulated ports). */
		while (!adapter->params.vpd.port_type[j])
			++j;

		p->port_type = &port_types[adapter->params.vpd.port_type[j]];
		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
				       ai->mdio_ops);
		mac_prep(&p->mac, adapter, j);
		++j;

		/*
		 * The VPD EEPROM stores the base Ethernet address for the
		 * card. A port's address is derived from the base by adding
		 * the port's index to the base's low octet.
		 */
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		memcpy(adapter->port[i]->dev_addr, hw_addr,
		       ETH_ALEN);
		memcpy(adapter->port[i]->perm_addr, hw_addr,
		       ETH_ALEN);
		init_link_config(&p->link_config, p->port_type->caps);
		p->phy.ops->power_down(&p->phy, 1);
		/* PHYs without link interrupts need periodic link polling. */
		if (!(p->port_type->caps & SUPPORTED_IRQ))
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}
3483
/* Turn on the driver-ready LED by driving GPIO0 high. */
void t3_led_ready(struct adapter *adapter)
{
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
}