blob: 6e5b4992f17e97b36310be0fe0d54d07747314e9 [file] [log] [blame]
Divy Le Ray4d22de32007-01-18 22:04:14 -05001/*
Divy Le Ray1d68e932007-01-30 19:44:35 -08002 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
Divy Le Ray4d22de32007-01-18 22:04:14 -05003 *
Divy Le Ray1d68e932007-01-30 19:44:35 -08004 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
Divy Le Ray4d22de32007-01-18 22:04:14 -05009 *
Divy Le Ray1d68e932007-01-30 19:44:35 -080010 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
Divy Le Ray4d22de32007-01-18 22:04:14 -050031 */
Divy Le Ray4d22de32007-01-18 22:04:14 -050032#include "common.h"
33#include "regs.h"
34#include "sge_defs.h"
35#include "firmware_exports.h"
36
Divy Le Rayf2c68792007-01-30 19:44:13 -080037/**
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
46 *
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
51 */
Divy Le Ray4d22de32007-01-18 22:04:14 -050052
53int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
55{
56 while (1) {
57 u32 val = t3_read_reg(adapter, reg);
58
59 if (!!(val & mask) == polarity) {
60 if (valp)
61 *valp = val;
62 return 0;
63 }
64 if (--attempts == 0)
Divy Le Rayb8819552007-12-17 18:47:31 -080065 return -EAGAIN;
Divy Le Ray4d22de32007-01-18 22:04:14 -050066 if (delay)
67 udelay(delay);
68 }
69}
70
71/**
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
77 *
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
81 */
82void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
84{
85 while (n--) {
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
87 p++;
88 }
89}
90
91/**
92 * t3_set_reg_field - set a register field to a value
93 * @adapter: the adapter to program
94 * @addr: the register address
95 * @mask: specifies the portion of the register to modify
96 * @val: the new value for the register field
97 *
98 * Sets a register field specified by the supplied mask to the
99 * given value.
100 */
101void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
102 u32 val)
103{
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
105
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr); /* flush */
108}
109
110/**
111 * t3_read_indirect - read indirectly addressed registers
112 * @adap: the adapter
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @start_idx: index of first indirect register to read
117 * @nregs: how many indirect registers to read
118 *
119 * Reads registers that are accessed indirectly through an address/data
120 * register pair.
121 */
Stephen Hemminger9265fab2007-10-08 16:22:29 -0700122static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals,
124 unsigned int nregs, unsigned int start_idx)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500125{
126 while (nregs--) {
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
129 start_idx++;
130 }
131}
132
/**
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies MC7 to read from
 *	@start: index of first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Read n 64-bit words from MC7 starting at word start, using backdoor
 *	accesses.  Each 64-bit word is assembled from one or more backdoor
 *	reads depending on the memory interface width (mc7->width).
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	/* Per-width tables: how much of BD_DATA1 to shift off and how far
	 * apart consecutive chunks land in the assembled 64-bit word.
	 * Indexed by mc7->width; width 0 is handled separately below. */
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	/* Range-check the request against the memory size. */
	if (start >= size64 || start + n > size64)
		return -EINVAL;

	/* Convert the 64-bit word index to a backdoor byte address;
	 * narrower interfaces need several 8-byte accesses per word. */
	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		/* Assemble one 64-bit word from the most significant
		 * chunk down to the least significant one. */
		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;	/* bounded busy-wait */
			u32 val;

			/* Issue the backdoor read and poll for completion. */
			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)	/* still busy: give up */
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				/* Full-width interface: DATA0/DATA1 hold the
				 * low/high halves of the whole 64-bit word. */
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				/* Narrow interface: keep only the valid part
				 * of DATA1 and place it at this chunk's
				 * position in the 64-bit result. */
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
190
191/*
192 * Initialize MI1.
193 */
194static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
195{
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
197 u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
198 V_CLKDIV(clkdiv);
199
200 if (!(ai->caps & SUPPORTED_10000baseT_Full))
201 val |= V_ST(1);
202 t3_write_reg(adap, A_MI1_CFG, val);
203}
204
205#define MDIO_ATTEMPTS 10
206
207/*
208 * MI1 read/write operations for direct-addressed PHYs.
209 */
210static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
211 int reg_addr, unsigned int *valp)
212{
213 int ret;
214 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
215
216 if (mmd_addr)
217 return -EINVAL;
218
219 mutex_lock(&adapter->mdio_lock);
220 t3_write_reg(adapter, A_MI1_ADDR, addr);
221 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
222 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
223 if (!ret)
224 *valp = t3_read_reg(adapter, A_MI1_DATA);
225 mutex_unlock(&adapter->mdio_lock);
226 return ret;
227}
228
229static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
230 int reg_addr, unsigned int val)
231{
232 int ret;
233 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
234
235 if (mmd_addr)
236 return -EINVAL;
237
238 mutex_lock(&adapter->mdio_lock);
239 t3_write_reg(adapter, A_MI1_ADDR, addr);
240 t3_write_reg(adapter, A_MI1_DATA, val);
241 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
242 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
243 mutex_unlock(&adapter->mdio_lock);
244 return ret;
245}
246
/* MDIO operations for direct-addressed (clause 22) PHYs. */
static const struct mdio_ops mi1_mdio_ops = {
	mi1_read,
	mi1_write
};
251
252/*
253 * MI1 read/write operations for indirect-addressed PHYs.
254 */
255static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
256 int reg_addr, unsigned int *valp)
257{
258 int ret;
259 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
260
261 mutex_lock(&adapter->mdio_lock);
262 t3_write_reg(adapter, A_MI1_ADDR, addr);
263 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
264 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
265 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
266 if (!ret) {
267 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
268 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
269 MDIO_ATTEMPTS, 20);
270 if (!ret)
271 *valp = t3_read_reg(adapter, A_MI1_DATA);
272 }
273 mutex_unlock(&adapter->mdio_lock);
274 return ret;
275}
276
277static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
278 int reg_addr, unsigned int val)
279{
280 int ret;
281 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
282
283 mutex_lock(&adapter->mdio_lock);
284 t3_write_reg(adapter, A_MI1_ADDR, addr);
285 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
286 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
287 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
288 if (!ret) {
289 t3_write_reg(adapter, A_MI1_DATA, val);
290 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
291 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
292 MDIO_ATTEMPTS, 20);
293 }
294 mutex_unlock(&adapter->mdio_lock);
295 return ret;
296}
297
/* MDIO operations for indirect-addressed PHYs. */
static const struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};
302
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Read-modify-write of a PHY register: the current value is read,
 *	the @clear bits are masked off, the @set bits are ORed in, and the
 *	result is written back.  Returns the first MDIO error encountered,
 *	or 0 on success.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	unsigned int val;
	int ret = mdio_read(phy, mmd, reg, &val);

	if (ret)
		return ret;
	return mdio_write(phy, mmd, reg, (val & ~clear) | set);
}
327
328/**
329 * t3_phy_reset - reset a PHY block
330 * @phy: the PHY to operate on
331 * @mmd: the device address of the PHY block to reset
332 * @wait: how long to wait for the reset to complete in 1ms increments
333 *
334 * Resets a PHY block and optionally waits for the reset to complete.
335 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
336 * for 10G PHYs.
337 */
338int t3_phy_reset(struct cphy *phy, int mmd, int wait)
339{
340 int err;
341 unsigned int ctl;
342
343 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
344 if (err || !wait)
345 return err;
346
347 do {
348 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
349 if (err)
350 return err;
351 ctl &= BMCR_RESET;
352 if (ctl)
353 msleep(1);
354 } while (ctl && --wait);
355
356 return ctl ? -1 : 0;
357}
358
359/**
360 * t3_phy_advertise - set the PHY advertisement registers for autoneg
361 * @phy: the PHY to operate on
362 * @advert: bitmap of capabilities the PHY should advertise
363 *
364 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
365 * requested capabilities.
366 */
367int t3_phy_advertise(struct cphy *phy, unsigned int advert)
368{
369 int err;
370 unsigned int val = 0;
371
372 err = mdio_read(phy, 0, MII_CTRL1000, &val);
373 if (err)
374 return err;
375
376 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
377 if (advert & ADVERTISED_1000baseT_Half)
378 val |= ADVERTISE_1000HALF;
379 if (advert & ADVERTISED_1000baseT_Full)
380 val |= ADVERTISE_1000FULL;
381
382 err = mdio_write(phy, 0, MII_CTRL1000, val);
383 if (err)
384 return err;
385
386 val = 1;
387 if (advert & ADVERTISED_10baseT_Half)
388 val |= ADVERTISE_10HALF;
389 if (advert & ADVERTISED_10baseT_Full)
390 val |= ADVERTISE_10FULL;
391 if (advert & ADVERTISED_100baseT_Half)
392 val |= ADVERTISE_100HALF;
393 if (advert & ADVERTISED_100baseT_Full)
394 val |= ADVERTISE_100FULL;
395 if (advert & ADVERTISED_Pause)
396 val |= ADVERTISE_PAUSE_CAP;
397 if (advert & ADVERTISED_Asym_Pause)
398 val |= ADVERTISE_PAUSE_ASYM;
399 return mdio_write(phy, 0, MII_ADVERTISE, val);
400}
401
402/**
403 * t3_set_phy_speed_duplex - force PHY speed and duplex
404 * @phy: the PHY to operate on
405 * @speed: requested PHY speed
406 * @duplex: requested PHY duplex
407 *
408 * Force a 10/100/1000 PHY's speed and duplex. This also disables
409 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
410 */
411int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
412{
413 int err;
414 unsigned int ctl;
415
416 err = mdio_read(phy, 0, MII_BMCR, &ctl);
417 if (err)
418 return err;
419
420 if (speed >= 0) {
421 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
422 if (speed == SPEED_100)
423 ctl |= BMCR_SPEED100;
424 else if (speed == SPEED_1000)
425 ctl |= BMCR_SPEED1000;
426 }
427 if (duplex >= 0) {
428 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
429 if (duplex == DUPLEX_FULL)
430 ctl |= BMCR_FULLDPLX;
431 }
432 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
433 ctl |= BMCR_ANENABLE;
434 return mdio_write(phy, 0, MII_BMCR, ctl);
435}
436
/*
 * Static description of the supported adapters: GPIO setup, link
 * capabilities, MDIO access method and product name.  Indexed by the
 * adapter id passed to t3_get_adapter_info().  Field meanings follow
 * struct adapter_info (declared elsewhere) — presumably port count first;
 * verify against the header before relying on individual fields.
 */
static const struct adapter_info t3_adap_info[] = {
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 0, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{2, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
};
460
461/*
462 * Return the adapter_info structure with a given index. Out-of-range indices
463 * return NULL.
464 */
465const struct adapter_info *t3_get_adapter_info(unsigned int id)
466{
467 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
468}
469
/* Shorthand capability sets used only to build the port_types table. */
#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
		 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)

/*
 * Per-port-type table: PHY preparation routine, supported-link
 * capabilities and a human-readable name.  A NULL first field means no
 * PHY prep routine is available for that port type.  Indexed by the
 * port type read from VPD (see get_vpd_params); index 0 is unused.
 */
static const struct port_type_info port_types[] = {
	{NULL},
	{t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-XR"},
	{t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{NULL, CAPS_10G, "10GBASE-KX4"},
	{t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-SR"},
	{NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
};

#undef CAPS_1G
#undef CAPS_10G
492
/*
 * One VPD-R keyword entry: 2-byte keyword, 1-byte length, then the data.
 * The layout mirrors the on-EEPROM encoding, so field order and sizes
 * must not change.
 */
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.  This struct is overlaid directly on the bytes read
 * from the EEPROM (see get_vpd_params), so it must match that layout
 * exactly.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};
524
525#define EEPROM_MAX_POLL 4
526#define EEPROM_STAT_ADDR 0x4000
527#define VPD_BASE 0xc00
528
529/**
530 * t3_seeprom_read - read a VPD EEPROM location
531 * @adapter: adapter to read
532 * @addr: EEPROM address
533 * @data: where to store the read data
534 *
535 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
536 * VPD ROM capability. A zero is written to the flag bit when the
537 * addres is written to the control register. The hardware device will
538 * set the flag to 1 when 4 bytes have been read into the data register.
539 */
540int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
541{
542 u16 val;
543 int attempts = EEPROM_MAX_POLL;
544 unsigned int base = adapter->params.pci.vpd_cap_addr;
545
546 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
547 return -EINVAL;
548
549 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
550 do {
551 udelay(10);
552 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
553 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
554
555 if (!(val & PCI_VPD_ADDR_F)) {
556 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
557 return -EIO;
558 }
559 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
560 *data = le32_to_cpu(*data);
561 return 0;
562}
563
564/**
565 * t3_seeprom_write - write a VPD EEPROM location
566 * @adapter: adapter to write
567 * @addr: EEPROM address
568 * @data: value to write
569 *
570 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
571 * VPD ROM capability.
572 */
573int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
574{
575 u16 val;
576 int attempts = EEPROM_MAX_POLL;
577 unsigned int base = adapter->params.pci.vpd_cap_addr;
578
579 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
580 return -EINVAL;
581
582 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
583 cpu_to_le32(data));
584 pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
585 addr | PCI_VPD_ADDR_F);
586 do {
587 msleep(1);
588 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
589 } while ((val & PCI_VPD_ADDR_F) && --attempts);
590
591 if (val & PCI_VPD_ADDR_F) {
592 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
593 return -EIO;
594 }
595 return 0;
596}
597
598/**
599 * t3_seeprom_wp - enable/disable EEPROM write protection
600 * @adapter: the adapter
601 * @enable: 1 to enable write protection, 0 to disable it
602 *
603 * Enables or disables write protection on the serial EEPROM.
604 */
605int t3_seeprom_wp(struct adapter *adapter, int enable)
606{
607 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
608}
609
/*
 * Convert a character holding a hex digit ('0'-'9', 'a'-'f', 'A'-'F')
 * to its numeric value.
 */
static unsigned int hex2int(unsigned char c)
{
	if (isdigit(c))
		return c - '0';
	return toupper(c) - 'A' + 10;
}
617
/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM: clocks, memory timing,
 *	serial number, port types, XAUI config and the base MAC address.
 *	Returns 0 on success or the first EEPROM read error.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.  Probe VPD_BASE first and check for the 0x82 ID tag.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	/* Read the whole structure, 32 bits at a time, overlaying it on
	 * the struct t3_vpd layout. */
	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (u32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	/* Clock/timing entries are stored as decimal ASCII strings. */
	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information; derive it from the
	 * board wiring (XAUI vs not) on rev 0 adapters. */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		/* Port types are single hex digits; XAUI configs are hex
		 * ASCII strings. */
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	/* The MAC address is stored as 12 hex ASCII digits. */
	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}
669
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes (sent as the first byte of an SF1 write) */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	/* firmware image layout within the flash */
	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x77ffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};
688
689/**
690 * sf1_read - read data from the serial flash
691 * @adapter: the adapter
692 * @byte_cnt: number of bytes to read
693 * @cont: whether another operation will be chained
694 * @valp: where to store the read data
695 *
696 * Reads up to 4 bytes of data from the serial flash. The location of
697 * the read needs to be specified prior to calling this by issuing the
698 * appropriate commands to the serial flash.
699 */
700static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
701 u32 *valp)
702{
703 int ret;
704
705 if (!byte_cnt || byte_cnt > 4)
706 return -EINVAL;
707 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
708 return -EBUSY;
709 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
710 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
711 if (!ret)
712 *valp = t3_read_reg(adapter, A_SF_DATA);
713 return ret;
714}
715
716/**
717 * sf1_write - write data to the serial flash
718 * @adapter: the adapter
719 * @byte_cnt: number of bytes to write
720 * @cont: whether another operation will be chained
721 * @val: value to write
722 *
723 * Writes up to 4 bytes of data to the serial flash. The location of
724 * the write needs to be specified prior to calling this by issuing the
725 * appropriate commands to the serial flash.
726 */
727static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
728 u32 val)
729{
730 if (!byte_cnt || byte_cnt > 4)
731 return -EINVAL;
732 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
733 return -EBUSY;
734 t3_write_reg(adapter, A_SF_DATA, val);
735 t3_write_reg(adapter, A_SF_OP,
736 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
737 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
738}
739
740/**
741 * flash_wait_op - wait for a flash operation to complete
742 * @adapter: the adapter
743 * @attempts: max number of polls of the status register
744 * @delay: delay between polls in ms
745 *
746 * Wait for a flash operation to complete by polling the status register.
747 */
748static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
749{
750 int ret;
751 u32 status;
752
753 while (1) {
754 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
755 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
756 return ret;
757 if (!(status & 1))
758 return 0;
759 if (--attempts == 0)
760 return -EAGAIN;
761 if (delay)
762 msleep(delay);
763 }
764}
765
766/**
767 * t3_read_flash - read words from serial flash
768 * @adapter: the adapter
769 * @addr: the start address for the read
770 * @nwords: how many 32-bit words to read
771 * @data: where to store the read data
772 * @byte_oriented: whether to store data as bytes or as words
773 *
774 * Read the specified number of 32-bit words from the serial flash.
775 * If @byte_oriented is set the read data is stored as a byte array
776 * (i.e., big-endian), otherwise as 32-bit words in the platform's
777 * natural endianess.
778 */
779int t3_read_flash(struct adapter *adapter, unsigned int addr,
780 unsigned int nwords, u32 *data, int byte_oriented)
781{
782 int ret;
783
784 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
785 return -EINVAL;
786
787 addr = swab32(addr) | SF_RD_DATA_FAST;
788
789 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
790 (ret = sf1_read(adapter, 1, 1, data)) != 0)
791 return ret;
792
793 for (; nwords; nwords--, data++) {
794 ret = sf1_read(adapter, 4, nwords > 1, data);
795 if (ret)
796 return ret;
797 if (byte_oriented)
798 *data = htonl(*data);
799 }
800 return 0;
801}
802
/**
 *	t3_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash
 *	starting at the given address, then reads the page back to verify
 *	the write.  The write must not cross a page boundary.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];	/* one 256-byte flash page */
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* Reject writes past the end of flash or across a page boundary. */
	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	/* Enable writes, then issue the page-program command. */
	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	/* Stream the payload 4 bytes at a time, big-endian within each
	 * SF1 word; the chained flag stays set until the last chunk. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* data was advanced by n above; rewind to compare the original
	 * bytes against the page just read back. */
	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}
850
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700851/**
Divy Le Ray47330072007-08-29 19:15:52 -0700852 * t3_get_tp_version - read the tp sram version
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700853 * @adapter: the adapter
Divy Le Ray47330072007-08-29 19:15:52 -0700854 * @vers: where to place the version
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700855 *
Divy Le Ray47330072007-08-29 19:15:52 -0700856 * Reads the protocol sram version from sram.
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700857 */
Divy Le Ray47330072007-08-29 19:15:52 -0700858int t3_get_tp_version(struct adapter *adapter, u32 *vers)
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700859{
860 int ret;
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700861
862 /* Get version loaded in SRAM */
863 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
864 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
865 1, 1, 5, 1);
866 if (ret)
867 return ret;
Jeff Garzik2eab17a2007-11-23 21:59:45 -0500868
Divy Le Ray47330072007-08-29 19:15:52 -0700869 *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
870
871 return 0;
872}
873
874/**
875 * t3_check_tpsram_version - read the tp sram version
876 * @adapter: the adapter
877 * @must_load: set to 1 if loading a new microcode image is required
878 *
879 * Reads the protocol sram version from flash.
880 */
881int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
882{
883 int ret;
884 u32 vers;
885 unsigned int major, minor;
886
887 if (adapter->params.rev == T3_REV_A)
888 return 0;
889
890 *must_load = 1;
891
892 ret = t3_get_tp_version(adapter, &vers);
893 if (ret)
894 return ret;
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700895
896 major = G_TP_VERSION_MAJOR(vers);
897 minor = G_TP_VERSION_MINOR(vers);
898
Jeff Garzik2eab17a2007-11-23 21:59:45 -0500899 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700900 return 0;
901
Divy Le Ray47330072007-08-29 19:15:52 -0700902 if (major != TP_VERSION_MAJOR)
903 CH_ERR(adapter, "found wrong TP version (%u.%u), "
904 "driver needs version %d.%d\n", major, minor,
905 TP_VERSION_MAJOR, TP_VERSION_MINOR);
906 else {
907 *must_load = 0;
908 CH_ERR(adapter, "found wrong TP version (%u.%u), "
909 "driver compiled for version %d.%d\n", major, minor,
910 TP_VERSION_MAJOR, TP_VERSION_MINOR);
911 }
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700912 return -EINVAL;
913}
914
915/**
Jeff Garzik2eab17a2007-11-23 21:59:45 -0500916 * t3_check_tpsram - check if provided protocol SRAM
Divy Le Ray480fe1a2007-05-30 21:10:58 -0700917 * is compatible with this driver
918 * @adapter: the adapter
919 * @tp_sram: the firmware image to write
920 * @size: image size
921 *
922 * Checks if an adapter's tp sram is compatible with the driver.
923 * Returns 0 if the versions are compatible, a negative error otherwise.
924 */
925int t3_check_tpsram(struct adapter *adapter, u8 *tp_sram, unsigned int size)
926{
927 u32 csum;
928 unsigned int i;
929 const u32 *p = (const u32 *)tp_sram;
930
931 /* Verify checksum */
932 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
933 csum += ntohl(p[i]);
934 if (csum != 0xffffffff) {
935 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
936 csum);
937 return -EINVAL;
938 }
939
940 return 0;
941}
942
/* Firmware image flavor encoded in the version word (see
 * G_FW_VERSION_TYPE in t3_check_fw_version). */
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};
947
Divy Le Ray4d22de32007-01-18 22:04:14 -0500948/**
949 * t3_get_fw_version - read the firmware version
950 * @adapter: the adapter
951 * @vers: where to place the version
952 *
953 * Reads the FW version from flash.
954 */
955int t3_get_fw_version(struct adapter *adapter, u32 *vers)
956{
957 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
958}
959
/**
 *	t3_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *	@must_load: set to 1 if loading a new FW image is required
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter, int *must_load)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	/* Assume a (re)load is needed until proven otherwise. */
	*must_load = 1;
	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;

	if (major != FW_VERSION_MAJOR)
		CH_ERR(adapter, "found wrong FW version(%u.%u), "
		       "driver needs version %u.%u\n", major, minor,
		       FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else if (minor < FW_VERSION_MINOR) {
		/* Older minor: warn but allow the driver to come up
		 * without forcing a FW reload.
		 */
		*must_load = 0;
		CH_WARN(adapter, "found old FW minor version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	} else {
		/* Newer minor of the same major is treated as compatible. */
		CH_WARN(adapter, "found newer FW version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}
1004
1005/**
1006 * t3_flash_erase_sectors - erase a range of flash sectors
1007 * @adapter: the adapter
1008 * @start: the first sector to erase
1009 * @end: the last sector to erase
1010 *
1011 * Erases the sectors in the given range.
1012 */
1013static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1014{
1015 while (start <= end) {
1016 int ret;
1017
1018 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1019 (ret = sf1_write(adapter, 4, 0,
1020 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1021 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1022 return ret;
1023 start++;
1024 }
1025 return 0;
1026}
1027
/**
 *	t3_load_fw - download firmware
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The FW image has the following sections: @size - 8 bytes of code and
 *	data, followed by 4 bytes of FW version, followed by the 32-bit
 *	1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const u32 *p = (const u32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	/* Image must be word-aligned and at least the minimum FW size ... */
	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	/* ... and must fit between the boot address and the version word. */
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	/* The whole image, trailing checksum included, sums to all-ones. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	/* Write the code/data section in 256-byte chunks. */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	/* The 4-byte version word is written last, at FW_VERS_ADDR. */
	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
1082
/* Base address of the CIM control region within the CIM host access space. */
#define CIM_CTL_BASE 0x2000

/**
 *	t3_cim_ctl_blk_read - read a block from CIM control region
 *
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 *	Returns 0 on success, -EBUSY if a host access is already in
 *	progress, or the error from t3_wait_op_done().
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	/* Bail out if another host access is already in flight. */
	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	/* For each word: start the access, wait for HOSTBUSY to clear,
	 * then pick up the data word.  Stops at the first error.
	 */
	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}
1112
1113
/**
 *	t3_link_changed - handle interface link changes
 *	@adapter: the adapter
 *	@port_id: the port index that changed link state
 *
 *	Called when a port's link settings change to propagate the new values
 *	to the associated PHY and MAC.  After performing the common tasks it
 *	invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	/* On rev > 0 XAUI adapters, gate the XAUI Tx/Rx enables on link
	 * state transitions, resetting the PCS when the link comes up.
	 */
	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
	/* Use the autonegotiated pause settings only if PAUSE_AUTONEG was
	 * requested; otherwise apply the administratively requested ones.
	 */
	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}
1156
/**
 *	t3_link_start - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		/* Rebuild the Pause/Asym_Pause advertisement bits from the
		 * requested Rx/Tx pause settings.
		 */
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			/* Autoneg off: force the requested settings on the
			 * MAC, then on the PHY, then reset the PHY.
			 */
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
			phy->ops->reset(phy, 0);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		/* PHY without autoneg: program the MAC, record the pause
		 * settings, and reset the PHY.
		 */
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}
1202
1203/**
1204 * t3_set_vlan_accel - control HW VLAN extraction
1205 * @adapter: the adapter
1206 * @ports: bitmap of adapter ports to operate on
1207 * @on: enable (1) or disable (0) HW VLAN extraction
1208 *
1209 * Enables or disables HW extraction of VLAN tags for the given port.
1210 */
1211void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1212{
1213 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1214 ports << S_VLANEXTRACTIONENABLE,
1215 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1216}
1217
/* Describes one condition in an interrupt cause register and how
 * t3_handle_intr_status() should react to it.
 */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal:1;	/* whether the condition reported is fatal */
};
1224
1225/**
1226 * t3_handle_intr_status - table driven interrupt handler
1227 * @adapter: the adapter that generated the interrupt
1228 * @reg: the interrupt status register to process
1229 * @mask: a mask to apply to the interrupt status
1230 * @acts: table of interrupt actions
1231 * @stats: statistics counters tracking interrupt occurences
1232 *
1233 * A table driven interrupt handler that applies a set of masks to an
1234 * interrupt status word and performs the corresponding actions if the
1235 * interrupts described by the mask have occured. The actions include
1236 * optionally printing a warning or alert message, and optionally
1237 * incrementing a stat counter. The table is terminated by an entry
1238 * specifying mask 0. Returns the number of fatal interrupt conditions.
1239 */
1240static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1241 unsigned int mask,
1242 const struct intr_info *acts,
1243 unsigned long *stats)
1244{
1245 int fatal = 0;
1246 unsigned int status = t3_read_reg(adapter, reg) & mask;
1247
1248 for (; acts->mask; ++acts) {
1249 if (!(status & acts->mask))
1250 continue;
1251 if (acts->fatal) {
1252 fatal++;
1253 CH_ALERT(adapter, "%s (0x%x)\n",
1254 acts->msg, status & acts->mask);
1255 } else if (acts->msg)
1256 CH_WARN(adapter, "%s (0x%x)\n",
1257 acts->msg, status & acts->mask);
1258 if (acts->stat_idx >= 0)
1259 stats[acts->stat_idx]++;
1260 }
1261 if (status) /* clear processed interrupts */
1262 t3_write_reg(adapter, reg, status);
1263 return fatal;
1264}
1265
/*
 * Per-module interrupt-enable masks.  These are the condition sets armed
 * by t3_intr_enable() and applied by the corresponding module handlers.
 */
#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
/* Top-level (PL) cause bits dispatched by t3_slow_intr_handler(). */
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
1320
/*
 * Interrupt handler for the PCIX1 module.
 *
 * Every condition is fatal except correctable ECC errors, which are only
 * counted in the STAT_PCI_CORR_ECC statistic.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1356
/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	/* Log the detailed PEX error code before the generic handling. */
	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1390
1391/*
1392 * TP interrupt handler.
1393 */
1394static void tp_intr_handler(struct adapter *adapter)
1395{
1396 static const struct intr_info tp_intr_info[] = {
1397 {0xffffff, "TP parity error", -1, 1},
1398 {0x1000000, "TP out of Rx pages", -1, 1},
1399 {0x2000000, "TP out of Tx pages", -1, 1},
1400 {0}
1401 };
1402
Divy Le Raya2604be2007-11-16 11:22:16 -08001403 static struct intr_info tp_intr_info_t3c[] = {
Divy Le Rayb8819552007-12-17 18:47:31 -08001404 {0x1fffffff, "TP parity error", -1, 1},
1405 {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1406 {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
1407 {0}
Divy Le Raya2604be2007-11-16 11:22:16 -08001408 };
1409
Divy Le Ray4d22de32007-01-18 22:04:14 -05001410 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
Divy Le Raya2604be2007-11-16 11:22:16 -08001411 adapter->params.rev < T3_REV_C ?
Divy Le Rayb8819552007-12-17 18:47:31 -08001412 tp_intr_info : tp_intr_info_t3c, NULL))
Divy Le Ray4d22de32007-01-18 22:04:14 -05001413 t3_fatal_err(adapter);
1414}
1415
/*
 * CIM interrupt handler.  All conditions are fatal.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}
1453
/*
 * ULP RX interrupt handler.  All conditions are fatal.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1475
/*
 * ULP TX interrupt handler.
 *
 * PBL out-of-bounds conditions are only counted; parity errors are fatal.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0xfc, "ULP TX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1494
/* Aggregate ispi/ospi framing-error bits used by pmtx_intr_handler(). */
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1503
/*
 * PM TX interrupt handler.  All conditions are fatal.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1524
/* Aggregate ispi/ospi framing-error bits used by pmrx_intr_handler(). */
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1533
/*
 * PM RX interrupt handler.  All conditions are fatal.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1554
/*
 * CPL switch interrupt handler.  All conditions are fatal.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}
1574
/*
 * MPS interrupt handler.  Any of the low 9 cause bits is a fatal parity
 * error.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}
1589
/* MC7 conditions that bring the adapter down: uncorrectable, parity, and
 * address errors.  Correctable (F_CE) errors are only counted.
 */
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	/* Correctable error: count it, log the address and data words. */
	if (cause & F_CE) {
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		/* The error-address register is only present on rev > 0. */
		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	/* Clear the cause bits that were just processed. */
	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
1642
/* XGMAC conditions that bring the adapter down: Tx/Rx FIFO parity errors. */
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
/*
 * XGMAC interrupt handler.  Updates the per-MAC statistics for each cause
 * bit, clears the cause register, and declares a fatal error on FIFO
 * parity errors.  Returns nonzero iff any cause bit was set.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;

	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);
	return cause != 0;
}
1677
/*
 * Interrupt handler for PHY events.  Walks the ports, matching each one
 * against its GPIO interrupt line, and forwards link-change and FIFO-error
 * events reported by the PHY.  Always returns 0.
 */
int t3_phy_intr_handler(struct adapter *adapter)
{
	u32 mask, gpi = adapter_info(adapter)->gpio_intr;
	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);

	for_each_port(adapter, i) {
		struct port_info *p = adap2pinfo(adapter, i);

		/* gpi & (gpi - 1) clears the lowest set bit, so this
		 * isolates the next GPIO interrupt line and consumes it.
		 */
		mask = gpi - (gpi & (gpi - 1));
		gpi -= mask;

		if (!(p->port_type->caps & SUPPORTED_IRQ))
			continue;

		if (cause & mask) {
			int phy_cause = p->phy.ops->intr_handler(&p->phy);

			if (phy_cause & cphy_cause_link_change)
				t3_link_changed(adapter, i);
			if (phy_cause & cphy_cause_fifo_error)
				p->phy.fifo_errors++;
		}
	}

	/* Acknowledge the GPIO causes that were read. */
	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
	return 0;
}
1708
/*
 * T3 slow path (non-data) interrupt handler.  Reads the top-level cause
 * register, dispatches to the per-module handlers for each enabled cause
 * bit, then clears the processed causes.  Returns 0 if no enabled cause
 * was pending, 1 otherwise.
 */
int t3_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

	/* Only act on causes that are currently enabled. */
	cause &= adapter->slow_intr_mask;
	if (!cause)
		return 0;
	if (cause & F_PCIM0) {
		if (is_pcie(adapter))
			pcie_intr_handler(adapter);
		else
			pci_intr_handler(adapter);
	}
	if (cause & F_SGE3)
		t3_sge_err_intr_handler(adapter);
	if (cause & F_MC7_PMRX)
		mc7_intr_handler(&adapter->pmrx);
	if (cause & F_MC7_PMTX)
		mc7_intr_handler(&adapter->pmtx);
	if (cause & F_MC7_CM)
		mc7_intr_handler(&adapter->cm);
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_TP1)
		tp_intr_handler(adapter);
	if (cause & F_ULP2_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_ULP2_TX)
		ulptx_intr_handler(adapter);
	if (cause & F_PM1_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_PM1_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_MPS0)
		mps_intr_handler(adapter);
	if (cause & F_MC5A)
		t3_mc5_intr_handler(&adapter->mc5);
	if (cause & F_XGMAC0_0)
		mac_intr_handler(adapter, 0);
	if (cause & F_XGMAC0_1)
		mac_intr_handler(adapter, 1);
	if (cause & F_T3DBG)
		t3_os_ext_intr_handler(adapter);

	/* Clear the interrupts just processed. */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
	return 1;
}
1763
/**
 *	t3_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable interrupts by setting the interrupt enable registers of the
 *	various HW modules and then enabling the top-level interrupt
 *	concentrator.
 */
void t3_intr_enable(struct adapter *adapter)
{
	static const struct addr_val_pair intr_en_avp[] = {
		{A_SG_INT_ENABLE, SGE_INTR_MASK},
		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
	};

	adapter->slow_intr_mask = PL_INTR_MASK;

	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
	/* TP enable mask differs between pre-T3C and T3C revisions. */
	t3_write_reg(adapter, A_TP_INT_ENABLE,
		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);

	/* Extra CPL-switch and ULP-TX conditions exist only on rev > 0. */
	if (adapter->params.rev > 0) {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
			     F_PBL_BOUND_ERR_CH1);
	} else {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
	}

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
		     adapter_info(adapter)->gpio_intr);
	t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
		     adapter_info(adapter)->gpio_intr);
	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
	else
		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
	/* Finally arm the top-level concentrator. */
	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
}
1817
/**
 *	t3_intr_disable - disable a card's interrupts
 *	@adapter: the adapter whose interrupts should be disabled
 *
 *	Disable interrupts.  We only disable the top-level interrupt
 *	concentrator and the SGE data interrupts.
 */
void t3_intr_disable(struct adapter *adapter)
{
	/* Closing the top-level concentrator suffices; the per-module
	 * enable registers are left untouched. */
	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
	/* Tell the slow-path handler to expect no interrupt causes */
	adapter->slow_intr_mask = 0;
}
1831
/**
 *	t3_intr_clear - clear all interrupts
 *	@adapter: the adapter whose interrupts should be cleared
 *
 *	Clears all interrupts.
 */
void t3_intr_clear(struct adapter *adapter)
{
	/* Per-module interrupt cause registers; each is cleared by writing
	 * all-ones.  The two derived MC7 entries rebase the PMRX MC7 cause
	 * register onto the PMTX and CM MC7 instances. */
	static const unsigned int cause_reg_addr[] = {
		A_SG_INT_CAUSE,
		A_SG_RSPQ_FL_STATUS,
		A_PCIX_INT_CAUSE,
		A_MC7_INT_CAUSE,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		A_CIM_HOST_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_MC5_DB_INT_CAUSE,
		A_ULPRX_INT_CAUSE,
		A_ULPTX_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		A_PM1_TX_INT_CAUSE,
		A_PM1_RX_INT_CAUSE,
		A_MPS_INT_CAUSE,
		A_T3DBG_INT_CAUSE,
	};
	unsigned int i;

	/* Clear PHY and MAC interrupts for each port. */
	for_each_port(adapter, i)
	    t3_port_intr_clear(adapter, i);

	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

	/* PCIe parts have an extra express-error status register */
	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
	/* Finally clear the top-level cause and flush the posted write */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
}
1872
/**
 *	t3_port_intr_enable - enable port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts should be enabled
 *
 *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_enable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	/* MAC interrupts first, via the per-port XGMAC register block ... */
	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	/* ... then the PHY, through its own ops vector */
	phy->ops->intr_enable(phy);
}
1889
/**
 *	t3_port_intr_disable - disable port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts should be disabled
 *
 *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_disable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	/* Mask all MAC interrupts for this port, then the PHY's */
	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_disable(phy);
}
1906
/**
 *	t3_port_intr_clear - clear port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts to clear
 *
 *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_clear(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	/* Writing all-ones clears every pending MAC cause bit */
	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
	phy->ops->intr_clear(phy);
}
1923
/* Number of times to poll an SGE context command for completion */
#define SG_CONTEXT_CMD_ATTEMPTS 100

/**
 *	t3_sge_write_context - write an SGE context
 *	@adapter: the adapter
 *	@id: the context id
 *	@type: the context type
 *
 *	Program an SGE context with the values already loaded in the
 *	CONTEXT_DATA? registers.
 */
static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
				unsigned int type)
{
	/* All-ones masks select every bit of the four data words for update */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	/* Opcode 1 = write; kick off the command and poll for completion */
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
1947
/*
 * clear_sge_ctxt - zero out an SGE context
 * @adap: the adapter
 * @id: the context id
 * @type: the context type
 *
 * Loads all-zero data words and writes them to the given context,
 * effectively clearing it.
 */
static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
			  unsigned int type)
{
	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
	return t3_sge_write_context(adap, id, type);
}
1957
/**
 *	t3_sge_init_ecntxt - initialize an SGE egress context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@gts_enable: whether to enable GTS for the context
 *	@type: the egress context type
 *	@respq: associated response queue
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@token: uP token
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE egress context and make it ready for use.  If the
 *	platform allows concurrent context operations, the caller is
 *	responsible for appropriate locking.
 */
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
		       enum sge_context_type type, int respq, u64 base_addr,
		       unsigned int size, unsigned int token, int gen,
		       unsigned int cidx)
{
	/* Offload queues get no FW credits; all others start with FW_WR_NUM */
	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Context stores the address in 4K units; split it across the
	 * data words: low 16 bits in word 1, next 32 in word 2, top 4 in
	 * word 3. */
	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
		     V_EC_BASE_LO(base_addr & 0xffff));
	base_addr >>= 16;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
		     F_EC_VALID);
	return t3_sge_write_context(adapter, id, F_EGRESS);
}
2001
/**
 *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@gts_enable: whether to enable GTS for the context
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@bsize: size of each buffer for this queue
 *	@cong_thres: threshold to signal congestion to upstream producers
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE free list context and make it ready for use.  The
 *	caller is responsible for ensuring only one context operation occurs
 *	at a time.
 */
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
			int gts_enable, u64 base_addr, unsigned int size,
			unsigned int bsize, unsigned int cong_thres, int gen,
			unsigned int cidx)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Address is stored in 4K units; cidx and bsize fields are each
	 * split into LO/HI parts that straddle data-word boundaries. */
	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
		     V_FL_BASE_HI((u32) base_addr) |
		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
	return t3_sge_write_context(adapter, id, F_FREELIST);
}
2042
/**
 *	t3_sge_init_rspcntxt - initialize an SGE response queue context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@fl_thres: threshold for selecting the normal or jumbo free list
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE response queue context and make it ready for use.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 */
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
			 int irq_vec_idx, u64 base_addr, unsigned int size,
			 unsigned int fl_thres, int gen, unsigned int cidx)
{
	unsigned int intr = 0;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Address is stored in 4K units, split across data words 1 and 2 */
	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
		     V_CQ_INDEX(cidx));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	/* irq_vec_idx < 0 leaves interrupt generation disabled entirely */
	if (irq_vec_idx >= 0)
		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
}
2081
/**
 *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@rspq: response queue for async notifications
 *	@ovfl_mode: CQ overflow mode
 *	@credits: completion queue credits
 *	@credit_thres: the credit threshold
 *
 *	Initialize an SGE completion queue context and make it ready for use.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 */
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
			unsigned int size, int rspq, int ovfl_mode,
			unsigned int credits, unsigned int credit_thres)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Address is stored in 4K units, split across data words 1 and 2 */
	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	/* ovfl_mode doubles as the initial CQ_ERR flag value */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
		     V_CQ_ERR(ovfl_mode));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
		     V_CQ_CREDIT_THRES(credit_thres));
	return t3_sge_write_context(adapter, id, F_CQ);
}
2118
/**
 *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
 *	@adapter: the adapter
 *	@id: the egress context id
 *	@enable: enable (1) or disable (0) the context
 *
 *	Enable or disable an SGE egress context.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Mask in only the EC_VALID bit so the rest of the context is
	 * untouched by this partial write. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2143
/**
 *	t3_sge_disable_fl - disable an SGE free-buffer list
 *	@adapter: the adapter
 *	@id: the free list context id
 *
 *	Disable an SGE free-buffer list.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Partial write: zero only the FL_SIZE field, leaving the rest of
	 * the context intact.  A zero-sized free list is disabled. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2167
/**
 *	t3_sge_disable_rspcntxt - disable an SGE response queue
 *	@adapter: the adapter
 *	@id: the response queue context id
 *
 *	Disable an SGE response queue.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Partial write: zero only the CQ_SIZE field; a zero-sized queue
	 * is disabled. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2191
/**
 *	t3_sge_disable_cqcntxt - disable an SGE completion queue
 *	@adapter: the adapter
 *	@id: the completion queue context id
 *
 *	Disable an SGE completion queue.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Partial write: zero only the CQ_SIZE field; a zero-sized queue
	 * is disabled. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2215
/**
 *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
 *	@adapter: the adapter
 *	@id: the context id
 *	@op: the operation to perform
 *	@credits: credit value written with the command
 *
 *	Perform the selected operation on an SGE completion queue context.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.  For ops 2-6 the current CQ index is returned on
 *	success, otherwise 0; negative values indicate errors.
 */
int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
		      unsigned int credits)
{
	u32 val;

	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
		     V_CONTEXT(id) | F_CQ);
	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
		return -EIO;

	if (op >= 2 && op < 7) {
		/* Rev > 0 chips return the CQ index directly in the command
		 * result ... */
		if (adapter->params.rev > 0)
			return G_CQ_INDEX(val);

		/* ... rev 0 needs an explicit read command (opcode 0) to
		 * fetch it from DATA0. */
		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
				    F_CONTEXT_CMD_BUSY, 0,
				    SG_CONTEXT_CMD_ATTEMPTS, 1))
			return -EIO;
		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
	}
	return 0;
}
2255
/**
 *	t3_sge_read_context - read an SGE context
 *	@type: the context type
 *	@adapter: the adapter
 *	@id: the context id
 *	@data: holds the retrieved context
 *
 *	Read an SGE context of the given type into @data.  The caller is
 *	responsible for ensuring only one context operation occurs at a time.
 */
static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
			       unsigned int id, u32 data[4])
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Opcode 0 = read; the context appears in the DATA registers once
	 * the busy bit clears. */
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
			    SG_CONTEXT_CMD_ATTEMPTS, 1))
		return -EIO;
	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
	return 0;
}
2283
2284/**
2285 * t3_sge_read_ecntxt - read an SGE egress context
2286 * @adapter: the adapter
2287 * @id: the context id
2288 * @data: holds the retrieved context
2289 *
2290 * Read an SGE egress context. The caller is responsible for ensuring
2291 * only one context operation occurs at a time.
2292 */
2293int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2294{
2295 if (id >= 65536)
2296 return -EINVAL;
2297 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2298}
2299
2300/**
2301 * t3_sge_read_cq - read an SGE CQ context
2302 * @adapter: the adapter
2303 * @id: the context id
2304 * @data: holds the retrieved context
2305 *
2306 * Read an SGE CQ context. The caller is responsible for ensuring
2307 * only one context operation occurs at a time.
2308 */
2309int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2310{
2311 if (id >= 65536)
2312 return -EINVAL;
2313 return t3_sge_read_context(F_CQ, adapter, id, data);
2314}
2315
2316/**
2317 * t3_sge_read_fl - read an SGE free-list context
2318 * @adapter: the adapter
2319 * @id: the context id
2320 * @data: holds the retrieved context
2321 *
2322 * Read an SGE free-list context. The caller is responsible for ensuring
2323 * only one context operation occurs at a time.
2324 */
2325int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2326{
2327 if (id >= SGE_QSETS * 2)
2328 return -EINVAL;
2329 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2330}
2331
2332/**
2333 * t3_sge_read_rspq - read an SGE response queue context
2334 * @adapter: the adapter
2335 * @id: the context id
2336 * @data: holds the retrieved context
2337 *
2338 * Read an SGE response queue context. The caller is responsible for
2339 * ensuring only one context operation occurs at a time.
2340 */
2341int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2342{
2343 if (id >= SGE_QSETS)
2344 return -EINVAL;
2345 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2346}
2347
/**
 *	t3_config_rss - configure Rx packet steering
 *	@adapter: the adapter
 *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
 *	@cpus: values for the CPU lookup table (0xff terminated)
 *	@rspq: values for the response queue lookup table (0xffff terminated)
 *
 *	Programs the receive packet steering logic.  @cpus and @rspq provide
 *	the values for the CPU and response queue lookup tables.  If they
 *	provide fewer values than the size of the tables the supplied values
 *	are used repeatedly until the tables are fully populated.
 */
void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
		   const u8 * cpus, const u16 *rspq)
{
	int i, j, cpu_idx = 0, q_idx = 0;

	if (cpus)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			/* Table index goes in the upper half of the word */
			u32 val = i << 16;

			/* Each table word holds two 6-bit CPU entries */
			for (j = 0; j < 2; ++j) {
				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
				/* Peek at the next entry: 0xff terminates
				 * the list, so wrap around and reuse the
				 * supplied values. */
				if (cpus[cpu_idx] == 0xff)
					cpu_idx = 0;
			}
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
		}

	if (rspq)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     (i << 16) | rspq[q_idx++]);
			/* 0xffff terminates the rspq list; wrap around */
			if (rspq[q_idx] == 0xffff)
				q_idx = 0;
		}

	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
}
2387
/**
 *	t3_read_rss - read the contents of the RSS tables
 *	@adapter: the adapter
 *	@lkup: holds the contents of the RSS lookup table
 *	@map: holds the contents of the RSS map table
 *
 *	Reads the contents of the receive packet steering tables.  Either
 *	output pointer may be NULL to skip that table.  Returns -EAGAIN if
 *	the HW does not report a valid entry (bit 31 clear on readback).
 */
int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
{
	int i;
	u32 val;

	if (lkup)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			/* 0xffff in the high half selects a read of entry i */
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
			/* Bit 31 flags a valid readback */
			if (!(val & 0x80000000))
				return -EAGAIN;
			/* Each word carries two CPU entries, one per byte */
			*lkup++ = val;
			*lkup++ = (val >> 8);
		}

	if (map)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
			if (!(val & 0x80000000))
				return -EAGAIN;
			*map++ = val;
		}
	return 0;
}
2423
2424/**
2425 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2426 * @adap: the adapter
2427 * @enable: 1 to select offload mode, 0 for regular NIC
2428 *
2429 * Switches TP to NIC/offload mode.
2430 */
2431void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2432{
2433 if (is_offload(adap) || !enable)
2434 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2435 V_NICMODE(!enable));
2436}
2437
/**
 *	pm_num_pages - calculate the number of pages of the payload memory
 *	@mem_size: the size of the payload memory
 *	@pg_size: the size of each payload memory page
 *
 *	Calculate the number of pages, each of the given size, that fit in a
 *	memory of the specified size, respecting the HW requirement that the
 *	number of pages must be a multiple of 24.
 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
					unsigned int pg_size)
{
	unsigned int pages = mem_size / pg_size;

	/* round down to the nearest multiple of 24 */
	return (pages / 24) * 24;
}
2454
/*
 * Write the base address of a TP/CM memory region to register A_<reg> and
 * advance @start past the region.  Wrapped in do { } while (0) so the
 * two-statement expansion behaves as a single statement and stays safe
 * inside unbraced if/else bodies.
 */
#define mem_region(adap, start, size, reg) \
	do { \
		t3_write_reg((adap), A_ ## reg, (start)); \
		(start) += (size); \
	} while (0)
2458
/**
 *	partition_mem - partition memory and configure TP memory settings
 *	@adap: the adapter
 *	@p: the TP parameters
 *
 *	Partitions context and payload memory and configures TP's memory
 *	registers.
 */
static void partition_mem(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
	unsigned int timers = 0, timers_shift = 22;

	/* Pick the timer-region size class from the number of TIDs
	 * (rev > 0 only; rev 0 keeps the defaults above). */
	if (adap->params.rev > 0) {
		if (tids <= 16 * 1024) {
			timers = 1;
			timers_shift = 16;
		} else if (tids <= 64 * 1024) {
			timers = 2;
			timers_shift = 18;
		} else if (tids <= 256 * 1024) {
			timers = 3;
			timers_shift = 20;
		}
	}

	/* Per-channel Rx/Tx payload memory sizes share one register */
	t3_write_reg(adap, A_TP_PMM_SIZE,
		     p->chan_rx_size | (p->chan_tx_size >> 16));

	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));

	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);

	pstructs = p->rx_num_pgs + p->tx_num_pgs;
	/* Add a bit of headroom and make multiple of 24 */
	pstructs += 48;
	pstructs -= pstructs % 24;
	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);

	/* Lay out CM memory: TCBs first, then the successive regions; m
	 * tracks the running offset (mem_region advances it). */
	m = tids * TCB_SIZE;
	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);

	/* Round up to a 4K boundary; the remainder of CM goes to the uP */
	m = (m + 4095) & ~0xfff;
	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);

	/* If more TIDs fit in the remaining space than MC5 is configured
	 * for, grow the server region to soak up the surplus. */
	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
	if (tids < m)
		adap->params.mc5.nservers += m - tids;
}
2524
/* Write a TP register through the PIO indirect-access window: latch the
 * target address, then write the value.  Order matters. */
static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
				  u32 val)
{
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	t3_write_reg(adap, A_TP_PIO_DATA, val);
}
2531
/* One-time static configuration of the TP (transport processor) block:
 * checksum offloads, TCP option handling, delayed-ACK behavior, pacing,
 * and various revision-specific tweaks. */
static void tp_config(struct adapter *adap, const struct tp_params *p)
{
	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
		     V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
		     V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
			 F_IPV6ENABLE | F_NICMODE);
	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
	/* The ESND enable bit moved between T3A and later revisions */
	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
			 adap->params.rev > 0 ? F_ENABLEESND :
			 F_T3A_ENABLEESND);

	t3_set_reg_field(adap, A_TP_PC_CONFIG,
			 F_ENABLEEPCMDAFULL,
			 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);

	if (adap->params.rev > 0) {
		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
		/* Rev > 0 supports automatic Tx pacing; rev 0 uses a fixed
		 * pacing mode instead. */
		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
				 F_TXPACEAUTO);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
	} else
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

	/* Rev C needs a specific table-latency delta */
	if (adap->params.rev == T3_REV_C)
		t3_set_reg_field(adap, A_TP_PC_CONFIG,
				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
				 V_TABLELATENCYDELTA(4));

	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
}
2581
2582/* Desired TP timer resolution in usec */
2583#define TP_TMR_RES 50
2584
2585/* TCP timer values in ms */
2586#define TP_DACK_TIMER 50
2587#define TP_RTO_MIN 250
2588
/**
 *	tp_set_timers - set TP timing parameters
 *	@adap: the adapter to set
 *	@core_clk: the core clock frequency in Hz
 *
 *	Set TP's timing parameters, such as the various timer resolutions and
 *	the TCP timer values.
 */
static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
{
	/* Resolutions are log2 clock dividers derived from the core clock */
	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
	unsigned int tps = core_clk >> tre;	/* timer ticks per second */

	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
		     V_DELAYEDACKRESOLUTION(dack_re) |
		     V_TIMESTAMPRESOLUTION(tstamp_re));
	t3_write_reg(adap, A_TP_DACK_TIMER,
		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
	/* Exponential-backoff multiplier tables, 4 entries per register */
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
		     V_KEEPALIVEMAX(9));

/* Readability macro: "N SECONDS" expands to N * (timer ticks per second) */
#define SECONDS * tps

	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);

#undef SECONDS
}
2632
/**
 *	t3_tp_set_coalescing_size - set receive coalescing size
 *	@adap: the adapter
 *	@size: the receive coalescing size; 0 disables coalescing
 *	@psh: whether a set PSH bit should deliver coalesced data
 *
 *	Set the receive coalescing size and PSH bit handling.  Returns
 *	-EINVAL if @size exceeds MAX_RX_COALESCING_LEN.
 */
int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
{
	u32 val;

	if (size > MAX_RX_COALESCING_LEN)
		return -EINVAL;

	val = t3_read_reg(adap, A_TP_PARA_REG3);
	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);

	if (size) {
		val |= F_RXCOALESCEENABLE;
		if (psh)
			val |= F_RXCOALESCEPSHEN;
		/* NOTE(review): this min() is redundant — size was already
		 * bounded by the -EINVAL check above.  Harmless. */
		size = min(MAX_RX_COALESCING_LEN, size);
		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
	}
	t3_write_reg(adap, A_TP_PARA_REG3, val);
	return 0;
}
2662
2663/**
2664 * t3_tp_set_max_rxsize - set the max receive size
2665 * @adap: the adapter
2666 * @size: the max receive size
2667 *
2668 * Set TP's max receive size. This is the limit that applies when
2669 * receive coalescing is disabled.
2670 */
2671void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2672{
2673 t3_write_reg(adap, A_TP_PARA_REG7,
2674 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2675}
2676
2677static void __devinit init_mtus(unsigned short mtus[])
2678{
2679 /*
2680 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2681 * it can accomodate max size TCP/IP headers when SACK and timestamps
2682 * are enabled and still have at least 8 bytes of payload.
2683 */
Divy Le Ray75758e82007-12-05 10:15:01 -08002684 mtus[0] = 88;
Divy Le Ray8a9fab22007-05-30 21:10:52 -07002685 mtus[1] = 88;
2686 mtus[2] = 256;
2687 mtus[3] = 512;
2688 mtus[4] = 576;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002689 mtus[5] = 1024;
2690 mtus[6] = 1280;
2691 mtus[7] = 1492;
2692 mtus[8] = 1500;
2693 mtus[9] = 2002;
2694 mtus[10] = 2048;
2695 mtus[11] = 4096;
2696 mtus[12] = 4352;
2697 mtus[13] = 8192;
2698 mtus[14] = 9000;
2699 mtus[15] = 9600;
2700}
2701
2702/*
2703 * Initial congestion control parameters.
2704 */
2705static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2706{
2707 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2708 a[9] = 2;
2709 a[10] = 3;
2710 a[11] = 4;
2711 a[12] = 5;
2712 a[13] = 6;
2713 a[14] = 7;
2714 a[15] = 8;
2715 a[16] = 9;
2716 a[17] = 10;
2717 a[18] = 14;
2718 a[19] = 17;
2719 a[20] = 21;
2720 a[21] = 25;
2721 a[22] = 30;
2722 a[23] = 35;
2723 a[24] = 45;
2724 a[25] = 60;
2725 a[26] = 80;
2726 a[27] = 100;
2727 a[28] = 200;
2728 a[29] = 300;
2729 a[30] = 400;
2730 a[31] = 500;
2731
2732 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2733 b[9] = b[10] = 1;
2734 b[11] = b[12] = 2;
2735 b[13] = b[14] = b[15] = b[16] = 3;
2736 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2737 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2738 b[28] = b[29] = 6;
2739 b[30] = b[31] = 7;
2740}
2741
/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 *	t3_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the unrestricted values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *	@mtu_cap: the maximum permitted effective MTU
 *
 *	Write the MTU table with the supplied MTUs capping each at &mtu_cap.
 *	Update the high-speed congestion control table with the supplied alpha,
 *	beta, and MTUs.
 */
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
		  unsigned short alpha[NCCTRL_WIN],
		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
{
	/* approximate packet counts for each congestion window index */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = min(mtus[i], mtu_cap);
		/* log2 of the MTU, rounded to nearest rather than down */
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))	/* round */
			log2--;
		/* entry format: index | log2(MTU) | MTU value */
		t3_write_reg(adap, A_TP_MTU_TABLE,
			     (i << 24) | (log2 << 16) | mtu);

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/*
			 * additive increment derived from payload size
			 * (mtu - 40 header bytes) and window's packet
			 * count, floored at CC_MIN_INCR
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
2789
2790/**
2791 * t3_read_hw_mtus - returns the values in the HW MTU table
2792 * @adap: the adapter
2793 * @mtus: where to store the HW MTU values
2794 *
2795 * Reads the HW MTU table.
2796 */
2797void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2798{
2799 int i;
2800
2801 for (i = 0; i < NMTUS; ++i) {
2802 unsigned int val;
2803
2804 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2805 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2806 mtus[i] = val & 0x3fff;
2807 }
2808}
2809
2810/**
2811 * t3_get_cong_cntl_tab - reads the congestion control table
2812 * @adap: the adapter
2813 * @incr: where to store the alpha values
2814 *
2815 * Reads the additive increments programmed into the HW congestion
2816 * control table.
2817 */
2818void t3_get_cong_cntl_tab(struct adapter *adap,
2819 unsigned short incr[NMTUS][NCCTRL_WIN])
2820{
2821 unsigned int mtu, w;
2822
2823 for (mtu = 0; mtu < NMTUS; ++mtu)
2824 for (w = 0; w < NCCTRL_WIN; ++w) {
2825 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2826 0xffff0000 | (mtu << 5) | w);
2827 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2828 0x1fff;
2829 }
2830}
2831
2832/**
2833 * t3_tp_get_mib_stats - read TP's MIB counters
2834 * @adap: the adapter
2835 * @tps: holds the returned counter values
2836 *
2837 * Returns the values of TP's MIB counters.
2838 */
2839void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2840{
2841 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2842 sizeof(*tps) / sizeof(u32), 0);
2843}
2844
/*
 * Program an ULP RX memory region: write the region's low/high limit
 * registers and advance @start past the region.  Wrapped in
 * do { } while (0) so the multi-statement expansion is safe inside
 * un-braced if/else bodies.
 */
#define ulp_region(adap, name, start, len) \
	do { \
		t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
		t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
			     (start) + (len) - 1); \
		(start) += (len); \
	} while (0)

/*
 * Program an ULP TX memory region at @start; unlike ulp_region this does
 * NOT advance @start.
 */
#define ulptx_region(adap, name, start, len) \
	do { \
		t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
		t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
			     (start) + (len) - 1); \
	} while (0)
2855
/*
 * Carve the offload engines' memory regions out of Rx channel memory,
 * starting at p->chan_rx_size.  ulp_region advances m past each RX region;
 * ulptx_region programs a TX region at the current m without advancing it,
 * so each ulptx_region shares its start with the ulp_region that follows.
 * The region sizes sum to chan_rx_size (2*1/8 + 3*1/4 = 1).
 */
static void ulp_config(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m = p->chan_rx_size;	/* running region start */

	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
	/* match all tags for TDDP */
	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
2869
Divy Le Ray480fe1a2007-05-30 21:10:58 -07002870/**
2871 * t3_set_proto_sram - set the contents of the protocol sram
2872 * @adapter: the adapter
2873 * @data: the protocol image
2874 *
2875 * Write the contents of the protocol SRAM.
2876 */
2877int t3_set_proto_sram(struct adapter *adap, u8 *data)
2878{
2879 int i;
2880 u32 *buf = (u32 *)data;
2881
2882 for (i = 0; i < PROTO_SRAM_LINES; i++) {
2883 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
2884 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
2885 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
2886 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
2887 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
Jeff Garzik2eab17a2007-11-23 21:59:45 -05002888
Divy Le Ray480fe1a2007-05-30 21:10:58 -07002889 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2890 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2891 return -EIO;
2892 }
2893 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2894
2895 return 0;
2896}
2897
/**
 *	t3_config_trace_filter - configure one of the HW trace filters
 *	@adapter: the adapter
 *	@tp: the trace filter match parameters
 *	@filter_index: 0 selects the Tx filter, nonzero the Rx filter
 *	@invert: set bit 29 of the key (inversion flag per the key layout)
 *	@enable: set bit 28 of the key (enable flag per the key layout)
 *
 *	Packs the match parameters into four key/mask word pairs and writes
 *	them to consecutive indirect TP registers.
 */
void t3_config_trace_filter(struct adapter *adapter,
			    const struct trace_params *tp, int filter_index,
			    int invert, int enable)
{
	u32 addr, key[4], mask[4];

	/* pack sport/sip/dport/dip/proto/vlan/intf into 4 32-bit words */
	key[0] = tp->sport | (tp->sip << 16);
	key[1] = (tp->sip >> 16) | (tp->dport << 16);
	key[2] = tp->dip;
	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);

	/* the mask words use the identical layout */
	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
	mask[2] = tp->dip_mask;
	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);

	if (invert)
		key[3] |= (1 << 29);
	if (enable)
		key[3] |= (1 << 28);

	/* key and mask words are written to consecutive addresses */
	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
	tp_wr_indirect(adapter, addr++, key[0]);
	tp_wr_indirect(adapter, addr++, mask[0]);
	tp_wr_indirect(adapter, addr++, key[1]);
	tp_wr_indirect(adapter, addr++, mask[1]);
	tp_wr_indirect(adapter, addr++, key[2]);
	tp_wr_indirect(adapter, addr++, mask[2]);
	tp_wr_indirect(adapter, addr++, key[3]);
	tp_wr_indirect(adapter, addr, mask[3]);
	t3_read_reg(adapter, A_TP_PIO_DATA);	/* flush */
}
2930
/**
 *	t3_config_sched - configure a HW traffic scheduler
 *	@adap: the adapter
 *	@kbps: target rate in Kbps
 *	@sched: the scheduler index
 *
 *	Configure a HW scheduler for the target rate.  A @kbps of 0 programs
 *	zero rate parameters.  Returns -EINVAL if no clocks/bytes-per-tick
 *	combination can represent the requested rate.
 */
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;	/* cclk presumably in kHz -> Hz */
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;	/* Kbps -> bytes per second */
		/*
		 * Exhaustively search for the clocks-per-tick (cpt) and
		 * bytes-per-tick (bpt) pair, both 8-bit fields, whose rate
		 * bpt * clk / cpt is closest to the request.
		 */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;	/* ticks per second */
			bpt = (kbps + tps / 2) / tps;	/* rounded bytes/tick */
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta <= mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				/* bpt only overflows from here on; stop */
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	/* each PIO data word holds parameters for two schedulers */
	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}
2974
/*
 * Initialize the TP block: base configuration, VLAN acceleration, and for
 * offload-capable adapters the TCP timers and the TP free-list init.
 * Returns non-zero if the free-list initialization times out.
 */
static int tp_init(struct adapter *adap, const struct tp_params *p)
{
	int busy = 0;

	tp_config(adap, p);
	t3_set_vlan_accel(adap, 3, 0);

	if (is_offload(adap)) {
		/* core clock in Hz (vpd.cclk presumably in kHz) */
		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
		/* kick off free-list initialization and wait for completion */
		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
				       0, 1000, 5);
		if (busy)
			CH_ERR(adap, "TP initialization timed out\n");
	}

	/* only release TP from reset if initialization succeeded */
	if (!busy)
		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
	return busy;
}
2995
2996int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2997{
2998 if (port_mask & ~((1 << adap->params.nports) - 1))
2999 return -EINVAL;
3000 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3001 port_mask << S_PORT0ACTIVE);
3002 return 0;
3003}
3004
/*
 * Perform the bits of HW initialization that are dependent on the number
 * of available ports.
 */
static void init_hw_for_avail_ports(struct adapter *adap, int nports)
{
	int i;

	if (nports == 1) {
		/* single port: no round-robin arbitration needed */
		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
			     F_PORT0ACTIVE | F_ENFORCEPKT);
		t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
	} else {
		/* two ports: round-robin with equal ULP TX DMA weights */
		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
			     F_ENFORCEPKT);
		/* split PM1 TX memory between the two ports */
		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
		/* program all 16 TX modulation queue table entries */
		for (i = 0; i < 16; i++)
			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
				     (i << 16) | 0x1010);
	}
}
3036
/*
 * Calibrate the MAC I/O impedance.  XAUI adapters use the automatic
 * calibration engine, retried up to 5 times; RGMII adapters get fixed
 * pull-up/pull-down values.  Returns 0 on success, -1 if the XAUI
 * calibration never completes cleanly.
 */
static int calibrate_xgm(struct adapter *adapter)
{
	if (uses_xaui(adapter)) {
		unsigned int v, i;

		for (i = 0; i < 5; ++i) {
			/* start a calibration cycle and give it 1ms */
			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
			t3_read_reg(adapter, A_XGM_XAUI_IMP);	/* flush */
			msleep(1);
			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
				/* done without fault: latch measured value */
				t3_write_reg(adapter, A_XGM_XAUI_IMP,
					     V_XAUIIMP(G_CALIMP(v) >> 2));
				return 0;
			}
		}
		CH_ERR(adapter, "MAC calibration failed\n");
		return -1;
	} else {
		t3_write_reg(adapter, A_XGM_RGMII_IMP,
			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 F_XGM_IMPSETUPDATE);
	}
	return 0;
}
3063
/*
 * T3B variant of the RGMII impedance setup: pulse CALRESET, then
 * IMPSETUPDATE, then CALUPDATE to load the new values.  The exact write
 * ordering matters.  XAUI adapters need no action here.
 */
static void calibrate_xgm_t3b(struct adapter *adapter)
{
	if (!uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
				 F_XGM_IMPSETUPDATE);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
	}
}
3078
/* MC7 memory controller DRAM timing parameters (field names spell out the
 * delays; units presumably controller cycles -- confirm against HW docs) */
struct mc7_timing_params {
	unsigned char ActToPreDly;	/* active-to-precharge delay */
	unsigned char ActToRdWrDly;	/* active-to-read/write delay */
	unsigned char PreCyc;		/* precharge cycles */
	unsigned char RefCyc[5];	/* refresh cycles, indexed by density */
	unsigned char BkCyc;		/* bank cycles */
	unsigned char WrToRdDly;	/* write-to-read delay */
	unsigned char RdToWrDly;	/* read-to-write delay */
};
3088
3089/*
3090 * Write a value to a register and check that the write completed. These
3091 * writes normally complete in a cycle or two, so one read should suffice.
3092 * The very first read exists to flush the posted write to the device.
3093 */
3094static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3095{
3096 t3_write_reg(adapter, addr, val);
3097 t3_read_reg(adapter, addr); /* flush */
3098 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3099 return 0;
3100 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3101 return -EIO;
3102}
3103
/*
 * Bring up one MC7 memory controller: calibrate the interface, program the
 * DRAM timing, run the mode-register init sequence, enable refresh and ECC,
 * and BIST the whole memory.  @mc7_clock is the memory clock in kHz,
 * @mem_type indexes the mode/timing tables below.  Returns 0 on success
 * (including when no memory is attached), -1 on any timeout or fault.
 */
static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	static const struct mc7_timing_params mc7_timings[] = {
		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	struct adapter *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	if (!mc7->size)		/* no memory attached to this controller */
		return 0;

	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	/* enable the memory interface before touching the DRAM */
	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
	msleep(1);

	if (!slow) {
		/* single-shot calibration; all status bits must clear */
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
		msleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	/* program the DRAM timing; RefCyc is selected by density */
	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

	/* init sequence: precharge, then extended and normal mode regs */
	val = slow ? 3 : 6;
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
		udelay(5);
	}

	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	/* clock value is in KHz */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* ns */
	mc7_clock /= 1000000;	/* KHz->MHz, ns->us */

	/* enable periodic refresh with the computed divisor */
	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */

	/* enable ECC and BIST (pattern 0) over the whole address range */
	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */

	/* BIST can take a while on large memories; poll up to ~12.5s */
	attempts = 50;
	do {
		msleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}

	/* Enable normal memory accesses. */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

out_fail:
	return -1;
}
3216
/*
 * Configure the PCIe ACK latency and replay timer limits from the
 * negotiated link width and max payload size; the tables are indexed
 * [log2(link width)][payload size code].  Also clears any latched PCIe
 * errors and sets DMASTOPEN/CLIDECEN in PCIE_CFG.
 */
static void config_pcie(struct adapter *adap)
{
	static const u16 ack_lat[4][6] = {
		{237, 416, 559, 1071, 2095, 4143},
		{128, 217, 289, 545, 1057, 2081},
		{73, 118, 154, 282, 538, 1050},
		{67, 107, 86, 150, 278, 534}
	};
	static const u16 rpl_tmr[4][6] = {
		{711, 1248, 1677, 3213, 6285, 12429},
		{384, 651, 867, 1635, 3171, 6243},
		{219, 354, 462, 846, 1614, 3150},
		{201, 321, 258, 450, 834, 1602}
	};

	u16 val;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
			     &val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
			     &val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	/* rev 0 has no separate Rx fast-training count; reuse the Tx value */
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & 1)		/* check LOsEnable */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	/* rev 0 (T3A) uses a different ACK latency field layout */
	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	/* clear all latched PCIe error bits */
	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
	t3_set_reg_field(adap, A_PCIE_CFG, 0,
			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}
3268
/*
 * Initialize and configure T3 HW modules.  This performs the
 * initialization steps that need to be done once after a card is reset.
 * MAC and PHY initialization is handled separately whenever a port is enabled.
 *
 * fw_params are passed to FW and their value is platform dependent.  Only the
 * top 8 bits are available for use, the rest must be 0.
 *
 * Returns 0 on success, -EIO on any initialization failure or timeout.
 */
int t3_init_hw(struct adapter *adapter, u32 fw_params)
{
	int err = -EIO, attempts, i;
	const struct vpd_params *vpd = &adapter->params.vpd;

	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	if (vpd->mclk) {
		/* external memory present: partition it and bring up the
		 * MC7 controllers, the MC5 TCAM, and the SGE CQ contexts */
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
				adapter->params.mc5.nfilters,
				adapter->params.mc5.nroutes))
			goto out_err;

		for (i = 0; i < 32; i++)
			if (clear_sge_ctxt(adapter, i, F_CQ))
				goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);

	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
				 F_DMASTOPEN | F_CLIDECEN);

	if (adapter->params.rev == T3_REV_C)
		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
				 F_CFG_CQE_SOP_MASK);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
	init_hw_for_avail_ports(adapter, adapter->params.nports);
	t3_sge_init(adapter, &adapter->params.sge);

	/* hand the FW its boot parameters and kick off the uP */
	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */

	attempts = 100;
	do {			/* wait for uP to initialize */
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts) {
		CH_ERR(adapter, "uP initialization timed out\n");
		goto out_err;
	}

	err = 0;
out_err:
	return err;
}
3347
3348/**
3349 * get_pci_mode - determine a card's PCI mode
3350 * @adapter: the adapter
3351 * @p: where to store the PCI settings
3352 *
3353 * Determines a card's PCI mode and associated parameters, such as speed
3354 * and width.
3355 */
3356static void __devinit get_pci_mode(struct adapter *adapter,
3357 struct pci_params *p)
3358{
3359 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3360 u32 pci_mode, pcie_cap;
3361
3362 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3363 if (pcie_cap) {
3364 u16 val;
3365
3366 p->variant = PCI_VARIANT_PCIE;
3367 p->pcie_cap_addr = pcie_cap;
3368 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3369 &val);
3370 p->width = (val >> 4) & 0x3f;
3371 return;
3372 }
3373
3374 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3375 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3376 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3377 pci_mode = G_PCIXINITPAT(pci_mode);
3378 if (pci_mode == 0)
3379 p->variant = PCI_VARIANT_PCI;
3380 else if (pci_mode < 4)
3381 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3382 else if (pci_mode < 8)
3383 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3384 else
3385 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3386}
3387
3388/**
3389 * init_link_config - initialize a link's SW state
3390 * @lc: structure holding the link state
3391 * @ai: information about the current card
3392 *
3393 * Initializes the SW state maintained for each link, including the link's
3394 * capabilities and default speed/duplex/flow-control/autonegotiation
3395 * settings.
3396 */
3397static void __devinit init_link_config(struct link_config *lc,
3398 unsigned int caps)
3399{
3400 lc->supported = caps;
3401 lc->requested_speed = lc->speed = SPEED_INVALID;
3402 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3403 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3404 if (lc->supported & SUPPORTED_Autoneg) {
3405 lc->advertising = lc->supported;
3406 lc->autoneg = AUTONEG_ENABLE;
3407 lc->requested_fc |= PAUSE_AUTONEG;
3408 } else {
3409 lc->advertising = 0;
3410 lc->autoneg = AUTONEG_DISABLE;
3411 }
3412}
3413
3414/**
3415 * mc7_calc_size - calculate MC7 memory size
3416 * @cfg: the MC7 configuration
3417 *
3418 * Calculates the size of an MC7 memory in bytes from the value of its
3419 * configuration register.
3420 */
3421static unsigned int __devinit mc7_calc_size(u32 cfg)
3422{
3423 unsigned int width = G_WIDTH(cfg);
3424 unsigned int banks = !!(cfg & F_BKS) + 1;
3425 unsigned int org = !!(cfg & F_ORG) + 1;
3426 unsigned int density = G_DEN(cfg);
3427 unsigned int MBs = ((256 << density) * banks) / (org << width);
3428
3429 return MBs << 20;
3430}
3431
3432static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3433 unsigned int base_addr, const char *name)
3434{
3435 u32 cfg;
3436
3437 mc7->adapter = adapter;
3438 mc7->name = name;
3439 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3440 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
Divy Le Ray8ac3ba62007-03-31 00:23:19 -07003441 mc7->size = mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
Divy Le Ray4d22de32007-01-18 22:04:14 -05003442 mc7->width = G_WIDTH(cfg);
3443}
3444
3445void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3446{
3447 mac->adapter = adapter;
3448 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3449 mac->nucast = 1;
3450
3451 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3452 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3453 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3454 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3455 F_ENRGMII, 0);
3456 }
3457}
3458
/*
 * One-time early hardware setup: initialize the MI1 interface, program the
 * I2C clock divider, configure GPIOs, and bring the clocks of both XGMACs
 * out of reset.
 */
void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
{
	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);

	mi1_init(adapter, ai);
	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));

	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* Enable MAC clocks so we can access the registers */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);	/* flush */

	/* release the clock divider reset on both MACs */
	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
}
3484
/*
 * Reset the adapter.
 * Older PCIe cards lose their config space during reset, PCI-X
 * ones don't.
 */
static int t3_reset_adapter(struct adapter *adapter)
{
	/* only pre-B2 PCIe parts need config space saved and restored */
	int i, save_and_restore_pcie =
	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
	uint16_t devid = 0;

	if (save_and_restore_pcie)
		pci_save_state(adapter->pdev);
	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);

	/*
	 * Delay. Give Some time to device to reset fully.
	 * XXX The delay time should be modified.
	 */
	for (i = 0; i < 10; i++) {
		msleep(50);
		/* poll the vendor-ID config word (offset 0); 0x1425 is the
		 * Chelsio PCI vendor ID, so seeing it means the device is
		 * responding again */
		pci_read_config_word(adapter->pdev, 0x00, &devid);
		if (devid == 0x1425)
			break;
	}

	if (devid != 0x1425)
		return -1;

	if (save_and_restore_pcie)
		pci_restore_state(adapter->pdev);
	return 0;
}
3518
Divy Le Rayb8819552007-12-17 18:47:31 -08003519static int __devinit init_parity(struct adapter *adap)
3520{
3521 int i, err, addr;
3522
3523 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3524 return -EBUSY;
3525
3526 for (err = i = 0; !err && i < 16; i++)
3527 err = clear_sge_ctxt(adap, i, F_EGRESS);
3528 for (i = 0xfff0; !err && i <= 0xffff; i++)
3529 err = clear_sge_ctxt(adap, i, F_EGRESS);
3530 for (i = 0; !err && i < SGE_QSETS; i++)
3531 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3532 if (err)
3533 return err;
3534
3535 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3536 for (i = 0; i < 4; i++)
3537 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3538 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3539 F_IBQDBGWR | V_IBQDBGQID(i) |
3540 V_IBQDBGADDR(addr));
3541 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3542 F_IBQDBGBUSY, 0, 2, 1);
3543 if (err)
3544 return err;
3545 }
3546 return 0;
3547}
3548
/*
 * Initialize adapter SW state for the various HW modules, set initial values
 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
 * interface.
 *
 * @adapter: the adapter being initialized
 * @ai: static description of this adapter model (port count, PHY base
 *	address, MDIO ops, etc.)
 * @reset: if nonzero, perform a full adapter reset before configuration
 *
 * Returns 0 on success, a negative errno from VPD/parity initialization,
 * or -1 if the adapter reset fails.
 */
int __devinit t3_prep_adapter(struct adapter *adapter,
			      const struct adapter_info *ai, int reset)
{
	int ret;
	unsigned int i, j = 0;	/* j walks the VPD port-type table below */

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports;
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
	adapter->params.linkpoll_period = 0;
	/* 10G MACs have their statistics accumulated 10x as often;
	 * presumably their counters overflow sooner -- TODO confirm. */
	adapter->params.stats_update_period = is_10G(adapter) ?
	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
	/* The VPD EEPROM supplies clocks, MAC addresses and port types;
	 * nothing below can proceed without it. */
	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	t3_sge_prep(adapter, &adapter->params.sge);

	/* A nonzero memory clock in the VPD indicates external MC7
	 * memories are present; size them and derive the TP memory
	 * layout (page sizes, per-channel partitions, timer queues). */
	if (adapter->params.vpd.mclk) {
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

		p->nchan = ai->nports;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		/* More timer queues with >= 128MB of CM memory or on
		 * rev > 0 silicon. */
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
		    adapter->params.rev > 0 ? 12 : 6;
	}

	/* Offload mode requires all three external memories. */
	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
				  t3_mc7_size(&adapter->pmtx) &&
				  t3_mc7_size(&adapter->cm);

	if (is_offload(adapter)) {
		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		/* Rev 0 silicon gets no filters -- TODO confirm whether
		 * this is a hardware erratum or a resource tradeoff. */
		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
		    DEFAULT_NFILTERS : 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

		init_mtus(adapter->params.mtus);
		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
	}

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		u8 hw_addr[6];
		struct port_info *p = adap2pinfo(adapter, i);

		/* Skip unused entries in the VPD port-type table so that
		 * j lands on the entry for this physical port. */
		while (!adapter->params.vpd.port_type[j])
			++j;

		p->port_type = &port_types[adapter->params.vpd.port_type[j]];
		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
				       ai->mdio_ops);
		mac_prep(&p->mac, adapter, j);
		++j;

		/*
		 * The VPD EEPROM stores the base Ethernet address for the
		 * card. A port's address is derived from the base by adding
		 * the port's index to the base's low octet.
		 */
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		memcpy(adapter->port[i]->dev_addr, hw_addr,
		       ETH_ALEN);
		memcpy(adapter->port[i]->perm_addr, hw_addr,
		       ETH_ALEN);
		init_link_config(&p->link_config, p->port_type->caps);
		/* PHYs stay powered down until the interface is opened. */
		p->phy.ops->power_down(&p->phy, 1);
		/* Ports without link interrupts fall back to polling. */
		if (!(p->port_type->caps & SUPPORTED_IRQ))
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}
3653
3654void t3_led_ready(struct adapter *adapter)
3655{
3656 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3657 F_GPIO0_OUT_VAL);
3658}