Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/init.h>
36#include <linux/delay.h>
37#include "cxgb4.h"
38#include "t4_regs.h"
39#include "t4fw_api.h"
40
41/**
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
50 *
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not %NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
55 */
Roland Dreierde498c82010-04-21 08:59:17 +000056static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +000058{
59 while (1) {
60 u32 val = t4_read_reg(adapter, reg);
61
62 if (!!(val & mask) == polarity) {
63 if (valp)
64 *valp = val;
65 return 0;
66 }
67 if (--attempts == 0)
68 return -EAGAIN;
69 if (delay)
70 udelay(delay);
71 }
72}
73
74static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
75 int polarity, int attempts, int delay)
76{
77 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
78 delay, NULL);
79}
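/*
 * Usage sketch (illustrative only, not part of the original driver):
 * callers poll a hardware ready/busy bit like this, e.g. waiting up to
 * 10 attempts, 1 us apart, for a BUSY-style bit to clear:
 *
 *	if (t4_wait_op_done(adap, SOME_STATUS_REG, SOME_BUSY_BIT, 0, 10, 1))
 *		return -EAGAIN;
 *
 * SOME_STATUS_REG and SOME_BUSY_BIT are placeholders, not real T4 names;
 * actual callers below (t4_mc_read(), sf1_read()) pass real register and
 * bit definitions from t4_regs.h.
 */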
80
81/**
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
87 *
88 * Sets a register field specified by the supplied mask to the
89 * given value.
90 */
91void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
92 u32 val)
93{
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
95
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush */
98}
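/*
 * Usage sketch (illustrative only): updating a multi-bit field is a
 * read-modify-write of just the masked bits, e.g. setting a hypothetical
 * 2-bit field at bits 5:4 of a register to the value 2:
 *
 *	t4_set_reg_field(adap, SOME_REG, 0x3 << 4, 2 << 4);
 *
 * Bits outside the mask are preserved, and the read-back above flushes the
 * posted write before the caller proceeds.
 */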
99
100/**
101 * t4_read_indirect - read indirectly addressed registers
102 * @adap: the adapter
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
108 *
109 * Reads registers that are accessed indirectly through an address/data
110 * register pair.
111 */
Roland Dreierde498c82010-04-21 08:59:17 +0000112static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals,
114 unsigned int nregs, unsigned int start_idx)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000115{
116 while (nregs--) {
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
119 start_idx++;
120 }
121}
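/*
 * Usage sketch (illustrative only): dumping a few consecutive indirectly
 * addressed registers through an address/data register pair:
 *
 *	u32 vals[4];
 *
 *	t4_read_indirect(adap, SOME_ADDR_REG, SOME_DATA_REG, vals, 4, 0x10);
 *
 * reads indirect registers 0x10..0x13 into vals[].  SOME_ADDR_REG and
 * SOME_DATA_REG stand in for one of the chip's real address/data pairs.
 */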
122
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000123/*
124 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
125 */
126static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
127 u32 mbox_addr)
128{
129 for ( ; nflit; nflit--, mbox_addr += 8)
130 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
131}
132
133/*
134 * Handle a FW assertion reported in a mailbox.
135 */
136static void fw_asrt(struct adapter *adap, u32 mbox_addr)
137{
138 struct fw_debug_cmd asrt;
139
140 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
141 dev_alert(adap->pdev_dev,
142 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
143 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
144 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
145}
146
147static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
148{
149 dev_err(adap->pdev_dev,
150 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
151 (unsigned long long)t4_read_reg64(adap, data_reg),
152 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
153 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
154 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
155 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
156 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
157 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
158 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
159}
160
161/**
162 * t4_wr_mbox_meat - send a command to FW through the given mailbox
163 * @adap: the adapter
164 * @mbox: index of the mailbox to use
165 * @cmd: the command to write
166 * @size: command length in bytes
167 * @rpl: where to optionally store the reply
168 * @sleep_ok: if true we may sleep while awaiting command completion
169 *
170 * Sends the given command to FW through the selected mailbox and waits
171 * for the FW to execute the command. If @rpl is not %NULL it is used to
172 * store the FW's reply to the command. The command and its optional
173 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
174 * to respond. @sleep_ok determines whether we may sleep while awaiting
175 * the response. If sleeping is allowed we use progressive backoff;
176 * otherwise we spin.
177 *
178 * The return value is 0 on success or a negative errno on failure. A
179 * failure can happen either because we are not able to execute the
180 * command or FW executes it but signals an error. In the latter case
181 * the return value is the error code indicated by FW (negated).
182 */
183int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
184 void *rpl, bool sleep_ok)
185{
Joe Perches005b5712010-12-14 21:36:53 +0000186 static const int delay[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000187 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
188 };
189
190 u32 v;
191 u64 res;
192 int i, ms, delay_idx;
193 const __be64 *p = cmd;
194 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
195 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
196
197 if ((size & 15) || size > MBOX_LEN)
198 return -EINVAL;
199
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +0000200 /*
201 * If the device is off-line, as in EEH, commands will time out.
202 * Fail them early so we don't waste time waiting.
203 */
204 if (adap->pdev->error_state != pci_channel_io_normal)
205 return -EIO;
206
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000207 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
208 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
209 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
210
211 if (v != MBOX_OWNER_DRV)
212 return v ? -EBUSY : -ETIMEDOUT;
213
214 for (i = 0; i < size; i += 8)
215 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
216
217 t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
218 t4_read_reg(adap, ctl_reg); /* flush write */
219
220 delay_idx = 0;
221 ms = delay[0];
222
223 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
224 if (sleep_ok) {
225 ms = delay[delay_idx]; /* last element may repeat */
226 if (delay_idx < ARRAY_SIZE(delay) - 1)
227 delay_idx++;
228 msleep(ms);
229 } else
230 mdelay(ms);
231
232 v = t4_read_reg(adap, ctl_reg);
233 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
234 if (!(v & MBMSGVALID)) {
235 t4_write_reg(adap, ctl_reg, 0);
236 continue;
237 }
238
239 res = t4_read_reg64(adap, data_reg);
240 if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
241 fw_asrt(adap, data_reg);
242 res = FW_CMD_RETVAL(EIO);
243 } else if (rpl)
244 get_mbox_rpl(adap, rpl, size / 8, data_reg);
245
246 if (FW_CMD_RETVAL_GET((int)res))
247 dump_mbox(adap, mbox, data_reg);
248 t4_write_reg(adap, ctl_reg, 0);
249 return -FW_CMD_RETVAL_GET((int)res);
250 }
251 }
252
253 dump_mbox(adap, mbox, data_reg);
254 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
255 *(const u8 *)cmd, mbox);
256 return -ETIMEDOUT;
257}
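/*
 * Usage sketch (illustrative only): firmware commands are fixed-size,
 * 16-byte-multiple structures built in big-endian form and normally sent
 * through t4_wr_mbox(), the sleeping wrapper around t4_wr_mbox_meat()
 * declared in cxgb4.h.  The pattern used by t4_link_start() and
 * t4_restart_aneg() later in this file is typical:
 *
 *	struct fw_port_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
 *			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
 *	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
 *				  FW_LEN16(c));
 *	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 *
 * Passing a non-NULL reply pointer instead of NULL returns the FW's reply,
 * which is the same length as the command.
 */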
258
259/**
260 * t4_mc_read - read from MC through backdoor accesses
261 * @adap: the adapter
262 * @addr: address of first byte requested
263 * @data: 64 bytes of data containing the requested address
264 * @ecc: where to store the corresponding 64-bit ECC word
265 *
266 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
267 * that covers the requested address @addr. If @ecc is not %NULL it
268 * is assigned the 64-bit ECC word for the read data.
269 */
270int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
271{
272 int i;
273
274 if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
275 return -EBUSY;
276 t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
277 t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
278 t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
279 t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
280 BIST_CMD_GAP(1));
281 i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
282 if (i)
283 return i;
284
285#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
286
287 for (i = 15; i >= 0; i--)
288 *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
289 if (ecc)
290 *ecc = t4_read_reg64(adap, MC_DATA(16));
291#undef MC_DATA
292 return 0;
293}
294
295/**
296 * t4_edc_read - read from EDC through backdoor accesses
297 * @adap: the adapter
298 * @idx: which EDC to access
299 * @addr: address of first byte requested
300 * @data: 64 bytes of data containing the requested address
301 * @ecc: where to store the corresponding 64-bit ECC word
302 *
303 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
304 * that covers the requested address @addr. If @ecc is not %NULL it
305 * is assigned the 64-bit ECC word for the read data.
306 */
307int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
308{
309 int i;
310
311 idx *= EDC_STRIDE;
312 if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
313 return -EBUSY;
314 t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
315 t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
316 t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
317 t4_write_reg(adap, EDC_BIST_CMD + idx,
318 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
319 i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
320 if (i)
321 return i;
322
323#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
324
325 for (i = 15; i >= 0; i--)
326 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
327 if (ecc)
328 *ecc = t4_read_reg64(adap, EDC_DATA(16));
329#undef EDC_DATA
330 return 0;
331}
332
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000333#define EEPROM_STAT_ADDR 0x7bfc
334#define VPD_BASE 0
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000335#define VPD_LEN 512
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000336
337/**
338 * t4_seeprom_wp - enable/disable EEPROM write protection
339 * @adapter: the adapter
340 * @enable: whether to enable or disable write protection
341 *
342 * Enables or disables write protection on the serial EEPROM.
343 */
344int t4_seeprom_wp(struct adapter *adapter, bool enable)
345{
346 unsigned int v = enable ? 0xc : 0;
347 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
348 return ret < 0 ? ret : 0;
349}
350
351/**
352 * get_vpd_params - read VPD parameters from VPD EEPROM
353 * @adapter: adapter to read
354 * @p: where to store the parameters
355 *
356 * Reads card parameters stored in VPD EEPROM.
357 */
358static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
359{
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000360 int i, ret;
Dimitris Michailidisec164002010-12-14 21:36:45 +0000361 int ec, sn;
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000362 u8 vpd[VPD_LEN], csum;
Dimitris Michailidis23d88e12010-12-14 21:36:54 +0000363 unsigned int vpdr_len, kw_offset, id_len;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000364
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000365 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000366 if (ret < 0)
367 return ret;
368
Dimitris Michailidis23d88e12010-12-14 21:36:54 +0000369 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
370 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
371 return -EINVAL;
372 }
373
374 id_len = pci_vpd_lrdt_size(vpd);
375 if (id_len > ID_LEN)
376 id_len = ID_LEN;
377
378 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
379 if (i < 0) {
380 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
381 return -EINVAL;
382 }
383
384 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
385 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
386 if (vpdr_len + kw_offset > VPD_LEN) {
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000387 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
388 return -EINVAL;
389 }
390
391#define FIND_VPD_KW(var, name) do { \
Dimitris Michailidis23d88e12010-12-14 21:36:54 +0000392 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000393 if (var < 0) { \
394 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
395 return -EINVAL; \
396 } \
397 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
398} while (0)
399
400 FIND_VPD_KW(i, "RV");
401 for (csum = 0; i >= 0; i--)
402 csum += vpd[i];
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000403
404 if (csum) {
405 dev_err(adapter->pdev_dev,
406 "corrupted VPD EEPROM, actual csum %u\n", csum);
407 return -EINVAL;
408 }
409
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000410 FIND_VPD_KW(ec, "EC");
411 FIND_VPD_KW(sn, "SN");
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000412#undef FIND_VPD_KW
413
Dimitris Michailidis23d88e12010-12-14 21:36:54 +0000414 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000415 strim(p->id);
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000416 memcpy(p->ec, vpd + ec, EC_LEN);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000417 strim(p->ec);
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000418 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
419 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000420 strim(p->sn);
421 return 0;
422}
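/*
 * Rough sketch of the VPD layout consumed above (informational only; the
 * PCI VPD specification is authoritative):
 *
 *	[ID-string tag][len][product name ...]
 *	[VPD-R tag][len][ "EC" kw ][ "SN" kw ] ... [ "RV" kw + checksum ]
 *
 * Each keyword is a 3-byte header (two keyword characters and a length,
 * PCI_VPD_INFO_FLD_HDR_SIZE) followed by its data.  The "RV" byte is
 * chosen so that all VPD bytes from offset 0 up to and including it sum
 * to zero, which is exactly what the csum loop above verifies.
 */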
423
424/* serial flash and firmware constants */
425enum {
426 SF_ATTEMPTS = 10, /* max retries for SF operations */
427
428 /* flash command opcodes */
429 SF_PROG_PAGE = 2, /* program page */
430 SF_WR_DISABLE = 4, /* disable writes */
431 SF_RD_STATUS = 5, /* read status register */
432 SF_WR_ENABLE = 6, /* enable writes */
433 SF_RD_DATA_FAST = 0xb, /* read flash */
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000434 SF_RD_ID = 0x9f, /* read ID */
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000435 SF_ERASE_SECTOR = 0xd8, /* erase sector */
436
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000437 FW_MAX_SIZE = 512 * 1024,
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000438};
439
440/**
441 * sf1_read - read data from the serial flash
442 * @adapter: the adapter
443 * @byte_cnt: number of bytes to read
444 * @cont: whether another operation will be chained
445 * @lock: whether to lock SF for PL access only
446 * @valp: where to store the read data
447 *
448 * Reads up to 4 bytes of data from the serial flash. The location of
449 * the read needs to be specified prior to calling this by issuing the
450 * appropriate commands to the serial flash.
451 */
452static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
453 int lock, u32 *valp)
454{
455 int ret;
456
457 if (!byte_cnt || byte_cnt > 4)
458 return -EINVAL;
459 if (t4_read_reg(adapter, SF_OP) & BUSY)
460 return -EBUSY;
461 cont = cont ? SF_CONT : 0;
462 lock = lock ? SF_LOCK : 0;
463 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
464 ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
465 if (!ret)
466 *valp = t4_read_reg(adapter, SF_DATA);
467 return ret;
468}
469
470/**
471 * sf1_write - write data to the serial flash
472 * @adapter: the adapter
473 * @byte_cnt: number of bytes to write
474 * @cont: whether another operation will be chained
475 * @lock: whether to lock SF for PL access only
476 * @val: value to write
477 *
478 * Writes up to 4 bytes of data to the serial flash. The location of
479 * the write needs to be specified prior to calling this by issuing the
480 * appropriate commands to the serial flash.
481 */
482static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
483 int lock, u32 val)
484{
485 if (!byte_cnt || byte_cnt > 4)
486 return -EINVAL;
487 if (t4_read_reg(adapter, SF_OP) & BUSY)
488 return -EBUSY;
489 cont = cont ? SF_CONT : 0;
490 lock = lock ? SF_LOCK : 0;
491 t4_write_reg(adapter, SF_DATA, val);
492 t4_write_reg(adapter, SF_OP, lock |
493 cont | BYTECNT(byte_cnt - 1) | OP_WR);
494 return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
495}
496
497/**
498 * flash_wait_op - wait for a flash operation to complete
499 * @adapter: the adapter
500 * @attempts: max number of polls of the status register
501 * @delay: delay between polls in ms
502 *
503 * Wait for a flash operation to complete by polling the status register.
504 */
505static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
506{
507 int ret;
508 u32 status;
509
510 while (1) {
511 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
512 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
513 return ret;
514 if (!(status & 1))
515 return 0;
516 if (--attempts == 0)
517 return -EAGAIN;
518 if (delay)
519 msleep(delay);
520 }
521}
522
523/**
524 * t4_read_flash - read words from serial flash
525 * @adapter: the adapter
526 * @addr: the start address for the read
527 * @nwords: how many 32-bit words to read
528 * @data: where to store the read data
529 * @byte_oriented: whether to store data as bytes or as words
530 *
531 * Read the specified number of 32-bit words from the serial flash.
532 * If @byte_oriented is set the read data is stored as a byte array
533 * (i.e., big-endian), otherwise as 32-bit words in the platform's
534 * natural endianness.
535 */
Roland Dreierde498c82010-04-21 08:59:17 +0000536static int t4_read_flash(struct adapter *adapter, unsigned int addr,
537 unsigned int nwords, u32 *data, int byte_oriented)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000538{
539 int ret;
540
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000541 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000542 return -EINVAL;
543
544 addr = swab32(addr) | SF_RD_DATA_FAST;
545
546 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
547 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
548 return ret;
549
550 for ( ; nwords; nwords--, data++) {
551 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
552 if (nwords == 1)
553 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
554 if (ret)
555 return ret;
556 if (byte_oriented)
557 *data = htonl(*data);
558 }
559 return 0;
560}
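/*
 * Usage sketch (illustrative only): get_fw_version() below reads a single
 * word with @byte_oriented == 0 and so gets the version in host order,
 * while t4_check_fw_version() reads the interface version words with
 * @byte_oriented == 1, leaving the buffer as a big-endian byte image:
 *
 *	u32 vers;
 *
 *	ret = t4_read_flash(adap, adap->params.sf_fw_start +
 *			    offsetof(struct fw_hdr, fw_ver), 1, &vers, 0);
 */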
561
562/**
563 * t4_write_flash - write up to a page of data to the serial flash
564 * @adapter: the adapter
565 * @addr: the start address to write
566 * @n: length of data to write in bytes
567 * @data: the data to write
568 *
569 * Writes up to a page of data (256 bytes) to the serial flash starting
570 * at the given address. All the data must be written to the same page.
571 */
572static int t4_write_flash(struct adapter *adapter, unsigned int addr,
573 unsigned int n, const u8 *data)
574{
575 int ret;
576 u32 buf[64];
577 unsigned int i, c, left, val, offset = addr & 0xff;
578
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000579 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000580 return -EINVAL;
581
582 val = swab32(addr) | SF_PROG_PAGE;
583
584 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
585 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
586 goto unlock;
587
588 for (left = n; left; left -= c) {
589 c = min(left, 4U);
590 for (val = 0, i = 0; i < c; ++i)
591 val = (val << 8) + *data++;
592
593 ret = sf1_write(adapter, c, c != left, 1, val);
594 if (ret)
595 goto unlock;
596 }
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000597 ret = flash_wait_op(adapter, 8, 1);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000598 if (ret)
599 goto unlock;
600
601 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
602
603 /* Read the page to verify the write succeeded */
604 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
605 if (ret)
606 return ret;
607
608 if (memcmp(data - n, (u8 *)buf + offset, n)) {
609 dev_err(adapter->pdev_dev,
610 "failed to correctly write the flash page at %#x\n",
611 addr);
612 return -EIO;
613 }
614 return 0;
615
616unlock:
617 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
618 return ret;
619}
620
621/**
622 * get_fw_version - read the firmware version
623 * @adapter: the adapter
624 * @vers: where to place the version
625 *
626 * Reads the FW version from flash.
627 */
628static int get_fw_version(struct adapter *adapter, u32 *vers)
629{
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000630 return t4_read_flash(adapter, adapter->params.sf_fw_start +
631 offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000632}
633
634/**
635 * get_tp_version - read the TP microcode version
636 * @adapter: the adapter
637 * @vers: where to place the version
638 *
639 * Reads the TP microcode version from flash.
640 */
641static int get_tp_version(struct adapter *adapter, u32 *vers)
642{
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000643 return t4_read_flash(adapter, adapter->params.sf_fw_start +
644 offsetof(struct fw_hdr, tp_microcode_ver),
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000645 1, vers, 0);
646}
647
648/**
649 * t4_check_fw_version - check if the FW is compatible with this driver
650 * @adapter: the adapter
651 *
652 * Checks if an adapter's FW is compatible with the driver. Returns 0
653 * if there's exact match, a negative error if the version could not be
654 * read or there's a major version mismatch, and a positive value if the
655 * expected major version is found but there's a minor version mismatch.
656 */
657int t4_check_fw_version(struct adapter *adapter)
658{
659 u32 api_vers[2];
660 int ret, major, minor, micro;
661
662 ret = get_fw_version(adapter, &adapter->params.fw_vers);
663 if (!ret)
664 ret = get_tp_version(adapter, &adapter->params.tp_vers);
665 if (!ret)
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000666 ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
667 offsetof(struct fw_hdr, intfver_nic),
668 2, api_vers, 1);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000669 if (ret)
670 return ret;
671
672 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
673 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
674 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
675 memcpy(adapter->params.api_vers, api_vers,
676 sizeof(adapter->params.api_vers));
677
678 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
679 dev_err(adapter->pdev_dev,
680 "card FW has major version %u, driver wants %u\n",
681 major, FW_VERSION_MAJOR);
682 return -EINVAL;
683 }
684
685 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
686 return 0; /* perfect match */
687
688 /* Minor/micro version mismatch. Report it but often it's OK. */
689 return 1;
690}
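/*
 * Caller sketch (illustrative only; the actual attach policy lives in the
 * main driver, not in this file):
 *
 *	ret = t4_check_fw_version(adap);
 *	if (ret < 0)
 *		return ret;
 *	if (ret > 0)
 *		dev_warn(adap->pdev_dev,
 *			 "FW minor/micro version mismatch, continuing\n");
 */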
691
692/**
693 * t4_flash_erase_sectors - erase a range of flash sectors
694 * @adapter: the adapter
695 * @start: the first sector to erase
696 * @end: the last sector to erase
697 *
698 * Erases the sectors in the given inclusive range.
699 */
700static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
701{
702 int ret = 0;
703
704 while (start <= end) {
705 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
706 (ret = sf1_write(adapter, 4, 0, 1,
707 SF_ERASE_SECTOR | (start << 8))) != 0 ||
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000708 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000709 dev_err(adapter->pdev_dev,
710 "erase of flash sector %d failed, error %d\n",
711 start, ret);
712 break;
713 }
714 start++;
715 }
716 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
717 return ret;
718}
719
720/**
721 * t4_load_fw - download firmware
722 * @adap: the adapter
723 * @fw_data: the firmware image to write
724 * @size: image size
725 *
726 * Write the supplied firmware image to the card's serial flash.
727 */
728int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
729{
730 u32 csum;
731 int ret, addr;
732 unsigned int i;
733 u8 first_page[SF_PAGE_SIZE];
734 const u32 *p = (const u32 *)fw_data;
735 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000736 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
737 unsigned int fw_img_start = adap->params.sf_fw_start;
738 unsigned int fw_start_sec = fw_img_start / sf_sec_size;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000739
740 if (!size) {
741 dev_err(adap->pdev_dev, "FW image has no data\n");
742 return -EINVAL;
743 }
744 if (size & 511) {
745 dev_err(adap->pdev_dev,
746 "FW image size not multiple of 512 bytes\n");
747 return -EINVAL;
748 }
749 if (ntohs(hdr->len512) * 512 != size) {
750 dev_err(adap->pdev_dev,
751 "FW image size differs from size in FW header\n");
752 return -EINVAL;
753 }
754 if (size > FW_MAX_SIZE) {
755 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
756 FW_MAX_SIZE);
757 return -EFBIG;
758 }
759
760 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
761 csum += ntohl(p[i]);
762
763 if (csum != 0xffffffff) {
764 dev_err(adap->pdev_dev,
765 "corrupted firmware image, checksum %#x\n", csum);
766 return -EINVAL;
767 }
768
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000769 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
770 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000771 if (ret)
772 goto out;
773
774 /*
775 * We write the correct version at the end so the driver can see a bad
776 * version if the FW write fails. Start by writing a copy of the
777 * first page with a bad version.
778 */
779 memcpy(first_page, fw_data, SF_PAGE_SIZE);
780 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000781 ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000782 if (ret)
783 goto out;
784
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000785 addr = fw_img_start;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000786 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
787 addr += SF_PAGE_SIZE;
788 fw_data += SF_PAGE_SIZE;
789 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
790 if (ret)
791 goto out;
792 }
793
794 ret = t4_write_flash(adap,
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000795 fw_img_start + offsetof(struct fw_hdr, fw_ver),
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000796 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
797out:
798 if (ret)
799 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
800 ret);
801 return ret;
802}
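/*
 * Checksum note (sketch only): the image is accepted when all of its
 * big-endian 32-bit words sum to 0xffffffff modulo 2^32, so a tool that
 * produces an image picks its checksum word as
 *
 *	csum_word = 0xffffffff - (sum of all other words)
 *
 * which is precisely the condition the loop above verifies before any
 * flash sector is erased.
 */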
803
804#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
805 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
806
807/**
808 * t4_link_start - apply link configuration to MAC/PHY
809 * @adap: the adapter
810 * @mbox: mbox to use for the FW command
811 * @port: the port id
812 * @lc: the requested link configuration
 *
813 * Set up a port's MAC and PHY according to a desired link configuration.
814 * - If the PHY can auto-negotiate, first decide what to advertise, then
815 * enable/disable auto-negotiation as desired, and reset.
816 * - If the PHY does not auto-negotiate, just reset it.
817 * - If auto-negotiation is off, set the MAC to the proper speed/duplex/FC,
818 * otherwise do it later based on the outcome of auto-negotiation.
819 */
820int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
821 struct link_config *lc)
822{
823 struct fw_port_cmd c;
824 unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
825
826 lc->link_ok = 0;
827 if (lc->requested_fc & PAUSE_RX)
828 fc |= FW_PORT_CAP_FC_RX;
829 if (lc->requested_fc & PAUSE_TX)
830 fc |= FW_PORT_CAP_FC_TX;
831
832 memset(&c, 0, sizeof(c));
833 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
834 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
835 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
836 FW_LEN16(c));
837
838 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
839 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
840 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
841 } else if (lc->autoneg == AUTONEG_DISABLE) {
842 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
843 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
844 } else
845 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
846
847 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
848}
849
850/**
851 * t4_restart_aneg - restart autonegotiation
852 * @adap: the adapter
853 * @mbox: mbox to use for the FW command
854 * @port: the port id
855 *
856 * Restarts autonegotiation for the selected port.
857 */
858int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
859{
860 struct fw_port_cmd c;
861
862 memset(&c, 0, sizeof(c));
863 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
864 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
865 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
866 FW_LEN16(c));
867 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
868 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
869}
870
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000871struct intr_info {
872 unsigned int mask; /* bits to check in interrupt status */
873 const char *msg; /* message to print or NULL */
874 short stat_idx; /* stat counter to increment or -1 */
875 unsigned short fatal; /* whether the condition reported is fatal */
876};
877
878/**
879 * t4_handle_intr_status - table driven interrupt handler
880 * @adapter: the adapter that generated the interrupt
881 * @reg: the interrupt status register to process
882 * @acts: table of interrupt actions
883 *
884 * A table driven interrupt handler that applies a set of masks to an
885 * interrupt status word and performs the corresponding actions if the
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300886 * interrupts described by the mask have occurred. The actions include
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000887 * optionally emitting a warning or alert message. The table is terminated
888 * by an entry specifying mask 0. Returns the number of fatal interrupt
889 * conditions.
890 */
891static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
892 const struct intr_info *acts)
893{
894 int fatal = 0;
895 unsigned int mask = 0;
896 unsigned int status = t4_read_reg(adapter, reg);
897
898 for ( ; acts->mask; ++acts) {
899 if (!(status & acts->mask))
900 continue;
901 if (acts->fatal) {
902 fatal++;
903 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
904 status & acts->mask);
905 } else if (acts->msg && printk_ratelimit())
906 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
907 status & acts->mask);
908 mask |= acts->mask;
909 }
910 status &= mask;
911 if (status) /* clear processed interrupts */
912 t4_write_reg(adapter, reg, status);
913 return fatal;
914}
915
916/*
917 * Interrupt handler for the PCIE module.
918 */
919static void pcie_intr_handler(struct adapter *adapter)
920{
Joe Perches005b5712010-12-14 21:36:53 +0000921 static const struct intr_info sysbus_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000922 { RNPP, "RXNP array parity error", -1, 1 },
923 { RPCP, "RXPC array parity error", -1, 1 },
924 { RCIP, "RXCIF array parity error", -1, 1 },
925 { RCCP, "Rx completions control array parity error", -1, 1 },
926 { RFTP, "RXFT array parity error", -1, 1 },
927 { 0 }
928 };
Joe Perches005b5712010-12-14 21:36:53 +0000929 static const struct intr_info pcie_port_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000930 { TPCP, "TXPC array parity error", -1, 1 },
931 { TNPP, "TXNP array parity error", -1, 1 },
932 { TFTP, "TXFT array parity error", -1, 1 },
933 { TCAP, "TXCA array parity error", -1, 1 },
934 { TCIP, "TXCIF array parity error", -1, 1 },
935 { RCAP, "RXCA array parity error", -1, 1 },
936 { OTDD, "outbound request TLP discarded", -1, 1 },
937 { RDPE, "Rx data parity error", -1, 1 },
938 { TDUE, "Tx uncorrectable data error", -1, 1 },
939 { 0 }
940 };
Joe Perches005b5712010-12-14 21:36:53 +0000941 static const struct intr_info pcie_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000942 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
943 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
944 { MSIDATAPERR, "MSI data parity error", -1, 1 },
945 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
946 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
947 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
948 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
949 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
950 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
951 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
952 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
953 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
954 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
955 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
956 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
957 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
958 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
959 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
960 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
961 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
962 { FIDPERR, "PCI FID parity error", -1, 1 },
963 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
964 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
965 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
966 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
967 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
968 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
969 { PCIESINT, "PCI core secondary fault", -1, 1 },
970 { PCIEPINT, "PCI core primary fault", -1, 1 },
971 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
972 { 0 }
973 };
974
975 int fat;
976
977 fat = t4_handle_intr_status(adapter,
978 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
979 sysbus_intr_info) +
980 t4_handle_intr_status(adapter,
981 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
982 pcie_port_intr_info) +
983 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
984 if (fat)
985 t4_fatal_err(adapter);
986}
987
988/*
989 * TP interrupt handler.
990 */
991static void tp_intr_handler(struct adapter *adapter)
992{
Joe Perches005b5712010-12-14 21:36:53 +0000993 static const struct intr_info tp_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000994 { 0x3fffffff, "TP parity error", -1, 1 },
995 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
996 { 0 }
997 };
998
999 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1000 t4_fatal_err(adapter);
1001}
1002
1003/*
1004 * SGE interrupt handler.
1005 */
1006static void sge_intr_handler(struct adapter *adapter)
1007{
1008 u64 v;
1009	u32 err;
Joe Perches005b5712010-12-14 21:36:53 +00001010 static const struct intr_info sge_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001011 { ERR_CPL_EXCEED_IQE_SIZE,
1012 "SGE received CPL exceeding IQE size", -1, 1 },
1013 { ERR_INVALID_CIDX_INC,
1014 "SGE GTS CIDX increment too large", -1, 0 },
1015 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
Vipul Pandya881806b2012-05-18 15:29:24 +05301016 { F_DBFIFO_LP_INT, NULL, -1, 0 },
1017 { F_DBFIFO_HP_INT, NULL, -1, 0 },
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001018 { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
1019 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1020 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1021 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1022 0 },
1023 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1024 0 },
1025 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1026 0 },
1027 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1028 0 },
1029 { ERR_ING_CTXT_PRIO,
1030 "SGE too many priority ingress contexts", -1, 0 },
1031 { ERR_EGR_CTXT_PRIO,
1032 "SGE too many priority egress contexts", -1, 0 },
1033 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1034 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1035 { 0 }
1036 };
1037
1038 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
1039 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
1040 if (v) {
1041 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
1042 (unsigned long long)v);
1043 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1044 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1045 }
1046
Vipul Pandya881806b2012-05-18 15:29:24 +05301047 err = t4_read_reg(adapter, A_SGE_INT_CAUSE3);
1048 if (err & (F_DBFIFO_HP_INT|F_DBFIFO_LP_INT))
1049 t4_db_full(adapter);
1050 if (err & F_ERR_DROPPED_DB)
1051 t4_db_dropped(adapter);
1052
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001053 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1054 v != 0)
1055 t4_fatal_err(adapter);
1056}
1057
1058/*
1059 * CIM interrupt handler.
1060 */
1061static void cim_intr_handler(struct adapter *adapter)
1062{
Joe Perches005b5712010-12-14 21:36:53 +00001063 static const struct intr_info cim_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001064 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1065 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1066 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1067 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1068 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1069 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1070 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1071 { 0 }
1072 };
Joe Perches005b5712010-12-14 21:36:53 +00001073 static const struct intr_info cim_upintr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001074 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1075 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1076 { ILLWRINT, "CIM illegal write", -1, 1 },
1077 { ILLRDINT, "CIM illegal read", -1, 1 },
1078 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1079 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1080 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1081 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1082 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1083 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1084 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1085 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1086 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1087 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1088 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1089 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1090 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1091 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1092 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1093 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1094 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1095 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1096 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1097 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1098 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1099 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1100 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1101 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1102 { 0 }
1103 };
1104
1105 int fat;
1106
1107 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1108 cim_intr_info) +
1109 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1110 cim_upintr_info);
1111 if (fat)
1112 t4_fatal_err(adapter);
1113}
1114
1115/*
1116 * ULP RX interrupt handler.
1117 */
1118static void ulprx_intr_handler(struct adapter *adapter)
1119{
Joe Perches005b5712010-12-14 21:36:53 +00001120 static const struct intr_info ulprx_intr_info[] = {
Dimitris Michailidis91e9a1e2010-06-18 10:05:33 +00001121 { 0x1800000, "ULPRX context error", -1, 1 },
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001122 { 0x7fffff, "ULPRX parity error", -1, 1 },
1123 { 0 }
1124 };
1125
1126 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1127 t4_fatal_err(adapter);
1128}
1129
1130/*
1131 * ULP TX interrupt handler.
1132 */
1133static void ulptx_intr_handler(struct adapter *adapter)
1134{
Joe Perches005b5712010-12-14 21:36:53 +00001135 static const struct intr_info ulptx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001136 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1137 0 },
1138 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1139 0 },
1140 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1141 0 },
1142 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1143 0 },
1144 { 0xfffffff, "ULPTX parity error", -1, 1 },
1145 { 0 }
1146 };
1147
1148 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1149 t4_fatal_err(adapter);
1150}
1151
1152/*
1153 * PM TX interrupt handler.
1154 */
1155static void pmtx_intr_handler(struct adapter *adapter)
1156{
Joe Perches005b5712010-12-14 21:36:53 +00001157 static const struct intr_info pmtx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001158 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1159 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1160 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1161 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1162 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1163 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1164 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1165 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1166 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1167 { 0 }
1168 };
1169
1170 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1171 t4_fatal_err(adapter);
1172}
1173
1174/*
1175 * PM RX interrupt handler.
1176 */
1177static void pmrx_intr_handler(struct adapter *adapter)
1178{
Joe Perches005b5712010-12-14 21:36:53 +00001179 static const struct intr_info pmrx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001180 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1181 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1182 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1183 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1184 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1185 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1186 { 0 }
1187 };
1188
1189 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1190 t4_fatal_err(adapter);
1191}
1192
1193/*
1194 * CPL switch interrupt handler.
1195 */
1196static void cplsw_intr_handler(struct adapter *adapter)
1197{
Joe Perches005b5712010-12-14 21:36:53 +00001198 static const struct intr_info cplsw_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001199 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1200 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1201 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1202 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1203 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1204 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1205 { 0 }
1206 };
1207
1208 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1209 t4_fatal_err(adapter);
1210}
1211
1212/*
1213 * LE interrupt handler.
1214 */
1215static void le_intr_handler(struct adapter *adap)
1216{
Joe Perches005b5712010-12-14 21:36:53 +00001217 static const struct intr_info le_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001218 { LIPMISS, "LE LIP miss", -1, 0 },
1219 { LIP0, "LE 0 LIP error", -1, 0 },
1220 { PARITYERR, "LE parity error", -1, 1 },
1221 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1222 { REQQPARERR, "LE request queue parity error", -1, 1 },
1223 { 0 }
1224 };
1225
1226 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1227 t4_fatal_err(adap);
1228}
1229
1230/*
1231 * MPS interrupt handler.
1232 */
1233static void mps_intr_handler(struct adapter *adapter)
1234{
Joe Perches005b5712010-12-14 21:36:53 +00001235 static const struct intr_info mps_rx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001236 { 0xffffff, "MPS Rx parity error", -1, 1 },
1237 { 0 }
1238 };
Joe Perches005b5712010-12-14 21:36:53 +00001239 static const struct intr_info mps_tx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001240 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1241 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1242 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1243 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1244 { BUBBLE, "MPS Tx underflow", -1, 1 },
1245 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1246 { FRMERR, "MPS Tx framing error", -1, 1 },
1247 { 0 }
1248 };
Joe Perches005b5712010-12-14 21:36:53 +00001249 static const struct intr_info mps_trc_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001250 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1251 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1252 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1253 { 0 }
1254 };
Joe Perches005b5712010-12-14 21:36:53 +00001255 static const struct intr_info mps_stat_sram_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001256 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1257 { 0 }
1258 };
Joe Perches005b5712010-12-14 21:36:53 +00001259 static const struct intr_info mps_stat_tx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001260 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1261 { 0 }
1262 };
Joe Perches005b5712010-12-14 21:36:53 +00001263 static const struct intr_info mps_stat_rx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001264 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1265 { 0 }
1266 };
Joe Perches005b5712010-12-14 21:36:53 +00001267 static const struct intr_info mps_cls_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001268 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1269 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1270 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1271 { 0 }
1272 };
1273
1274 int fat;
1275
1276 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1277 mps_rx_intr_info) +
1278 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1279 mps_tx_intr_info) +
1280 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1281 mps_trc_intr_info) +
1282 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1283 mps_stat_sram_intr_info) +
1284 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1285 mps_stat_tx_intr_info) +
1286 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1287 mps_stat_rx_intr_info) +
1288 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1289 mps_cls_intr_info);
1290
1291 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1292 RXINT | TXINT | STATINT);
1293 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1294 if (fat)
1295 t4_fatal_err(adapter);
1296}
1297
1298#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1299
1300/*
1301 * EDC/MC interrupt handler.
1302 */
1303static void mem_intr_handler(struct adapter *adapter, int idx)
1304{
1305 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
1306
1307 unsigned int addr, cnt_addr, v;
1308
1309 if (idx <= MEM_EDC1) {
1310 addr = EDC_REG(EDC_INT_CAUSE, idx);
1311 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1312 } else {
1313 addr = MC_INT_CAUSE;
1314 cnt_addr = MC_ECC_STATUS;
1315 }
1316
1317 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1318 if (v & PERR_INT_CAUSE)
1319 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1320 name[idx]);
1321 if (v & ECC_CE_INT_CAUSE) {
1322 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1323
1324 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1325 if (printk_ratelimit())
1326 dev_warn(adapter->pdev_dev,
1327 "%u %s correctable ECC data error%s\n",
1328 cnt, name[idx], cnt > 1 ? "s" : "");
1329 }
1330 if (v & ECC_UE_INT_CAUSE)
1331 dev_alert(adapter->pdev_dev,
1332 "%s uncorrectable ECC data error\n", name[idx]);
1333
1334 t4_write_reg(adapter, addr, v);
1335 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1336 t4_fatal_err(adapter);
1337}
1338
1339/*
1340 * MA interrupt handler.
1341 */
1342static void ma_intr_handler(struct adapter *adap)
1343{
1344 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1345
1346 if (status & MEM_PERR_INT_CAUSE)
1347 dev_alert(adap->pdev_dev,
1348 "MA parity error, parity status %#x\n",
1349 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1350 if (status & MEM_WRAP_INT_CAUSE) {
1351 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1352 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1353 "client %u to address %#x\n",
1354 MEM_WRAP_CLIENT_NUM_GET(v),
1355 MEM_WRAP_ADDRESS_GET(v) << 4);
1356 }
1357 t4_write_reg(adap, MA_INT_CAUSE, status);
1358 t4_fatal_err(adap);
1359}
1360
1361/*
1362 * SMB interrupt handler.
1363 */
1364static void smb_intr_handler(struct adapter *adap)
1365{
Joe Perches005b5712010-12-14 21:36:53 +00001366 static const struct intr_info smb_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001367 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1368 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1369 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1370 { 0 }
1371 };
1372
1373 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1374 t4_fatal_err(adap);
1375}
1376
1377/*
1378 * NC-SI interrupt handler.
1379 */
1380static void ncsi_intr_handler(struct adapter *adap)
1381{
Joe Perches005b5712010-12-14 21:36:53 +00001382 static const struct intr_info ncsi_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001383 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1384 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1385 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1386 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1387 { 0 }
1388 };
1389
1390 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1391 t4_fatal_err(adap);
1392}
1393
1394/*
1395 * XGMAC interrupt handler.
1396 */
1397static void xgmac_intr_handler(struct adapter *adap, int port)
1398{
1399 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
1400
1401 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1402 if (!v)
1403 return;
1404
1405 if (v & TXFIFO_PRTY_ERR)
1406 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1407 port);
1408 if (v & RXFIFO_PRTY_ERR)
1409 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1410 port);
1411 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1412 t4_fatal_err(adap);
1413}
1414
1415/*
1416 * PL interrupt handler.
1417 */
1418static void pl_intr_handler(struct adapter *adap)
1419{
Joe Perches005b5712010-12-14 21:36:53 +00001420 static const struct intr_info pl_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001421 { FATALPERR, "T4 fatal parity error", -1, 1 },
1422 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1423 { 0 }
1424 };
1425
1426 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1427 t4_fatal_err(adap);
1428}
1429
Dimitris Michailidis63bccee2010-08-02 13:19:16 +00001430#define PF_INTR_MASK (PFSW)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001431#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1432 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1433 CPL_SWITCH | SGE | ULP_TX)
1434
1435/**
1436 * t4_slow_intr_handler - control path interrupt handler
1437 * @adapter: the adapter
1438 *
1439 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1440 * The designation 'slow' is because it involves register reads, while
1441 * data interrupts typically don't involve any MMIOs.
1442 */
1443int t4_slow_intr_handler(struct adapter *adapter)
1444{
1445 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1446
1447 if (!(cause & GLBL_INTR_MASK))
1448 return 0;
1449 if (cause & CIM)
1450 cim_intr_handler(adapter);
1451 if (cause & MPS)
1452 mps_intr_handler(adapter);
1453 if (cause & NCSI)
1454 ncsi_intr_handler(adapter);
1455 if (cause & PL)
1456 pl_intr_handler(adapter);
1457 if (cause & SMB)
1458 smb_intr_handler(adapter);
1459 if (cause & XGMAC0)
1460 xgmac_intr_handler(adapter, 0);
1461 if (cause & XGMAC1)
1462 xgmac_intr_handler(adapter, 1);
1463 if (cause & XGMAC_KR0)
1464 xgmac_intr_handler(adapter, 2);
1465 if (cause & XGMAC_KR1)
1466 xgmac_intr_handler(adapter, 3);
1467 if (cause & PCIE)
1468 pcie_intr_handler(adapter);
1469 if (cause & MC)
1470 mem_intr_handler(adapter, MEM_MC);
1471 if (cause & EDC0)
1472 mem_intr_handler(adapter, MEM_EDC0);
1473 if (cause & EDC1)
1474 mem_intr_handler(adapter, MEM_EDC1);
1475 if (cause & LE)
1476 le_intr_handler(adapter);
1477 if (cause & TP)
1478 tp_intr_handler(adapter);
1479 if (cause & MA)
1480 ma_intr_handler(adapter);
1481 if (cause & PM_TX)
1482 pmtx_intr_handler(adapter);
1483 if (cause & PM_RX)
1484 pmrx_intr_handler(adapter);
1485 if (cause & ULP_RX)
1486 ulprx_intr_handler(adapter);
1487 if (cause & CPL_SWITCH)
1488 cplsw_intr_handler(adapter);
1489 if (cause & SGE)
1490 sge_intr_handler(adapter);
1491 if (cause & ULP_TX)
1492 ulptx_intr_handler(adapter);
1493
1494 /* Clear the interrupts just processed for which we are the master. */
1495 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1496 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1497 return 1;
1498}
1499
1500/**
1501 * t4_intr_enable - enable interrupts
1502 * @adapter: the adapter whose interrupts should be enabled
1503 *
1504 * Enable PF-specific interrupts for the calling function and the top-level
1505 * interrupt concentrator for global interrupts. Interrupts are already
1506 * enabled at each module; here we just enable the roots of the interrupt
1507 * hierarchies.
1508 *
1509 * Note: this function should be called only when the driver manages
1510 * non PF-specific interrupts from the various HW modules. Only one PCI
1511 * function at a time should be doing this.
1512 */
1513void t4_intr_enable(struct adapter *adapter)
1514{
1515 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1516
1517 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1518 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1519 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1520 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1521 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1522 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1523 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
Vipul Pandya881806b2012-05-18 15:29:24 +05301524 F_DBFIFO_HP_INT | F_DBFIFO_LP_INT |
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001525 EGRESS_SIZE_ERR);
1526 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1527 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1528}
1529
1530/**
1531 * t4_intr_disable - disable interrupts
1532 * @adapter: the adapter whose interrupts should be disabled
1533 *
1534 * Disable interrupts. We only disable the top-level interrupt
1535 * concentrators. The caller must be a PCI function managing global
1536 * interrupts.
1537 */
1538void t4_intr_disable(struct adapter *adapter)
1539{
1540 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1541
1542 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1543 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1544}
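
/*
 * Usage sketch (illustrative): the enable/disable calls are meant to be
 * paired by the single PCI function that manages the global interrupts,
 * e.g. enable once the queues and interrupt vectors are set up, disable
 * before tearing them down.  "adap" is an assumed, already-probed adapter.
 *
 *	t4_intr_enable(adap);		// bring up the interrupt roots
 *	...
 *	t4_intr_disable(adap);		// quiesce before freeing queues
 */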
1545
1546/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001547 * hash_mac_addr - return the hash value of a MAC address
1548 * @addr: the 48-bit Ethernet MAC address
1549 *
1550 * Hashes a MAC address according to the hash function used by HW inexact
1551 * (hash) address matching.
1552 */
1553static int hash_mac_addr(const u8 *addr)
1554{
1555 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1556 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1557 a ^= b;
1558 a ^= (a >> 12);
1559 a ^= (a >> 6);
1560 return a & 0x3f;
1561}
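
/*
 * Worked example (editorial, not part of the driver): the hash selects one
 * of 64 buckets of the inexact-match filter.  For 00:11:22:33:44:55 the
 * folding above gives a = 0x335577, then 0x335642, then 0x339b1b, so the
 * address lands in bucket 0x1b (27).  Callers typically set that bit in a
 * 64-bit vector, as t4_alloc_mac_filt() does further below:
 *
 *	u64 vec = 0;
 *
 *	vec |= 1ULL << hash_mac_addr(addr);
 */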
1562
1563/**
1564 * t4_config_rss_range - configure a portion of the RSS mapping table
1565 * @adapter: the adapter
1566 * @mbox: mbox to use for the FW command
1567 * @viid: virtual interface whose RSS subtable is to be written
1568 * @start: start entry in the table to write
1569 * @n: how many table entries to write
1570 * @rspq: values for the response queue lookup table
1571 * @nrspq: number of values in @rspq
1572 *
1573 * Programs the selected part of the VI's RSS mapping table with the
1574 * provided values. If @nrspq < @n the supplied values are used repeatedly
1575 * until the full table range is populated.
1576 *
1577 * The caller must ensure the values in @rspq are in the range allowed for
1578 * @viid.
1579 */
1580int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1581 int start, int n, const u16 *rspq, unsigned int nrspq)
1582{
1583 int ret;
1584 const u16 *rsp = rspq;
1585 const u16 *rsp_end = rspq + nrspq;
1586 struct fw_rss_ind_tbl_cmd cmd;
1587
1588 memset(&cmd, 0, sizeof(cmd));
1589 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
1590 FW_CMD_REQUEST | FW_CMD_WRITE |
1591 FW_RSS_IND_TBL_CMD_VIID(viid));
1592 cmd.retval_len16 = htonl(FW_LEN16(cmd));
1593
1594 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
1595 while (n > 0) {
1596 int nq = min(n, 32);
1597 __be32 *qp = &cmd.iq0_to_iq2;
1598
1599 cmd.niqid = htons(nq);
1600 cmd.startidx = htons(start);
1601
1602 start += nq;
1603 n -= nq;
1604
1605 while (nq > 0) {
1606 unsigned int v;
1607
1608 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
1609 if (++rsp >= rsp_end)
1610 rsp = rspq;
1611 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
1612 if (++rsp >= rsp_end)
1613 rsp = rspq;
1614 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
1615 if (++rsp >= rsp_end)
1616 rsp = rspq;
1617
1618 *qp++ = htonl(v);
1619 nq -= 3;
1620 }
1621
1622 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
1623 if (ret)
1624 return ret;
1625 }
1626 return 0;
1627}
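
/*
 * Usage sketch (illustrative): spreading a VI's RSS slice across its Rx
 * queues.  "adap", "mbox", "pi" (a port_info with viid/rss_size as filled
 * in by t4_port_init() below) and the ingress queue ids are assumptions for
 * the example; the values in rspq are repeated until the slice is full.
 *
 *	u16 rspq[4] = { iq0_id, iq1_id, iq2_id, iq3_id };
 *	int err;
 *
 *	err = t4_config_rss_range(adap, mbox, pi->viid, 0, pi->rss_size,
 *				  rspq, ARRAY_SIZE(rspq));
 */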
1628
1629/**
1630 * t4_config_glbl_rss - configure the global RSS mode
1631 * @adapter: the adapter
1632 * @mbox: mbox to use for the FW command
1633 * @mode: global RSS mode
1634 * @flags: mode-specific flags
1635 *
1636 * Sets the global RSS mode.
1637 */
1638int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1639 unsigned int flags)
1640{
1641 struct fw_rss_glb_config_cmd c;
1642
1643 memset(&c, 0, sizeof(c));
1644 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
1645 FW_CMD_REQUEST | FW_CMD_WRITE);
1646 c.retval_len16 = htonl(FW_LEN16(c));
1647 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
1648 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1649 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
1650 c.u.basicvirtual.mode_pkd =
1651 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1652 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
1653 } else
1654 return -EINVAL;
1655 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1656}
1657
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001658/**
1659 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
1660 * @adap: the adapter
1661 * @v4: holds the TCP/IP counter values
1662 * @v6: holds the TCP/IPv6 counter values
1663 *
1664 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
1665 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
1666 */
1667void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1668 struct tp_tcp_stats *v6)
1669{
1670 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
1671
1672#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
1673#define STAT(x) val[STAT_IDX(x)]
1674#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
1675
1676 if (v4) {
1677 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1678 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
1679 v4->tcpOutRsts = STAT(OUT_RST);
1680 v4->tcpInSegs = STAT64(IN_SEG);
1681 v4->tcpOutSegs = STAT64(OUT_SEG);
1682 v4->tcpRetransSegs = STAT64(RXT_SEG);
1683 }
1684 if (v6) {
1685 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1686 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
1687 v6->tcpOutRsts = STAT(OUT_RST);
1688 v6->tcpInSegs = STAT64(IN_SEG);
1689 v6->tcpOutSegs = STAT64(OUT_SEG);
1690 v6->tcpRetransSegs = STAT64(RXT_SEG);
1691 }
1692#undef STAT64
1693#undef STAT
1694#undef STAT_IDX
1695}
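
/*
 * Usage sketch (illustrative): reading both MIB blocks and logging one
 * counter.  "adap" is an assumed adapter; the struct fields are the ones
 * filled in above.
 *
 *	struct tp_tcp_stats v4, v6;
 *
 *	t4_tp_get_tcp_stats(adap, &v4, &v6);
 *	dev_info(adap->pdev_dev, "TCP retransmits: v4 %llu, v6 %llu\n",
 *		 (unsigned long long)v4.tcpRetransSegs,
 *		 (unsigned long long)v6.tcpRetransSegs);
 */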
1696
1697/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001698 * t4_read_mtu_tbl - returns the values in the HW path MTU table
1699 * @adap: the adapter
1700 * @mtus: where to store the MTU values
1701 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
1702 *
1703 * Reads the HW path MTU table.
1704 */
1705void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
1706{
1707 u32 v;
1708 int i;
1709
1710 for (i = 0; i < NMTUS; ++i) {
1711 t4_write_reg(adap, TP_MTU_TABLE,
1712 MTUINDEX(0xff) | MTUVALUE(i));
1713 v = t4_read_reg(adap, TP_MTU_TABLE);
1714 mtus[i] = MTUVALUE_GET(v);
1715 if (mtu_log)
1716 mtu_log[i] = MTUWIDTH_GET(v);
1717 }
1718}
1719
1720/**
1721 * init_cong_ctrl - initialize congestion control parameters
1722 * @a: the alpha values for congestion control
1723 * @b: the beta values for congestion control
1724 *
1725 * Initialize the congestion control parameters.
1726 */
1727static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
1728{
1729 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
1730 a[9] = 2;
1731 a[10] = 3;
1732 a[11] = 4;
1733 a[12] = 5;
1734 a[13] = 6;
1735 a[14] = 7;
1736 a[15] = 8;
1737 a[16] = 9;
1738 a[17] = 10;
1739 a[18] = 14;
1740 a[19] = 17;
1741 a[20] = 21;
1742 a[21] = 25;
1743 a[22] = 30;
1744 a[23] = 35;
1745 a[24] = 45;
1746 a[25] = 60;
1747 a[26] = 80;
1748 a[27] = 100;
1749 a[28] = 200;
1750 a[29] = 300;
1751 a[30] = 400;
1752 a[31] = 500;
1753
1754 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
1755 b[9] = b[10] = 1;
1756 b[11] = b[12] = 2;
1757 b[13] = b[14] = b[15] = b[16] = 3;
1758 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
1759 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
1760 b[28] = b[29] = 6;
1761 b[30] = b[31] = 7;
1762}
1763
1764/* The minimum additive increment value for the congestion control table */
1765#define CC_MIN_INCR 2U
1766
1767/**
1768 * t4_load_mtus - write the MTU and congestion control HW tables
1769 * @adap: the adapter
1770 * @mtus: the values for the MTU table
1771 * @alpha: the values for the congestion control alpha parameter
1772 * @beta: the values for the congestion control beta parameter
1773 *
1774 * Write the HW MTU table with the supplied MTUs and the high-speed
1775 * congestion control table with the supplied alpha, beta, and MTUs.
1776 * We write the two tables together because the additive increments
1777 * depend on the MTUs.
1778 */
1779void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
1780 const unsigned short *alpha, const unsigned short *beta)
1781{
1782 static const unsigned int avg_pkts[NCCTRL_WIN] = {
1783 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
1784 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
1785 28672, 40960, 57344, 81920, 114688, 163840, 229376
1786 };
1787
1788 unsigned int i, w;
1789
1790 for (i = 0; i < NMTUS; ++i) {
1791 unsigned int mtu = mtus[i];
1792 unsigned int log2 = fls(mtu);
1793
1794 if (!(mtu & ((1 << log2) >> 2))) /* round */
1795 log2--;
1796 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
1797 MTUWIDTH(log2) | MTUVALUE(mtu));
1798
1799 for (w = 0; w < NCCTRL_WIN; ++w) {
1800 unsigned int inc;
1801
1802 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
1803 CC_MIN_INCR);
1804
1805 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
1806 (w << 16) | (beta[w] << 13) | inc);
1807 }
1808 }
1809}
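
/*
 * Usage sketch (illustrative): reloading the congestion control table using
 * the current HW MTU table and the alpha/beta windows that init_cong_ctrl()
 * fills in via t4_prep_adapter() below.  "adap" is an assumed adapter.
 *
 *	u16 mtus[NMTUS];
 *
 *	t4_read_mtu_tbl(adap, mtus, NULL);
 *	t4_load_mtus(adap, mtus, adap->params.a_wnd, adap->params.b_wnd);
 */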
1810
1811/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001812 * get_mps_bg_map - return the buffer groups associated with a port
1813 * @adap: the adapter
1814 * @idx: the port index
1815 *
1816 * Returns a bitmap indicating which MPS buffer groups are associated
1817 * with the given port. Bit i is set if buffer group i is used by the
1818 * port.
1819 */
1820static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
1821{
1822 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
1823
1824 if (n == 0)
1825 return idx == 0 ? 0xf : 0;
1826 if (n == 1)
1827 return idx < 2 ? (3 << (2 * idx)) : 0;
1828 return 1 << idx;
1829}
1830
1831/**
1832 * t4_get_port_stats - collect port statistics
1833 * @adap: the adapter
1834 * @idx: the port index
1835 * @p: the stats structure to fill
1836 *
1837 * Collect statistics related to the given port from HW.
1838 */
1839void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
1840{
1841 u32 bgmap = get_mps_bg_map(adap, idx);
1842
1843#define GET_STAT(name) \
1844 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
1845#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
1846
1847 p->tx_octets = GET_STAT(TX_PORT_BYTES);
1848 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
1849 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
1850 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
1851 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
1852 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
1853 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
1854 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
1855 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
1856 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
1857 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
1858 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
1859 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
1860 p->tx_drop = GET_STAT(TX_PORT_DROP);
1861 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
1862 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
1863 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
1864 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
1865 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
1866 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
1867 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
1868 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
1869 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
1870
1871 p->rx_octets = GET_STAT(RX_PORT_BYTES);
1872 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
1873 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
1874 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
1875 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
1876 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
1877 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
1878 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
1879 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
1880 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
1881 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
1882 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
1883 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
1884 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
1885 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
1886 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
1887 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
1888 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
1889 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
1890 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
1891 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
1892 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
1893 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
1894 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
1895 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
1896 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
1897 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
1898
1899 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
1900 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
1901 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
1902 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
1903 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
1904 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
1905 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
1906 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
1907
1908#undef GET_STAT
1909#undef GET_STAT_COM
1910}
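
/*
 * Usage sketch (illustrative): snapshotting the MAC counters for a port.
 * Using the port's Tx channel as the index is an assumption made for the
 * example; "pi" is an assumed port_info set up by t4_port_init() below.
 *
 *	struct port_stats ps;
 *
 *	t4_get_port_stats(adap, pi->tx_chan, &ps);
 *	dev_info(adap->pdev_dev, "tx %llu frames, rx %llu frames\n",
 *		 (unsigned long long)ps.tx_frames,
 *		 (unsigned long long)ps.rx_frames);
 */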
1911
1912/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001913 * t4_wol_magic_enable - enable/disable magic packet WoL
1914 * @adap: the adapter
1915 * @port: the physical port index
1916 * @addr: MAC address expected in magic packets, %NULL to disable
1917 *
1918 * Enables/disables magic packet wake-on-LAN for the selected port.
1919 */
1920void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
1921 const u8 *addr)
1922{
1923 if (addr) {
1924 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
1925 (addr[2] << 24) | (addr[3] << 16) |
1926 (addr[4] << 8) | addr[5]);
1927 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
1928 (addr[0] << 8) | addr[1]);
1929 }
1930 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
1931 addr ? MAGICEN : 0);
1932}
1933
1934/**
1935 * t4_wol_pat_enable - enable/disable pattern-based WoL
1936 * @adap: the adapter
1937 * @port: the physical port index
1938 * @map: bitmap of which HW pattern filters to set
1939 * @mask0: byte mask for bytes 0-63 of a packet
1940 * @mask1: byte mask for bytes 64-127 of a packet
1941 * @crc: Ethernet CRC for selected bytes
1942 * @enable: enable/disable switch
1943 *
1944 * Sets the pattern filters indicated in @map to mask out the bytes
1945 * specified in @mask0/@mask1 in received packets and compare the CRC of
1946 * the resulting packet against @crc. If @enable is %true pattern-based
1947 * WoL is enabled, otherwise disabled.
1948 */
1949int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
1950 u64 mask0, u64 mask1, unsigned int crc, bool enable)
1951{
1952 int i;
1953
1954 if (!enable) {
1955 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
1956 PATEN, 0);
1957 return 0;
1958 }
1959 if (map > 0xff)
1960 return -EINVAL;
1961
1962#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
1963
1964 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
1965 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
1966 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
1967
1968 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
1969 if (!(map & 1))
1970 continue;
1971
1972 /* write byte masks */
1973 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
1974 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
1975 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
1976 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
1977 return -ETIMEDOUT;
1978
1979 /* write CRC */
1980 t4_write_reg(adap, EPIO_REG(DATA0), crc);
1981 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
1982 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
1983 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
1984 return -ETIMEDOUT;
1985 }
1986#undef EPIO_REG
1987
1988 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
1989 return 0;
1990}
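
/*
 * Usage sketch (illustrative): enabling magic-packet wake-up for a port and
 * later turning it off again.  "adap", "pi" and "netdev" are assumptions
 * for the example; passing a NULL address disables magic-packet WoL.
 *
 *	t4_wol_magic_enable(adap, pi->tx_chan, netdev->dev_addr);
 *	...
 *	t4_wol_magic_enable(adap, pi->tx_chan, NULL);
 */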
1991
1992#define INIT_CMD(var, cmd, rd_wr) do { \
1993 (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
1994 FW_CMD_REQUEST | FW_CMD_##rd_wr); \
1995 (var).retval_len16 = htonl(FW_LEN16(var)); \
1996} while (0)
1997
1998/**
1999 * t4_mdio_rd - read a PHY register through MDIO
2000 * @adap: the adapter
2001 * @mbox: mailbox to use for the FW command
2002 * @phy_addr: the PHY address
2003 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2004 * @reg: the register to read
2005 * @valp: where to store the value
2006 *
2007 * Issues a FW command through the given mailbox to read a PHY register.
2008 */
2009int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2010 unsigned int mmd, unsigned int reg, u16 *valp)
2011{
2012 int ret;
2013 struct fw_ldst_cmd c;
2014
2015 memset(&c, 0, sizeof(c));
2016 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2017 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2018 c.cycles_to_len16 = htonl(FW_LEN16(c));
2019 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2020 FW_LDST_CMD_MMD(mmd));
2021 c.u.mdio.raddr = htons(reg);
2022
2023 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2024 if (ret == 0)
2025 *valp = ntohs(c.u.mdio.rval);
2026 return ret;
2027}
2028
2029/**
2030 * t4_mdio_wr - write a PHY register through MDIO
2031 * @adap: the adapter
2032 * @mbox: mailbox to use for the FW command
2033 * @phy_addr: the PHY address
2034 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2035 * @reg: the register to write
 2036 * @val: value to write
2037 *
2038 * Issues a FW command through the given mailbox to write a PHY register.
2039 */
2040int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2041 unsigned int mmd, unsigned int reg, u16 val)
2042{
2043 struct fw_ldst_cmd c;
2044
2045 memset(&c, 0, sizeof(c));
2046 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2047 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2048 c.cycles_to_len16 = htonl(FW_LEN16(c));
2049 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2050 FW_LDST_CMD_MMD(mmd));
2051 c.u.mdio.raddr = htons(reg);
2052 c.u.mdio.rval = htons(val);
2053
2054 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2055}
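
/*
 * Usage sketch (illustrative): reading a clause-45 PHY status register
 * through the FW mailbox.  "adap", "mbox" and "phy_addr" are assumptions
 * for the example; MDIO_MMD_PMAPMD and MDIO_STAT1 come from <linux/mdio.h>.
 *
 *	u16 stat;
 *	int err;
 *
 *	err = t4_mdio_rd(adap, mbox, phy_addr, MDIO_MMD_PMAPMD, MDIO_STAT1,
 *			 &stat);
 *	if (!err)
 *		dev_info(adap->pdev_dev, "PMA/PMD STAT1 = %#x\n", stat);
 */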
2056
2057/**
2058 * t4_fw_hello - establish communication with FW
2059 * @adap: the adapter
2060 * @mbox: mailbox to use for the FW command
2061 * @evt_mbox: mailbox to receive async FW events
2062 * @master: specifies the caller's willingness to be the device master
2063 * @state: returns the current device state
2064 *
2065 * Issues a command to establish communication with FW.
2066 */
2067int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2068 enum dev_master master, enum dev_state *state)
2069{
2070 int ret;
2071 struct fw_hello_cmd c;
2072
2073 INIT_CMD(c, HELLO, WRITE);
2074 c.err_to_mbasyncnot = htonl(
2075 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2076 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2077 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
2078 FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
2079
2080 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2081 if (ret == 0 && state) {
2082 u32 v = ntohl(c.err_to_mbasyncnot);
2083 if (v & FW_HELLO_CMD_INIT)
2084 *state = DEV_STATE_INIT;
2085 else if (v & FW_HELLO_CMD_ERR)
2086 *state = DEV_STATE_ERR;
2087 else
2088 *state = DEV_STATE_UNINIT;
2089 }
2090 return ret;
2091}
2092
2093/**
2094 * t4_fw_bye - end communication with FW
2095 * @adap: the adapter
2096 * @mbox: mailbox to use for the FW command
2097 *
2098 * Issues a command to terminate communication with FW.
2099 */
2100int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2101{
2102 struct fw_bye_cmd c;
2103
2104 INIT_CMD(c, BYE, WRITE);
2105 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2106}
2107
2108/**
 2109 * t4_early_init - ask FW to initialize the device
2110 * @adap: the adapter
2111 * @mbox: mailbox to use for the FW command
2112 *
2113 * Issues a command to FW to partially initialize the device. This
2114 * performs initialization that generally doesn't depend on user input.
2115 */
2116int t4_early_init(struct adapter *adap, unsigned int mbox)
2117{
2118 struct fw_initialize_cmd c;
2119
2120 INIT_CMD(c, INITIALIZE, WRITE);
2121 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2122}
2123
2124/**
2125 * t4_fw_reset - issue a reset to FW
2126 * @adap: the adapter
2127 * @mbox: mailbox to use for the FW command
2128 * @reset: specifies the type of reset to perform
2129 *
2130 * Issues a reset command of the specified type to FW.
2131 */
2132int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2133{
2134 struct fw_reset_cmd c;
2135
2136 INIT_CMD(c, RESET, WRITE);
2137 c.val = htonl(reset);
2138 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2139}
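
/*
 * Usage sketch (illustrative): the typical attach-time handshake built from
 * the commands above.  "adap" and "mbox" are assumptions for the example,
 * MASTER_MAY is assumed to be the "willing but not forced" value of
 * enum dev_master, and error handling is trimmed to keep the sketch short.
 *
 *	enum dev_state state;
 *	int err;
 *
 *	err = t4_fw_hello(adap, mbox, mbox, MASTER_MAY, &state);
 *	if (!err && state == DEV_STATE_UNINIT)
 *		err = t4_early_init(adap, mbox);
 *	...
 *	t4_fw_bye(adap, mbox);		// on detach
 */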
2140
2141/**
2142 * t4_query_params - query FW or device parameters
2143 * @adap: the adapter
2144 * @mbox: mailbox to use for the FW command
2145 * @pf: the PF
2146 * @vf: the VF
2147 * @nparams: the number of parameters
2148 * @params: the parameter names
2149 * @val: the parameter values
2150 *
2151 * Reads the value of FW or device parameters. Up to 7 parameters can be
2152 * queried at once.
2153 */
2154int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2155 unsigned int vf, unsigned int nparams, const u32 *params,
2156 u32 *val)
2157{
2158 int i, ret;
2159 struct fw_params_cmd c;
2160 __be32 *p = &c.param[0].mnem;
2161
2162 if (nparams > 7)
2163 return -EINVAL;
2164
2165 memset(&c, 0, sizeof(c));
2166 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2167 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2168 FW_PARAMS_CMD_VFN(vf));
2169 c.retval_len16 = htonl(FW_LEN16(c));
2170 for (i = 0; i < nparams; i++, p += 2)
2171 *p = htonl(*params++);
2172
2173 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2174 if (ret == 0)
2175 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2176 *val++ = ntohl(*p);
2177 return ret;
2178}
2179
2180/**
2181 * t4_set_params - sets FW or device parameters
2182 * @adap: the adapter
2183 * @mbox: mailbox to use for the FW command
2184 * @pf: the PF
2185 * @vf: the VF
2186 * @nparams: the number of parameters
2187 * @params: the parameter names
2188 * @val: the parameter values
2189 *
2190 * Sets the value of FW or device parameters. Up to 7 parameters can be
2191 * specified at once.
2192 */
2193int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2194 unsigned int vf, unsigned int nparams, const u32 *params,
2195 const u32 *val)
2196{
2197 struct fw_params_cmd c;
2198 __be32 *p = &c.param[0].mnem;
2199
2200 if (nparams > 7)
2201 return -EINVAL;
2202
2203 memset(&c, 0, sizeof(c));
2204 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2205 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2206 FW_PARAMS_CMD_VFN(vf));
2207 c.retval_len16 = htonl(FW_LEN16(c));
2208 while (nparams--) {
2209 *p++ = htonl(*params++);
2210 *p++ = htonl(*val++);
2211 }
2212
2213 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2214}
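
/*
 * Usage sketch (illustrative): querying a single device parameter.  The
 * FW_PARAMS_* macro and enum names are assumed to come from t4fw_api.h;
 * "adap", "mbox" and "pf" are assumptions for the example.
 *
 *	u32 param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *		    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
 *	u32 portvec;
 *	int err;
 *
 *	err = t4_query_params(adap, mbox, pf, 0, 1, &param, &portvec);
 */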
2215
2216/**
2217 * t4_cfg_pfvf - configure PF/VF resource limits
2218 * @adap: the adapter
2219 * @mbox: mailbox to use for the FW command
2220 * @pf: the PF being configured
2221 * @vf: the VF being configured
2222 * @txq: the max number of egress queues
2223 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
2224 * @rxqi: the max number of interrupt-capable ingress queues
2225 * @rxq: the max number of interruptless ingress queues
2226 * @tc: the PCI traffic class
2227 * @vi: the max number of virtual interfaces
2228 * @cmask: the channel access rights mask for the PF/VF
2229 * @pmask: the port access rights mask for the PF/VF
2230 * @nexact: the maximum number of exact MPS filters
2231 * @rcaps: read capabilities
2232 * @wxcaps: write/execute capabilities
2233 *
2234 * Configures resource limits and capabilities for a physical or virtual
2235 * function.
2236 */
2237int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
2238 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
2239 unsigned int rxqi, unsigned int rxq, unsigned int tc,
2240 unsigned int vi, unsigned int cmask, unsigned int pmask,
2241 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
2242{
2243 struct fw_pfvf_cmd c;
2244
2245 memset(&c, 0, sizeof(c));
2246 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
2247 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
2248 FW_PFVF_CMD_VFN(vf));
2249 c.retval_len16 = htonl(FW_LEN16(c));
2250 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
2251 FW_PFVF_CMD_NIQ(rxq));
Casey Leedom81323b72010-06-25 12:10:32 +00002252 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002253 FW_PFVF_CMD_PMASK(pmask) |
2254 FW_PFVF_CMD_NEQ(txq));
2255 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
2256 FW_PFVF_CMD_NEXACTF(nexact));
2257 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
2258 FW_PFVF_CMD_WX_CAPS(wxcaps) |
2259 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
2260 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2261}
2262
2263/**
2264 * t4_alloc_vi - allocate a virtual interface
2265 * @adap: the adapter
2266 * @mbox: mailbox to use for the FW command
2267 * @port: physical port associated with the VI
2268 * @pf: the PF owning the VI
2269 * @vf: the VF owning the VI
2270 * @nmac: number of MAC addresses needed (1 to 5)
2271 * @mac: the MAC addresses of the VI
2272 * @rss_size: size of RSS table slice associated with this VI
2273 *
2274 * Allocates a virtual interface for the given physical port. If @mac is
2275 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
 2276 * @mac should be large enough to hold @nmac Ethernet addresses; they are
2277 * stored consecutively so the space needed is @nmac * 6 bytes.
2278 * Returns a negative error number or the non-negative VI id.
2279 */
2280int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
2281 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
2282 unsigned int *rss_size)
2283{
2284 int ret;
2285 struct fw_vi_cmd c;
2286
2287 memset(&c, 0, sizeof(c));
2288 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2289 FW_CMD_WRITE | FW_CMD_EXEC |
2290 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
2291 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
2292 c.portid_pkd = FW_VI_CMD_PORTID(port);
2293 c.nmac = nmac - 1;
2294
2295 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2296 if (ret)
2297 return ret;
2298
2299 if (mac) {
2300 memcpy(mac, c.mac, sizeof(c.mac));
2301 switch (nmac) {
2302 case 5:
2303 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
2304 case 4:
2305 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
2306 case 3:
2307 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
2308 case 2:
2309 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
2310 }
2311 }
2312 if (rss_size)
2313 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002314 return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002315}
2316
2317/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002318 * t4_set_rxmode - set Rx properties of a virtual interface
2319 * @adap: the adapter
2320 * @mbox: mailbox to use for the FW command
2321 * @viid: the VI id
2322 * @mtu: the new MTU or -1
2323 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2324 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2325 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00002326 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002327 * @sleep_ok: if true we may sleep while awaiting command completion
2328 *
2329 * Sets Rx properties of a virtual interface.
2330 */
2331int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00002332 int mtu, int promisc, int all_multi, int bcast, int vlanex,
2333 bool sleep_ok)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002334{
2335 struct fw_vi_rxmode_cmd c;
2336
2337 /* convert to FW values */
2338 if (mtu < 0)
2339 mtu = FW_RXMODE_MTU_NO_CHG;
2340 if (promisc < 0)
2341 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
2342 if (all_multi < 0)
2343 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
2344 if (bcast < 0)
2345 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00002346 if (vlanex < 0)
2347 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002348
2349 memset(&c, 0, sizeof(c));
2350 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
2351 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
2352 c.retval_len16 = htonl(FW_LEN16(c));
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00002353 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
2354 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
2355 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
2356 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
2357 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002358 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2359}
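
/*
 * Usage sketch (illustrative): putting a VI into promiscuous, all-multicast
 * mode while leaving MTU, broadcast and VLAN extraction unchanged (-1 means
 * "no change" for each field, as documented above).  "adap", "mbox" and
 * "pi" are assumptions for the example.
 *
 *	err = t4_set_rxmode(adap, mbox, pi->viid, -1, 1, 1, -1, -1, true);
 */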
2360
2361/**
2362 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
2363 * @adap: the adapter
2364 * @mbox: mailbox to use for the FW command
2365 * @viid: the VI id
2366 * @free: if true any existing filters for this VI id are first removed
2367 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
2368 * @addr: the MAC address(es)
2369 * @idx: where to store the index of each allocated filter
2370 * @hash: pointer to hash address filter bitmap
2371 * @sleep_ok: call is allowed to sleep
2372 *
2373 * Allocates an exact-match filter for each of the supplied addresses and
2374 * sets it to the corresponding address. If @idx is not %NULL it should
2375 * have at least @naddr entries, each of which will be set to the index of
2376 * the filter allocated for the corresponding MAC address. If a filter
2377 * could not be allocated for an address its index is set to 0xffff.
2378 * If @hash is not %NULL addresses that fail to allocate an exact filter
2379 * are hashed and update the hash filter bitmap pointed at by @hash.
2380 *
2381 * Returns a negative error number or the number of filters allocated.
2382 */
2383int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
2384 unsigned int viid, bool free, unsigned int naddr,
2385 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
2386{
2387 int i, ret;
2388 struct fw_vi_mac_cmd c;
2389 struct fw_vi_mac_exact *p;
2390
2391 if (naddr > 7)
2392 return -EINVAL;
2393
2394 memset(&c, 0, sizeof(c));
2395 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2396 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
2397 FW_VI_MAC_CMD_VIID(viid));
2398 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
2399 FW_CMD_LEN16((naddr + 2) / 2));
2400
2401 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2402 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2403 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
2404 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
2405 }
2406
2407 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
2408 if (ret)
2409 return ret;
2410
2411 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2412 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2413
2414 if (idx)
2415 idx[i] = index >= NEXACT_MAC ? 0xffff : index;
2416 if (index < NEXACT_MAC)
2417 ret++;
2418 else if (hash)
Dimitris Michailidisce9aeb52010-12-03 10:39:04 +00002419 *hash |= (1ULL << hash_mac_addr(addr[i]));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002420 }
2421 return ret;
2422}
2423
2424/**
2425 * t4_change_mac - modifies the exact-match filter for a MAC address
2426 * @adap: the adapter
2427 * @mbox: mailbox to use for the FW command
2428 * @viid: the VI id
2429 * @idx: index of existing filter for old value of MAC address, or -1
2430 * @addr: the new MAC address value
2431 * @persist: whether a new MAC allocation should be persistent
2432 * @add_smt: if true also add the address to the HW SMT
2433 *
2434 * Modifies an exact-match filter and sets it to the new MAC address.
2435 * Note that in general it is not possible to modify the value of a given
 2436 * filter, so the generic way to modify an address filter is to free the one
2437 * being used by the old address value and allocate a new filter for the
2438 * new address value. @idx can be -1 if the address is a new addition.
2439 *
2440 * Returns a negative error number or the index of the filter with the new
2441 * MAC value.
2442 */
2443int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2444 int idx, const u8 *addr, bool persist, bool add_smt)
2445{
2446 int ret, mode;
2447 struct fw_vi_mac_cmd c;
2448 struct fw_vi_mac_exact *p = c.u.exact;
2449
2450 if (idx < 0) /* new allocation */
2451 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
2452 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
2453
2454 memset(&c, 0, sizeof(c));
2455 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2456 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
2457 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
2458 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2459 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
2460 FW_VI_MAC_CMD_IDX(idx));
2461 memcpy(p->macaddr, addr, sizeof(p->macaddr));
2462
2463 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2464 if (ret == 0) {
2465 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2466 if (ret >= NEXACT_MAC)
2467 ret = -ENOMEM;
2468 }
2469 return ret;
2470}
2471
2472/**
2473 * t4_set_addr_hash - program the MAC inexact-match hash filter
2474 * @adap: the adapter
2475 * @mbox: mailbox to use for the FW command
2476 * @viid: the VI id
2477 * @ucast: whether the hash filter should also match unicast addresses
2478 * @vec: the value to be written to the hash filter
2479 * @sleep_ok: call is allowed to sleep
2480 *
2481 * Sets the 64-bit inexact-match hash filter for a virtual interface.
2482 */
2483int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
2484 bool ucast, u64 vec, bool sleep_ok)
2485{
2486 struct fw_vi_mac_cmd c;
2487
2488 memset(&c, 0, sizeof(c));
2489 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2490 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
2491 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
2492 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
2493 FW_CMD_LEN16(1));
2494 c.u.hash.hashvec = cpu_to_be64(vec);
2495 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2496}
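
/*
 * Usage sketch (illustrative): programming one unicast address with an
 * exact-match filter and falling back to the hash filter when the exact
 * filters are exhausted, mirroring how the two commands above are meant to
 * be combined.  "adap", "mbox", "pi" and "mac" are assumptions for the
 * example.
 *
 *	const u8 *maclist[1] = { mac };
 *	u16 filt_idx[1];
 *	u64 mhash = 0;
 *	int ret;
 *
 *	ret = t4_alloc_mac_filt(adap, mbox, pi->viid, false, 1, maclist,
 *				filt_idx, &mhash, true);
 *	if (ret >= 0 && mhash)
 *		ret = t4_set_addr_hash(adap, mbox, pi->viid, false, mhash,
 *				       true);
 */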
2497
2498/**
2499 * t4_enable_vi - enable/disable a virtual interface
2500 * @adap: the adapter
2501 * @mbox: mailbox to use for the FW command
2502 * @viid: the VI id
2503 * @rx_en: 1=enable Rx, 0=disable Rx
2504 * @tx_en: 1=enable Tx, 0=disable Tx
2505 *
2506 * Enables/disables a virtual interface.
2507 */
2508int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
2509 bool rx_en, bool tx_en)
2510{
2511 struct fw_vi_enable_cmd c;
2512
2513 memset(&c, 0, sizeof(c));
2514 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2515 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2516 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
2517 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
2518 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2519}
2520
2521/**
2522 * t4_identify_port - identify a VI's port by blinking its LED
2523 * @adap: the adapter
2524 * @mbox: mailbox to use for the FW command
2525 * @viid: the VI id
2526 * @nblinks: how many times to blink LED at 2.5 Hz
2527 *
2528 * Identifies a VI's port by blinking its LED.
2529 */
2530int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
2531 unsigned int nblinks)
2532{
2533 struct fw_vi_enable_cmd c;
 2534
	memset(&c, 0, sizeof(c));	/* don't send uninitialized fields to FW */
2535 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2536 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2537 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
2538 c.blinkdur = htons(nblinks);
2539 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2540}
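
/*
 * Usage sketch (illustrative): enabling a VI for traffic and then blinking
 * its port LED for identification; 5 blinks at 2.5 Hz is roughly two
 * seconds.  "adap", "mbox" and "pi" are assumptions for the example.
 *
 *	err = t4_enable_vi(adap, mbox, pi->viid, true, true);
 *	if (!err)
 *		err = t4_identify_port(adap, mbox, pi->viid, 5);
 */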
2541
2542/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002543 * t4_iq_free - free an ingress queue and its FLs
2544 * @adap: the adapter
2545 * @mbox: mailbox to use for the FW command
2546 * @pf: the PF owning the queues
2547 * @vf: the VF owning the queues
2548 * @iqtype: the ingress queue type
2549 * @iqid: ingress queue id
2550 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2551 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2552 *
2553 * Frees an ingress queue and its associated FLs, if any.
2554 */
2555int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2556 unsigned int vf, unsigned int iqtype, unsigned int iqid,
2557 unsigned int fl0id, unsigned int fl1id)
2558{
2559 struct fw_iq_cmd c;
2560
2561 memset(&c, 0, sizeof(c));
2562 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2563 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2564 FW_IQ_CMD_VFN(vf));
2565 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
2566 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
2567 c.iqid = htons(iqid);
2568 c.fl0id = htons(fl0id);
2569 c.fl1id = htons(fl1id);
2570 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2571}
2572
2573/**
2574 * t4_eth_eq_free - free an Ethernet egress queue
2575 * @adap: the adapter
2576 * @mbox: mailbox to use for the FW command
2577 * @pf: the PF owning the queue
2578 * @vf: the VF owning the queue
2579 * @eqid: egress queue id
2580 *
2581 * Frees an Ethernet egress queue.
2582 */
2583int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2584 unsigned int vf, unsigned int eqid)
2585{
2586 struct fw_eq_eth_cmd c;
2587
2588 memset(&c, 0, sizeof(c));
2589 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2590 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
2591 FW_EQ_ETH_CMD_VFN(vf));
2592 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
2593 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
2594 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2595}
2596
2597/**
2598 * t4_ctrl_eq_free - free a control egress queue
2599 * @adap: the adapter
2600 * @mbox: mailbox to use for the FW command
2601 * @pf: the PF owning the queue
2602 * @vf: the VF owning the queue
2603 * @eqid: egress queue id
2604 *
2605 * Frees a control egress queue.
2606 */
2607int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2608 unsigned int vf, unsigned int eqid)
2609{
2610 struct fw_eq_ctrl_cmd c;
2611
2612 memset(&c, 0, sizeof(c));
2613 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2614 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
2615 FW_EQ_CTRL_CMD_VFN(vf));
2616 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
2617 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
2618 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2619}
2620
2621/**
2622 * t4_ofld_eq_free - free an offload egress queue
2623 * @adap: the adapter
2624 * @mbox: mailbox to use for the FW command
2625 * @pf: the PF owning the queue
2626 * @vf: the VF owning the queue
2627 * @eqid: egress queue id
2628 *
 2629 * Frees an offload egress queue.
2630 */
2631int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2632 unsigned int vf, unsigned int eqid)
2633{
2634 struct fw_eq_ofld_cmd c;
2635
2636 memset(&c, 0, sizeof(c));
2637 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2638 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
2639 FW_EQ_OFLD_CMD_VFN(vf));
2640 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
2641 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
2642 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2643}
2644
2645/**
2646 * t4_handle_fw_rpl - process a FW reply message
2647 * @adap: the adapter
2648 * @rpl: start of the FW message
2649 *
2650 * Processes a FW message, such as link state change messages.
2651 */
2652int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
2653{
2654 u8 opcode = *(const u8 *)rpl;
2655
2656 if (opcode == FW_PORT_CMD) { /* link/module state change message */
2657 int speed = 0, fc = 0;
2658 const struct fw_port_cmd *p = (void *)rpl;
2659 int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
2660 int port = adap->chan_map[chan];
2661 struct port_info *pi = adap2pinfo(adap, port);
2662 struct link_config *lc = &pi->link_cfg;
2663 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
2664 int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
2665 u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
2666
2667 if (stat & FW_PORT_CMD_RXPAUSE)
2668 fc |= PAUSE_RX;
2669 if (stat & FW_PORT_CMD_TXPAUSE)
2670 fc |= PAUSE_TX;
2671 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
2672 speed = SPEED_100;
2673 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
2674 speed = SPEED_1000;
2675 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
2676 speed = SPEED_10000;
2677
2678 if (link_ok != lc->link_ok || speed != lc->speed ||
2679 fc != lc->fc) { /* something changed */
2680 lc->link_ok = link_ok;
2681 lc->speed = speed;
2682 lc->fc = fc;
2683 t4_os_link_changed(adap, port, link_ok);
2684 }
2685 if (mod != pi->mod_type) {
2686 pi->mod_type = mod;
2687 t4_os_portmod_changed(adap, port);
2688 }
2689 }
2690 return 0;
2691}
2692
2693static void __devinit get_pci_mode(struct adapter *adapter,
2694 struct pci_params *p)
2695{
2696 u16 val;
2697 u32 pcie_cap = pci_pcie_cap(adapter->pdev);
2698
2699 if (pcie_cap) {
2700 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
2701 &val);
2702 p->speed = val & PCI_EXP_LNKSTA_CLS;
2703 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
2704 }
2705}
2706
2707/**
2708 * init_link_config - initialize a link's SW state
2709 * @lc: structure holding the link state
2710 * @caps: link capabilities
2711 *
2712 * Initializes the SW state maintained for each link, including the link's
2713 * capabilities and default speed/flow-control/autonegotiation settings.
2714 */
2715static void __devinit init_link_config(struct link_config *lc,
2716 unsigned int caps)
2717{
2718 lc->supported = caps;
2719 lc->requested_speed = 0;
2720 lc->speed = 0;
2721 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
2722 if (lc->supported & FW_PORT_CAP_ANEG) {
2723 lc->advertising = lc->supported & ADVERT_MASK;
2724 lc->autoneg = AUTONEG_ENABLE;
2725 lc->requested_fc |= PAUSE_AUTONEG;
2726 } else {
2727 lc->advertising = 0;
2728 lc->autoneg = AUTONEG_DISABLE;
2729 }
2730}
2731
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00002732int t4_wait_dev_ready(struct adapter *adap)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002733{
2734 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
2735 return 0;
2736 msleep(500);
2737 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
2738}
2739
Dimitris Michailidis900a6592010-06-18 10:05:27 +00002740static int __devinit get_flash_params(struct adapter *adap)
2741{
2742 int ret;
2743 u32 info;
2744
2745 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
2746 if (!ret)
2747 ret = sf1_read(adap, 3, 0, 1, &info);
2748 t4_write_reg(adap, SF_OP, 0); /* unlock SF */
2749 if (ret)
2750 return ret;
2751
2752 if ((info & 0xff) != 0x20) /* not a Numonix flash */
2753 return -EINVAL;
2754 info >>= 16; /* log2 of size */
2755 if (info >= 0x14 && info < 0x18)
2756 adap->params.sf_nsec = 1 << (info - 16);
2757 else if (info == 0x18)
2758 adap->params.sf_nsec = 64;
2759 else
2760 return -EINVAL;
2761 adap->params.sf_size = 1 << info;
2762 adap->params.sf_fw_start =
2763 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
2764 return 0;
2765}
2766
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002767/**
2768 * t4_prep_adapter - prepare SW and HW for operation
2769 * @adapter: the adapter
2771 *
2772 * Initialize adapter SW state for the various HW modules, set initial
2773 * values for some adapter tunables, take PHYs out of reset, and
2774 * initialize the MDIO interface.
2775 */
2776int __devinit t4_prep_adapter(struct adapter *adapter)
2777{
2778 int ret;
2779
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00002780 ret = t4_wait_dev_ready(adapter);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002781 if (ret < 0)
2782 return ret;
2783
2784 get_pci_mode(adapter, &adapter->params.pci);
2785 adapter->params.rev = t4_read_reg(adapter, PL_REV);
2786
Dimitris Michailidis900a6592010-06-18 10:05:27 +00002787 ret = get_flash_params(adapter);
2788 if (ret < 0) {
2789 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
2790 return ret;
2791 }
2792
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002793 ret = get_vpd_params(adapter, &adapter->params.vpd);
2794 if (ret < 0)
2795 return ret;
2796
2797 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
2798
2799 /*
2800 * Default port for debugging in case we can't reach FW.
2801 */
2802 adapter->params.nports = 1;
2803 adapter->params.portvec = 1;
2804 return 0;
2805}
2806
2807int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
2808{
2809 u8 addr[6];
2810 int ret, i, j = 0;
2811 struct fw_port_cmd c;
Dimitris Michailidisf7965642010-07-11 12:01:18 +00002812 struct fw_rss_vi_config_cmd rvc;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002813
2814 memset(&c, 0, sizeof(c));
Dimitris Michailidisf7965642010-07-11 12:01:18 +00002815 memset(&rvc, 0, sizeof(rvc));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002816
2817 for_each_port(adap, i) {
2818 unsigned int rss_size;
2819 struct port_info *p = adap2pinfo(adap, i);
2820
2821 while ((adap->params.portvec & (1 << j)) == 0)
2822 j++;
2823
2824 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
2825 FW_CMD_REQUEST | FW_CMD_READ |
2826 FW_PORT_CMD_PORTID(j));
2827 c.action_to_len16 = htonl(
2828 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
2829 FW_LEN16(c));
2830 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2831 if (ret)
2832 return ret;
2833
2834 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
2835 if (ret < 0)
2836 return ret;
2837
2838 p->viid = ret;
2839 p->tx_chan = j;
2840 p->lport = j;
2841 p->rss_size = rss_size;
2842 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
2843 memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
Dimitris Michailidisf21ce1c2010-06-18 10:05:30 +00002844 adap->port[i]->dev_id = j;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002845
2846 ret = ntohl(c.u.info.lstatus_to_modtype);
2847 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
2848 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
2849 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002850 p->mod_type = FW_PORT_MOD_TYPE_NA;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002851
Dimitris Michailidisf7965642010-07-11 12:01:18 +00002852 rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2853 FW_CMD_REQUEST | FW_CMD_READ |
2854 FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
2855 rvc.retval_len16 = htonl(FW_LEN16(rvc));
2856 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
2857 if (ret)
2858 return ret;
2859 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
2860
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002861 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
2862 j++;
2863 }
2864 return 0;
2865}