blob: c7fb549776dcc5db182e574d7b99dc2eef4d0abf [file] [log] [blame]
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/init.h>
36#include <linux/delay.h>
37#include "cxgb4.h"
38#include "t4_regs.h"
39#include "t4fw_api.h"
40
41/**
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
50 *
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
55 */
Roland Dreierde498c82010-04-21 08:59:17 +000056static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +000058{
59 while (1) {
60 u32 val = t4_read_reg(adapter, reg);
61
62 if (!!(val & mask) == polarity) {
63 if (valp)
64 *valp = val;
65 return 0;
66 }
67 if (--attempts == 0)
68 return -EAGAIN;
69 if (delay)
70 udelay(delay);
71 }
72}
73
/* Like t4_wait_op_done_val() but discards the final register value. */
static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
80
81/**
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
87 *
88 * Sets a register field specified by the supplied mask to the
89 * given value.
90 */
91void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
92 u32 val)
93{
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
95
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush */
98}
99
100/**
101 * t4_read_indirect - read indirectly addressed registers
102 * @adap: the adapter
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
108 *
109 * Reads registers that are accessed indirectly through an address/data
110 * register pair.
111 */
Roland Dreierde498c82010-04-21 08:59:17 +0000112static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals,
114 unsigned int nregs, unsigned int start_idx)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000115{
116 while (nregs--) {
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
119 start_idx++;
120 }
121}
122
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000123/*
124 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
125 */
126static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
127 u32 mbox_addr)
128{
129 for ( ; nflit; nflit--, mbox_addr += 8)
130 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
131}
132
/*
 * Handle a FW assertion reported in a mailbox.  Pulls the FW_DEBUG_CMD
 * reply out of the mailbox and logs the assertion's location and values.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	/* copy the whole debug command so the assert union fields are valid */
	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}
146
147static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
148{
149 dev_err(adap->pdev_dev,
150 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
151 (unsigned long long)t4_read_reg64(adap, data_reg),
152 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
153 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
154 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
155 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
156 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
157 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
158 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
159}
160
/**
 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 *	to respond.  @sleep_ok determines whether we may sleep while awaiting
 *	the response.  If sleeping is allowed we use progressive backoff
 *	otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	/* backoff schedule in ms; the last entry repeats until timeout */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);

	/* commands must be a multiple of 16 bytes and fit in the mailbox */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	/* try to claim mailbox ownership for the driver, up to 4 reads */
	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	/* copy the command into the mailbox data registers, 8 bytes at a time */
	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	/* hand the mailbox over to FW and kick off execution */
	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			/* ownership returned without a valid message: retry */
			if (!(v & MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			/* first flit holds the FW opcode and return value */
			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
				/* FW reported an assertion instead of a reply */
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);

			if (FW_CMD_RETVAL_GET((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_GET((int)res);
		}
	}

	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	return -ETIMEDOUT;
}
258
/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	/* refuse to start a new BIST read while one is in flight */
	if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
	t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
	t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
	t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
		     BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)

	/* status registers hold the data words in reverse order */
	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}
294
/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	/* each EDC's register block is EDC_STRIDE bytes apart */
	idx *= EDC_STRIDE;
	if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
	t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
	t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
	t4_write_reg(adap, EDC_BIST_CMD + idx,
		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
	i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)

	/* status registers hold the data words in reverse order */
	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}
332
/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R header.
 */
struct t4_vpd_hdr {
	u8  id_tag;		/* ID string tag */
	u8  id_len[2];		/* ID string length, little-endian */
	u8  id_data[ID_LEN];	/* product identifier string */
	u8  vpdr_tag;		/* VPD-R large resource tag */
	u8  vpdr_len[2];	/* VPD-R section length, little-endian */
};
344
345#define EEPROM_STAT_ADDR 0x7bfc
346#define VPD_BASE 0
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000347#define VPD_LEN 512
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000348
349/**
350 * t4_seeprom_wp - enable/disable EEPROM write protection
351 * @adapter: the adapter
352 * @enable: whether to enable or disable write protection
353 *
354 * Enables or disables write protection on the serial EEPROM.
355 */
356int t4_seeprom_wp(struct adapter *adapter, bool enable)
357{
358 unsigned int v = enable ? 0xc : 0;
359 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
360 return ret < 0 ? ret : 0;
361}
362
/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters (ID, EC, SN) stored in VPD EEPROM and
 *	validates the VPD checksum ("RV" keyword) before accepting them.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret;
	int ec, sn;		/* offsets of the EC and SN keyword data */
	u8 vpd[VPD_LEN], csum;
	unsigned int vpdr_len;
	const struct t4_vpd_hdr *v;

	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
	if (ret < 0)
		return ret;

	v = (const struct t4_vpd_hdr *)vpd;
	vpdr_len = pci_vpd_lrdt_size(&v->vpdr_tag);
	if (vpdr_len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		return -EINVAL;
	}

/* Locate a VPD keyword and leave @var pointing at its data, or fail. */
#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(&v->id_tag, sizeof(struct t4_vpd_hdr), \
					vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	/* sum all bytes up to and including the RV byte; must be zero */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	/* serial number is variable length; copy no more than SERNUM_LEN */
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);
	return 0;
}
422
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 512 * 1024,     /* max firmware image size in bytes */
};
438
/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	/* don't start a new SF operation while one is still in flight */
	if (t4_read_reg(adapter, SF_OP) & BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, SF_DATA);
	return ret;
}
468
/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	/* don't start a new SF operation while one is still in flight */
	if (t4_read_reg(adapter, SF_OP) & BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_DATA, val);
	t4_write_reg(adapter, SF_OP, lock |
		     cont | BYTECNT(byte_cnt - 1) | OP_WR);
	return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
}
495
496/**
497 * flash_wait_op - wait for a flash operation to complete
498 * @adapter: the adapter
499 * @attempts: max number of polls of the status register
500 * @delay: delay between polls in ms
501 *
502 * Wait for a flash operation to complete by polling the status register.
503 */
504static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
505{
506 int ret;
507 u32 status;
508
509 while (1) {
510 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
511 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
512 return ret;
513 if (!(status & 1))
514 return 0;
515 if (--attempts == 0)
516 return -EAGAIN;
517 if (delay)
518 msleep(delay);
519 }
520}
521
/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianess.
 */
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	/* reject reads past the end of flash or misaligned addresses */
	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/* command byte goes first on the wire, hence the address byte swap */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* chain all reads except the last, which also unlocks SF */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
560
/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	The page is read back afterwards to verify the write.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* stream the payload 4 bytes at a time, big-endian within each word */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP, 0);        /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* @data was advanced by n above, so data - n is the original start */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP, 0);        /* unlock SF */
	return ret;
}
619
/**
 *	get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version word from the firmware header in flash.
 */
static int get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, adapter->params.sf_fw_start +
			     offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
}
632
/**
 *	get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version word from the firmware header in flash.
 */
static int get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, adapter->params.sf_fw_start +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}
646
/**
 *	t4_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if there's exact match, a negative error if the version could not be
 *	read or there's a major version mismatch, and a positive value if the
 *	expected major version is found but there's a minor version mismatch.
 */
int t4_check_fw_version(struct adapter *adapter)
{
	u32 api_vers[2];
	int ret, major, minor, micro;

	/* cache FW, TP and API versions in the adapter parameters */
	ret = get_fw_version(adapter, &adapter->params.fw_vers);
	if (!ret)
		ret = get_tp_version(adapter, &adapter->params.tp_vers);
	if (!ret)
		ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
				    offsetof(struct fw_hdr, intfver_nic),
				    2, api_vers, 1);
	if (ret)
		return ret;

	major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
	minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
	micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
	memcpy(adapter->params.api_vers, api_vers,
	       sizeof(adapter->params.api_vers));

	if (major != FW_VERSION_MAJOR) {            /* major mismatch - fail */
		dev_err(adapter->pdev_dev,
			"card FW has major version %u, driver wants %u\n",
			major, FW_VERSION_MAJOR);
		return -EINVAL;
	}

	if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
		return 0;                           /* perfect match */

	/* Minor/micro version mismatch.  Report it but often it's OK. */
	return 1;
}
690
691/**
692 * t4_flash_erase_sectors - erase a range of flash sectors
693 * @adapter: the adapter
694 * @start: the first sector to erase
695 * @end: the last sector to erase
696 *
697 * Erases the sectors in the given inclusive range.
698 */
699static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
700{
701 int ret = 0;
702
703 while (start <= end) {
704 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
705 (ret = sf1_write(adapter, 4, 0, 1,
706 SF_ERASE_SECTOR | (start << 8))) != 0 ||
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000707 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000708 dev_err(adapter->pdev_dev,
709 "erase of flash sector %d failed, error %d\n",
710 start, ret);
711 break;
712 }
713 start++;
714 }
715 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
716 return ret;
717}
718
/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash after
 *	validating its size and checksum.  Returns 0 on success or a
 *	negative errno on failure.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}

	/* 32-bit ones'-style checksum over the whole image must be all-ones */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	/* write the remaining pages of the image */
	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	/* finally patch in the real version word to mark the image valid */
	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	return ret;
}
802
803#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
804 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
805
806/**
807 * t4_link_start - apply link configuration to MAC/PHY
808 * @phy: the PHY to setup
809 * @mac: the MAC to setup
810 * @lc: the requested link configuration
811 *
812 * Set up a port's MAC and PHY according to a desired link configuration.
813 * - If the PHY can auto-negotiate first decide what to advertise, then
814 * enable/disable auto-negotiation as desired, and reset.
815 * - If the PHY does not auto-negotiate just reset it.
816 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
817 * otherwise do it later based on the outcome of auto-negotiation.
818 */
819int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
820 struct link_config *lc)
821{
822 struct fw_port_cmd c;
823 unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
824
825 lc->link_ok = 0;
826 if (lc->requested_fc & PAUSE_RX)
827 fc |= FW_PORT_CAP_FC_RX;
828 if (lc->requested_fc & PAUSE_TX)
829 fc |= FW_PORT_CAP_FC_TX;
830
831 memset(&c, 0, sizeof(c));
832 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
833 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
834 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
835 FW_LEN16(c));
836
837 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
838 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
839 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
840 } else if (lc->autoneg == AUTONEG_DISABLE) {
841 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
842 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
843 } else
844 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
845
846 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
847}
848
/**
 *	t4_restart_aneg - restart autonegotiation
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *
 *	Restarts autonegotiation for the selected port by issuing an L1
 *	configure command advertising only the ANEG capability.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
869
/* Describes one entry in a table-driven interrupt handler (see
 * t4_handle_intr_status()).  A mask of 0 terminates the table.
 */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
};
876
/**
 *	t4_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@acts: table of interrupt actions
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally emitting a warning or alert message.  The table is
 *	terminated by an entry specifying mask 0.  Returns the number of
 *	fatal interrupt conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				  status & acts->mask);
		} else if (acts->msg && printk_ratelimit())
			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		/* track which bits the table recognized */
		mask |= acts->mask;
	}
	status &= mask;
	if (status)                     /* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}
914
/*
 * Interrupt handler for the PCIE module.  Processes the three PCIe cause
 * registers (UTL system-bus agent, UTL PCIe port, and the PCIe core) and
 * escalates to a fatal-error shutdown if any fatal condition is seen.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	/* UTL system-bus agent errors -- all fatal parity errors. */
	static const struct intr_info sysbus_intr_info[] = {
		{ RNPP, "RXNP array parity error", -1, 1 },
		{ RPCP, "RXPC array parity error", -1, 1 },
		{ RCIP, "RXCIF array parity error", -1, 1 },
		{ RCCP, "Rx completions control array parity error", -1, 1 },
		{ RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	/* UTL PCI Express port errors. */
	static const struct intr_info pcie_port_intr_info[] = {
		{ TPCP, "TXPC array parity error", -1, 1 },
		{ TNPP, "TXNP array parity error", -1, 1 },
		{ TFTP, "TXFT array parity error", -1, 1 },
		{ TCAP, "TXCA array parity error", -1, 1 },
		{ TCIP, "TXCIF array parity error", -1, 1 },
		{ RCAP, "RXCA array parity error", -1, 1 },
		{ OTDD, "outbound request TLP discarded", -1, 1 },
		{ RDPE, "Rx data parity error", -1, 1 },
		{ TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	/* PCIe core errors; only the unexpected split completion is
	 * non-fatal. */
	static const struct intr_info pcie_intr_info[] = {
		{ MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR, "PCI FID parity error", -1, 1 },
		{ INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ PCIESINT, "PCI core secondary fault", -1, 1 },
		{ PCIEPINT, "PCI core primary fault", -1, 1 },
		{ UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
		{ 0 }
	};

	int fat;

	/* Sum fatal counts from all three cause registers. */
	fat = t4_handle_intr_status(adapter,
				    PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				    sysbus_intr_info) +
	      t4_handle_intr_status(adapter,
				    PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
				    pcie_port_intr_info) +
	      t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
	if (fat)
		t4_fatal_err(adapter);
}
986
/*
 * TP interrupt handler.  All TP cause bits handled here are fatal.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
		t4_fatal_err(adapter);
}
1001
/*
 * SGE interrupt handler.  Parity errors are reported through the 64-bit
 * cause spread over SGE_INT_CAUSE1/2; everything else comes through
 * SGE_INT_CAUSE3.  Any parity error is treated as fatal.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;

	static const struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	/* Assemble the 64-bit parity cause from the two 32-bit registers. */
	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
	    ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
	if (v) {
		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
			  (unsigned long long)v);
		/* Write back what we read to acknowledge the parity bits. */
		t4_write_reg(adapter, SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
	}

	if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
	    v != 0)
		t4_fatal_err(adapter);
}
1048
/*
 * CIM interrupt handler.  Covers both the host cause register and the
 * uP-access cause register; every condition here is fatal.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ OBQPARERR, "CIM OBQ parity error", -1, 1 },
		{ IBQPARERR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0 }
	};
	/* Illegal/unexpected accesses detected on the uP interface. */
	static const struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT, "CIM illegal write", -1, 1 },
		{ ILLRDINT, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT , "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT , "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT , "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT , "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT , "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}
1105
/*
 * ULP RX interrupt handler.  Context and parity errors are both fatal.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{ 0x1800000, "ULPRX context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
		t4_fatal_err(adapter);
}
1120
/*
 * ULP TX interrupt handler.  PBL out-of-bounds conditions are non-fatal;
 * parity errors are fatal.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
		t4_fatal_err(adapter);
}
1142
/*
 * PM TX interrupt handler.  All reported conditions are fatal.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
		{ ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
		t4_fatal_err(adapter);
}
1164
/*
 * PM RX interrupt handler.  All reported conditions are fatal.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
		{ IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
		t4_fatal_err(adapter);
}
1183
/*
 * CPL switch interrupt handler.  All reported conditions are fatal.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
		{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
		t4_fatal_err(adapter);
}
1202
/*
 * LE interrupt handler.  LIP misses/errors are informational; parity and
 * protocol errors are fatal.
 */
static void le_intr_handler(struct adapter *adap)
{
	static const struct intr_info le_intr_info[] = {
		{ LIPMISS, "LE LIP miss", -1, 0 },
		{ LIP0, "LE 0 LIP error", -1, 0 },
		{ PARITYERR, "LE parity error", -1, 1 },
		{ UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
		t4_fatal_err(adap);
}
1220
/*
 * MPS interrupt handler.  Walks the per-sub-block cause registers, then
 * explicitly clears the top-level MPS cause bits (the sub-block clears
 * done by t4_handle_intr_status() do not propagate up automatically).
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
		{ TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
		{ BUBBLE, "MPS Tx underflow", -1, 1 },
		{ SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM, "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
		{ MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
				    mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	/* Clear the summary bits at the top level and flush the write. */
	t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
		     RXINT | TXINT | STATINT);
	t4_read_reg(adapter, MPS_INT_CAUSE);                    /* flush */
	if (fat)
		t4_fatal_err(adapter);
}
1288
/* Cause bits common to the EDC and MC memory-controller cause registers. */
#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.  @idx selects the memory: MEM_EDC0, MEM_EDC1,
 * or MEM_MC.  Parity and uncorrectable ECC errors are fatal; correctable
 * ECC errors are counted and rate-limit logged.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	/* Indexed by MEM_EDC0 / MEM_EDC1 / MEM_MC. */
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
	} else {
		addr = MC_INT_CAUSE;
		cnt_addr = MC_ECC_STATUS;
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE)
		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
			  name[idx]);
	if (v & ECC_CE_INT_CAUSE) {
		u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));

		/* Reset the correctable-error count after sampling it. */
		t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
		if (printk_ratelimit())
			dev_warn(adapter->pdev_dev,
				 "%u %s correctable ECC data error%s\n",
				 cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE)
		dev_alert(adapter->pdev_dev,
			  "%s uncorrectable ECC data error\n", name[idx]);

	/* Acknowledge the handled cause bits. */
	t4_write_reg(adapter, addr, v);
	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}
1329
/*
 * MA interrupt handler.  Any MA interrupt reaching here is treated as
 * fatal unconditionally.
 */
static void ma_intr_handler(struct adapter *adap)
{
	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);

	if (status & MEM_PERR_INT_CAUSE)
		dev_alert(adap->pdev_dev,
			  "MA parity error, parity status %#x\n",
			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
	if (status & MEM_WRAP_INT_CAUSE) {
		v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
			  "client %u to address %#x\n",
			  MEM_WRAP_CLIENT_NUM_GET(v),
			  MEM_WRAP_ADDRESS_GET(v) << 4);
	}
	/* Acknowledge everything we observed, then shut the adapter down. */
	t4_write_reg(adap, MA_INT_CAUSE, status);
	t4_fatal_err(adap);
}
1351
/*
 * SMB interrupt handler.  All reported conditions are fatal.
 */
static void smb_intr_handler(struct adapter *adap)
{
	static const struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
		t4_fatal_err(adap);
}
1367
/*
 * NC-SI interrupt handler.  All reported conditions are fatal.
 */
static void ncsi_intr_handler(struct adapter *adap)
{
	static const struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
		t4_fatal_err(adap);
}
1384
1385/*
1386 * XGMAC interrupt handler.
1387 */
1388static void xgmac_intr_handler(struct adapter *adap, int port)
1389{
1390 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
1391
1392 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1393 if (!v)
1394 return;
1395
1396 if (v & TXFIFO_PRTY_ERR)
1397 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1398 port);
1399 if (v & RXFIFO_PRTY_ERR)
1400 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1401 port);
1402 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1403 t4_fatal_err(adap);
1404}
1405
/*
 * PL interrupt handler.  All reported conditions are fatal.
 */
static void pl_intr_handler(struct adapter *adap)
{
	static const struct intr_info pl_intr_info[] = {
		{ FATALPERR, "T4 fatal parity error", -1, 1 },
		{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
		t4_fatal_err(adap);
}
1420
/* Per-PF interrupt enable bits. */
#define PF_INTR_MASK (PFSW)
/* Global (non-PF) cause bits this driver handles and clears. */
#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
		EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
		CPL_SWITCH | SGE | ULP_TX)

/**
 * t4_slow_intr_handler - control path interrupt handler
 * @adapter: the adapter
 *
 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
 * The designation 'slow' is because it involves register reads, while
 * data interrupts typically don't involve any MMIOs.
 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);

	if (!(cause & GLBL_INTR_MASK))
		return 0;
	if (cause & CIM)
		cim_intr_handler(adapter);
	if (cause & MPS)
		mps_intr_handler(adapter);
	/* NCSI, SMB and the XGMAC bits are not part of GLBL_INTR_MASK, so
	 * they are dispatched here but not cleared from PL_INT_CAUSE at the
	 * end of this function; their handlers clear their own causes. */
	if (cause & NCSI)
		ncsi_intr_handler(adapter);
	if (cause & PL)
		pl_intr_handler(adapter);
	if (cause & SMB)
		smb_intr_handler(adapter);
	if (cause & XGMAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & XGMAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & XGMAC_KR0)
		xgmac_intr_handler(adapter, 2);
	if (cause & XGMAC_KR1)
		xgmac_intr_handler(adapter, 3);
	if (cause & PCIE)
		pcie_intr_handler(adapter);
	if (cause & MC)
		mem_intr_handler(adapter, MEM_MC);
	if (cause & EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & LE)
		le_intr_handler(adapter);
	if (cause & TP)
		tp_intr_handler(adapter);
	if (cause & MA)
		ma_intr_handler(adapter);
	if (cause & PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & SGE)
		sge_intr_handler(adapter);
	if (cause & ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
	return 1;
}
1490
/**
 * t4_intr_enable - enable interrupts
 * @adapter: the adapter whose interrupts should be enabled
 *
 * Enable PF-specific interrupts for the calling function and the top-level
 * interrupt concentrator for global interrupts. Interrupts are already
 * enabled at each module, here we just enable the roots of the interrupt
 * hierarchies.
 *
 * Note: this function should be called only when the driver manages
 * non PF-specific interrupts from the various HW modules. Only one PCI
 * function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

	/* Select which SGE error conditions may raise an interrupt. */
	t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
		     ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
		     ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
		     ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
		     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
		     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
		     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
		     EGRESS_SIZE_ERR);
	/* Enable the PF-level root, then route global causes to this PF. */
	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
	t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
}
1519
/**
 * t4_intr_disable - disable interrupts
 * @adapter: the adapter whose interrupts should be disabled
 *
 * Disable interrupts. We only disable the top-level interrupt
 * concentrators. The caller must be a PCI function managing global
 * interrupts.
 */
void t4_intr_disable(struct adapter *adapter)
{
	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

	/* Mirror of t4_intr_enable(): drop the PF root and the PF routing. */
	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
	t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
}
1535
1536/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001537 * hash_mac_addr - return the hash value of a MAC address
1538 * @addr: the 48-bit Ethernet MAC address
1539 *
1540 * Hashes a MAC address according to the hash function used by HW inexact
1541 * (hash) address matching.
1542 */
1543static int hash_mac_addr(const u8 *addr)
1544{
1545 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1546 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1547 a ^= b;
1548 a ^= (a >> 12);
1549 a ^= (a >> 6);
1550 return a & 0x3f;
1551}
1552
/**
 * t4_config_rss_range - configure a portion of the RSS mapping table
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @viid: virtual interface whose RSS subtable is to be written
 * @start: start entry in the table to write
 * @n: how many table entries to write
 * @rspq: values for the response queue lookup table
 * @nrspq: number of values in @rspq
 *
 * Programs the selected part of the VI's RSS mapping table with the
 * provided values. If @nrspq < @n the supplied values are used repeatedly
 * until the full table range is populated.
 *
 * The caller must ensure the values in @rspq are in the range allowed for
 * @viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;	/* one past the last entry */
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
			       FW_CMD_REQUEST | FW_CMD_WRITE |
			       FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = htonl(FW_LEN16(cmd));

	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
	while (n > 0) {
		int nq = min(n, 32);
		__be32 *qp = &cmd.iq0_to_iq2;

		cmd.niqid = htons(nq);
		cmd.startidx = htons(start);

		start += nq;
		n -= nq;

		/* Pack three queue ids per 32-bit word, wrapping back to the
		 * start of @rspq whenever the supplied values run out. */
		while (nq > 0) {
			unsigned int v;

			v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;

			*qp++ = htonl(v);
			/* nq may go negative here; extra slots in the last
			 * word were still filled from @rspq above. */
			nq -= 3;
		}

		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}
1618
1619/**
1620 * t4_config_glbl_rss - configure the global RSS mode
1621 * @adapter: the adapter
1622 * @mbox: mbox to use for the FW command
1623 * @mode: global RSS mode
1624 * @flags: mode-specific flags
1625 *
1626 * Sets the global RSS mode.
1627 */
1628int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1629 unsigned int flags)
1630{
1631 struct fw_rss_glb_config_cmd c;
1632
1633 memset(&c, 0, sizeof(c));
1634 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
1635 FW_CMD_REQUEST | FW_CMD_WRITE);
1636 c.retval_len16 = htonl(FW_LEN16(c));
1637 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
1638 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1639 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
1640 c.u.basicvirtual.mode_pkd =
1641 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1642 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
1643 } else
1644 return -EINVAL;
1645 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1646}
1647
/**
 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
 * @adap: the adapter
 * @v4: holds the TCP/IP counter values
 * @v6: holds the TCP/IPv6 counter values
 *
 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	/* Buffer sized to hold the OUT_RST..RXT_SEG_LO range of counters. */
	u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];

#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
				 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
		v4->tcpOutRsts = STAT(OUT_RST);
		v4->tcpInSegs  = STAT64(IN_SEG);
		v4->tcpOutSegs = STAT64(OUT_SEG);
		v4->tcpRetransSegs = STAT64(RXT_SEG);
	}
	if (v6) {
		/* The v6 block is read starting at TP_MIB_TCP_V6OUT_RST; the
		 * same v4-relative STAT indices are reused, i.e. this relies
		 * on the v6 counters having the same layout as the v4 ones. */
		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
				 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
		v6->tcpOutRsts = STAT(OUT_RST);
		v6->tcpInSegs  = STAT64(IN_SEG);
		v6->tcpOutSegs = STAT64(OUT_SEG);
		v6->tcpRetransSegs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}
1686
/**
 * t4_read_mtu_tbl - returns the values in the HW path MTU table
 * @adap: the adapter
 * @mtus: where to store the MTU values
 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 * Reads the HW path MTU table.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	u32 v;
	int i;

	for (i = 0; i < NMTUS; ++i) {
		/* NOTE(review): index 0xff with MTUVALUE(i) appears to be the
		 * read-access idiom for entry i -- confirm against the T4
		 * register documentation. */
		t4_write_reg(adap, TP_MTU_TABLE,
			     MTUINDEX(0xff) | MTUVALUE(i));
		v = t4_read_reg(adap, TP_MTU_TABLE);
		mtus[i] = MTUVALUE_GET(v);
		if (mtu_log)
			mtu_log[i] = MTUWIDTH_GET(v);
	}
}
1709
1710/**
1711 * init_cong_ctrl - initialize congestion control parameters
1712 * @a: the alpha values for congestion control
1713 * @b: the beta values for congestion control
1714 *
1715 * Initialize the congestion control parameters.
1716 */
1717static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
1718{
1719 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
1720 a[9] = 2;
1721 a[10] = 3;
1722 a[11] = 4;
1723 a[12] = 5;
1724 a[13] = 6;
1725 a[14] = 7;
1726 a[15] = 8;
1727 a[16] = 9;
1728 a[17] = 10;
1729 a[18] = 14;
1730 a[19] = 17;
1731 a[20] = 21;
1732 a[21] = 25;
1733 a[22] = 30;
1734 a[23] = 35;
1735 a[24] = 45;
1736 a[25] = 60;
1737 a[26] = 80;
1738 a[27] = 100;
1739 a[28] = 200;
1740 a[29] = 300;
1741 a[30] = 400;
1742 a[31] = 500;
1743
1744 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
1745 b[9] = b[10] = 1;
1746 b[11] = b[12] = 2;
1747 b[13] = b[14] = b[15] = b[16] = 3;
1748 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
1749 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
1750 b[28] = b[29] = 6;
1751 b[30] = b[31] = 7;
1752}
1753
1754/* The minimum additive increment value for the congestion control table */
1755#define CC_MIN_INCR 2U
1756
1757/**
1758 * t4_load_mtus - write the MTU and congestion control HW tables
1759 * @adap: the adapter
1760 * @mtus: the values for the MTU table
1761 * @alpha: the values for the congestion control alpha parameter
1762 * @beta: the values for the congestion control beta parameter
1763 *
1764 * Write the HW MTU table with the supplied MTUs and the high-speed
1765 * congestion control table with the supplied alpha, beta, and MTUs.
1766 * We write the two tables together because the additive increments
1767 * depend on the MTUs.
1768 */
1769void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
1770 const unsigned short *alpha, const unsigned short *beta)
1771{
1772 static const unsigned int avg_pkts[NCCTRL_WIN] = {
1773 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
1774 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
1775 28672, 40960, 57344, 81920, 114688, 163840, 229376
1776 };
1777
1778 unsigned int i, w;
1779
1780 for (i = 0; i < NMTUS; ++i) {
1781 unsigned int mtu = mtus[i];
1782 unsigned int log2 = fls(mtu);
1783
1784 if (!(mtu & ((1 << log2) >> 2))) /* round */
1785 log2--;
1786 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
1787 MTUWIDTH(log2) | MTUVALUE(mtu));
1788
1789 for (w = 0; w < NCCTRL_WIN; ++w) {
1790 unsigned int inc;
1791
1792 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
1793 CC_MIN_INCR);
1794
1795 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
1796 (w << 16) | (beta[w] << 13) | inc);
1797 }
1798 }
1799}
1800
1801/**
 * get_mps_bg_map - return the buffer groups associated with a port
1803 * @adap: the adapter
1804 * @idx: the port index
1805 *
1806 * Returns a bitmap indicating which MPS buffer groups are associated
1807 * with the given port. Bit i is set if buffer group i is used by the
1808 * port.
1809 */
1810static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
1811{
1812 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
1813
1814 if (n == 0)
1815 return idx == 0 ? 0xf : 0;
1816 if (n == 1)
1817 return idx < 2 ? (3 << (2 * idx)) : 0;
1818 return 1 << idx;
1819}
1820
1821/**
1822 * t4_get_port_stats - collect port statistics
1823 * @adap: the adapter
1824 * @idx: the port index
1825 * @p: the stats structure to fill
1826 *
1827 * Collect statistics related to the given port from HW.
1828 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	/* buffer groups feeding this port; overflow/truncate counters below
	 * are per buffer group, not per port */
	u32 bgmap = get_mps_bg_map(adap, idx);

/* 64-bit per-port MAC statistic (reads the _L/_H register pair) */
#define GET_STAT(name) \
	t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
/* 64-bit adapter-wide MPS statistic */
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

	/* Tx counters */
	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop             = GET_STAT(TX_PORT_DROP);
	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);

	/* Rx counters */
	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);

	/* drop/truncate counters only exist for buffer groups this port
	 * actually uses; report 0 for the others */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
1901
1902/**
 * t4_wol_magic_enable - enable/disable magic packet WoL
1904 * @adap: the adapter
1905 * @port: the physical port index
1906 * @addr: MAC address expected in magic packets, %NULL to disable
1907 *
1908 * Enables/disables magic packet wake-on-LAN for the selected port.
1909 */
1910void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
1911 const u8 *addr)
1912{
1913 if (addr) {
1914 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
1915 (addr[2] << 24) | (addr[3] << 16) |
1916 (addr[4] << 8) | addr[5]);
1917 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
1918 (addr[0] << 8) | addr[1]);
1919 }
1920 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
1921 addr ? MAGICEN : 0);
1922}
1923
1924/**
1925 * t4_wol_pat_enable - enable/disable pattern-based WoL
1926 * @adap: the adapter
1927 * @port: the physical port index
1928 * @map: bitmap of which HW pattern filters to set
1929 * @mask0: byte mask for bytes 0-63 of a packet
1930 * @mask1: byte mask for bytes 64-127 of a packet
1931 * @crc: Ethernet CRC for selected bytes
1932 * @enable: enable/disable switch
1933 *
1934 * Sets the pattern filters indicated in @map to mask out the bytes
1935 * specified in @mask0/@mask1 in received packets and compare the CRC of
1936 * the resulting packet against @crc. If @enable is %true pattern-based
1937 * WoL is enabled, otherwise disabled.
1938 */
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
	int i;

	if (!enable) {
		/* just clear the pattern-match enable; EPIO contents are
		 * left as-is */
		t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
				 PATEN, 0);
		return 0;
	}
	if (map > 0xff)		/* only NWOL_PAT pattern filters exist */
		return -EINVAL;

#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)

	/* DATA1-3 carry the upper 96 bits of the 128-bit byte mask and are
	 * shared by every pattern written below; DATA0 is rewritten per
	 * pattern inside the loop. */
	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);

	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
		if (!(map & 1))		/* filter i not selected */
			continue;

		/* write byte masks */
		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
			return -ETIMEDOUT;

		/* write CRC; the CRC entries live at EPIO address i + 32 */
		t4_write_reg(adap, EPIO_REG(DATA0), crc);
		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
			return -ETIMEDOUT;
	}
#undef EPIO_REG

	t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
	return 0;
}
1981
/*
 * Fill in the opcode/request/direction word and the length word of FW
 * command structure @var for a FW_<cmd>_CMD issued as @rd_wr (READ or
 * WRITE).  Note that only these two header fields are set; any other
 * fields of @var are left untouched and must be initialized by the caller.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
				  FW_CMD_REQUEST | FW_CMD_##rd_wr); \
	(var).retval_len16 = htonl(FW_LEN16(var)); \
} while (0)
1987
1988/**
1989 * t4_mdio_rd - read a PHY register through MDIO
1990 * @adap: the adapter
1991 * @mbox: mailbox to use for the FW command
1992 * @phy_addr: the PHY address
1993 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
1994 * @reg: the register to read
1995 * @valp: where to store the value
1996 *
1997 * Issues a FW command through the given mailbox to read a PHY register.
1998 */
1999int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2000 unsigned int mmd, unsigned int reg, u16 *valp)
2001{
2002 int ret;
2003 struct fw_ldst_cmd c;
2004
2005 memset(&c, 0, sizeof(c));
2006 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2007 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2008 c.cycles_to_len16 = htonl(FW_LEN16(c));
2009 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2010 FW_LDST_CMD_MMD(mmd));
2011 c.u.mdio.raddr = htons(reg);
2012
2013 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2014 if (ret == 0)
2015 *valp = ntohs(c.u.mdio.rval);
2016 return ret;
2017}
2018
2019/**
2020 * t4_mdio_wr - write a PHY register through MDIO
2021 * @adap: the adapter
2022 * @mbox: mailbox to use for the FW command
2023 * @phy_addr: the PHY address
2024 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2025 * @reg: the register to write
2026 * @valp: value to write
2027 *
2028 * Issues a FW command through the given mailbox to write a PHY register.
2029 */
2030int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2031 unsigned int mmd, unsigned int reg, u16 val)
2032{
2033 struct fw_ldst_cmd c;
2034
2035 memset(&c, 0, sizeof(c));
2036 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2037 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2038 c.cycles_to_len16 = htonl(FW_LEN16(c));
2039 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2040 FW_LDST_CMD_MMD(mmd));
2041 c.u.mdio.raddr = htons(reg);
2042 c.u.mdio.rval = htons(val);
2043
2044 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2045}
2046
2047/**
2048 * t4_fw_hello - establish communication with FW
2049 * @adap: the adapter
2050 * @mbox: mailbox to use for the FW command
2051 * @evt_mbox: mailbox to receive async FW events
2052 * @master: specifies the caller's willingness to be the device master
2053 * @state: returns the current device state
2054 *
2055 * Issues a command to establish communication with FW.
2056 */
2057int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2058 enum dev_master master, enum dev_state *state)
2059{
2060 int ret;
2061 struct fw_hello_cmd c;
2062
2063 INIT_CMD(c, HELLO, WRITE);
2064 c.err_to_mbasyncnot = htonl(
2065 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2066 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2067 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
2068 FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
2069
2070 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2071 if (ret == 0 && state) {
2072 u32 v = ntohl(c.err_to_mbasyncnot);
2073 if (v & FW_HELLO_CMD_INIT)
2074 *state = DEV_STATE_INIT;
2075 else if (v & FW_HELLO_CMD_ERR)
2076 *state = DEV_STATE_ERR;
2077 else
2078 *state = DEV_STATE_UNINIT;
2079 }
2080 return ret;
2081}
2082
2083/**
2084 * t4_fw_bye - end communication with FW
2085 * @adap: the adapter
2086 * @mbox: mailbox to use for the FW command
2087 *
2088 * Issues a command to terminate communication with FW.
2089 */
2090int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2091{
2092 struct fw_bye_cmd c;
2093
2094 INIT_CMD(c, BYE, WRITE);
2095 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2096}
2097
2098/**
2099 * t4_init_cmd - ask FW to initialize the device
2100 * @adap: the adapter
2101 * @mbox: mailbox to use for the FW command
2102 *
2103 * Issues a command to FW to partially initialize the device. This
2104 * performs initialization that generally doesn't depend on user input.
2105 */
2106int t4_early_init(struct adapter *adap, unsigned int mbox)
2107{
2108 struct fw_initialize_cmd c;
2109
2110 INIT_CMD(c, INITIALIZE, WRITE);
2111 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2112}
2113
2114/**
2115 * t4_fw_reset - issue a reset to FW
2116 * @adap: the adapter
2117 * @mbox: mailbox to use for the FW command
2118 * @reset: specifies the type of reset to perform
2119 *
2120 * Issues a reset command of the specified type to FW.
2121 */
2122int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2123{
2124 struct fw_reset_cmd c;
2125
2126 INIT_CMD(c, RESET, WRITE);
2127 c.val = htonl(reset);
2128 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2129}
2130
2131/**
2132 * t4_query_params - query FW or device parameters
2133 * @adap: the adapter
2134 * @mbox: mailbox to use for the FW command
2135 * @pf: the PF
2136 * @vf: the VF
2137 * @nparams: the number of parameters
2138 * @params: the parameter names
2139 * @val: the parameter values
2140 *
2141 * Reads the value of FW or device parameters. Up to 7 parameters can be
2142 * queried at once.
2143 */
2144int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2145 unsigned int vf, unsigned int nparams, const u32 *params,
2146 u32 *val)
2147{
2148 int i, ret;
2149 struct fw_params_cmd c;
2150 __be32 *p = &c.param[0].mnem;
2151
2152 if (nparams > 7)
2153 return -EINVAL;
2154
2155 memset(&c, 0, sizeof(c));
2156 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2157 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2158 FW_PARAMS_CMD_VFN(vf));
2159 c.retval_len16 = htonl(FW_LEN16(c));
2160 for (i = 0; i < nparams; i++, p += 2)
2161 *p = htonl(*params++);
2162
2163 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2164 if (ret == 0)
2165 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2166 *val++ = ntohl(*p);
2167 return ret;
2168}
2169
2170/**
2171 * t4_set_params - sets FW or device parameters
2172 * @adap: the adapter
2173 * @mbox: mailbox to use for the FW command
2174 * @pf: the PF
2175 * @vf: the VF
2176 * @nparams: the number of parameters
2177 * @params: the parameter names
2178 * @val: the parameter values
2179 *
2180 * Sets the value of FW or device parameters. Up to 7 parameters can be
2181 * specified at once.
2182 */
2183int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2184 unsigned int vf, unsigned int nparams, const u32 *params,
2185 const u32 *val)
2186{
2187 struct fw_params_cmd c;
2188 __be32 *p = &c.param[0].mnem;
2189
2190 if (nparams > 7)
2191 return -EINVAL;
2192
2193 memset(&c, 0, sizeof(c));
2194 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2195 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2196 FW_PARAMS_CMD_VFN(vf));
2197 c.retval_len16 = htonl(FW_LEN16(c));
2198 while (nparams--) {
2199 *p++ = htonl(*params++);
2200 *p++ = htonl(*val++);
2201 }
2202
2203 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2204}
2205
2206/**
2207 * t4_cfg_pfvf - configure PF/VF resource limits
2208 * @adap: the adapter
2209 * @mbox: mailbox to use for the FW command
2210 * @pf: the PF being configured
2211 * @vf: the VF being configured
2212 * @txq: the max number of egress queues
2213 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
2214 * @rxqi: the max number of interrupt-capable ingress queues
2215 * @rxq: the max number of interruptless ingress queues
2216 * @tc: the PCI traffic class
2217 * @vi: the max number of virtual interfaces
2218 * @cmask: the channel access rights mask for the PF/VF
2219 * @pmask: the port access rights mask for the PF/VF
2220 * @nexact: the maximum number of exact MPS filters
2221 * @rcaps: read capabilities
2222 * @wxcaps: write/execute capabilities
2223 *
2224 * Configures resource limits and capabilities for a physical or virtual
2225 * function.
2226 */
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
			    FW_PFVF_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	/* ingress queue limits: interrupt-capable and interruptless */
	c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
			       FW_PFVF_CMD_NIQ(rxq));
	/* channel/port access masks and egress queue limit */
	c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
			      FW_PFVF_CMD_PMASK(pmask) |
			      FW_PFVF_CMD_NEQ(txq));
	/* traffic class, VI count, and exact-match filter count */
	c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
				FW_PFVF_CMD_NEXACTF(nexact));
	/* capabilities and Ethernet/control egress queue limit */
	c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
				     FW_PFVF_CMD_WX_CAPS(wxcaps) |
				     FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
2252
2253/**
2254 * t4_alloc_vi - allocate a virtual interface
2255 * @adap: the adapter
2256 * @mbox: mailbox to use for the FW command
2257 * @port: physical port associated with the VI
2258 * @pf: the PF owning the VI
2259 * @vf: the VF owning the VI
2260 * @nmac: number of MAC addresses needed (1 to 5)
2261 * @mac: the MAC addresses of the VI
2262 * @rss_size: size of RSS table slice associated with this VI
2263 *
2264 * Allocates a virtual interface for the given physical port. If @mac is
2265 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
2266 * @mac should be large enough to hold @nmac Ethernet addresses, they are
2267 * stored consecutively so the space needed is @nmac * 6 bytes.
2268 * Returns a negative error number or the non-negative VI id.
2269 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.portid_pkd = FW_VI_CMD_PORTID(port);
	c.nmac = nmac - 1;	/* FW encodes the count minus one */

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		/* copy the primary MAC, then the extra addresses FW assigned;
		 * the switch deliberately falls through so that requesting N
		 * addresses copies entries N-1 down to 1 */
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
	/* on success return the new VI id from the reply */
	return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
}
2306
2307/**
 * t4_set_rxmode - set Rx properties of a virtual interface
2309 * @adap: the adapter
2310 * @mbox: mailbox to use for the FW command
2311 * @viid: the VI id
2312 * @mtu: the new MTU or -1
2313 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2314 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2315 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
 * @sleep_ok: if true we may sleep while awaiting command completion
2318 *
2319 * Sets Rx properties of a virtual interface.
2320 */
2321int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00002322 int mtu, int promisc, int all_multi, int bcast, int vlanex,
2323 bool sleep_ok)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002324{
2325 struct fw_vi_rxmode_cmd c;
2326
2327 /* convert to FW values */
2328 if (mtu < 0)
2329 mtu = FW_RXMODE_MTU_NO_CHG;
2330 if (promisc < 0)
2331 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
2332 if (all_multi < 0)
2333 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
2334 if (bcast < 0)
2335 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00002336 if (vlanex < 0)
2337 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002338
2339 memset(&c, 0, sizeof(c));
2340 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
2341 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
2342 c.retval_len16 = htonl(FW_LEN16(c));
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00002343 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
2344 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
2345 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
2346 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
2347 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002348 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2349}
2350
2351/**
2352 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
2353 * @adap: the adapter
2354 * @mbox: mailbox to use for the FW command
2355 * @viid: the VI id
2356 * @free: if true any existing filters for this VI id are first removed
2357 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
2358 * @addr: the MAC address(es)
2359 * @idx: where to store the index of each allocated filter
2360 * @hash: pointer to hash address filter bitmap
2361 * @sleep_ok: call is allowed to sleep
2362 *
2363 * Allocates an exact-match filter for each of the supplied addresses and
2364 * sets it to the corresponding address. If @idx is not %NULL it should
2365 * have at least @naddr entries, each of which will be set to the index of
2366 * the filter allocated for the corresponding MAC address. If a filter
2367 * could not be allocated for an address its index is set to 0xffff.
2368 * If @hash is not %NULL addresses that fail to allocate an exact filter
2369 * are hashed and update the hash filter bitmap pointed at by @hash.
2370 *
2371 * Returns a negative error number or the number of filters allocated.
2372 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int i, ret;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p;

	if (naddr > 7)		/* the command holds at most 7 entries */
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
			     FW_VI_MAC_CMD_VIID(viid));
	/* two exact-match entries fit per 16-byte unit, hence the rounding */
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
				    FW_CMD_LEN16((naddr + 2) / 2));

	/* request a new exact-match filter for every address */
	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
				       FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
	}

	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret)
		return ret;

	/* FW returns the allocated index per entry; an index >= NEXACT_MAC
	 * means no exact filter could be allocated for that address */
	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));

		if (idx)
			idx[i] = index >= NEXACT_MAC ? 0xffff : index;
		if (index < NEXACT_MAC)
			ret++;		/* count successful allocations */
		else if (hash)
			/* fall back to the inexact hash filter bitmap */
			*hash |= (1ULL << hash_mac_addr(addr[i]));
	}
	return ret;
}
2413
2414/**
2415 * t4_change_mac - modifies the exact-match filter for a MAC address
2416 * @adap: the adapter
2417 * @mbox: mailbox to use for the FW command
2418 * @viid: the VI id
2419 * @idx: index of existing filter for old value of MAC address, or -1
2420 * @addr: the new MAC address value
2421 * @persist: whether a new MAC allocation should be persistent
2422 * @add_smt: if true also add the address to the HW SMT
2423 *
2424 * Modifies an exact-match filter and sets it to the new MAC address.
2425 * Note that in general it is not possible to modify the value of a given
2426 * filter so the generic way to modify an address filter is to free the one
2427 * being used by the old address value and allocate a new filter for the
2428 * new address value. @idx can be -1 if the address is a new addition.
2429 *
2430 * Returns a negative error number or the index of the filter with the new
2431 * MAC value.
2432 */
2433int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2434 int idx, const u8 *addr, bool persist, bool add_smt)
2435{
2436 int ret, mode;
2437 struct fw_vi_mac_cmd c;
2438 struct fw_vi_mac_exact *p = c.u.exact;
2439
2440 if (idx < 0) /* new allocation */
2441 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
2442 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
2443
2444 memset(&c, 0, sizeof(c));
2445 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2446 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
2447 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
2448 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2449 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
2450 FW_VI_MAC_CMD_IDX(idx));
2451 memcpy(p->macaddr, addr, sizeof(p->macaddr));
2452
2453 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2454 if (ret == 0) {
2455 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2456 if (ret >= NEXACT_MAC)
2457 ret = -ENOMEM;
2458 }
2459 return ret;
2460}
2461
2462/**
2463 * t4_set_addr_hash - program the MAC inexact-match hash filter
2464 * @adap: the adapter
2465 * @mbox: mailbox to use for the FW command
2466 * @viid: the VI id
2467 * @ucast: whether the hash filter should also match unicast addresses
2468 * @vec: the value to be written to the hash filter
2469 * @sleep_ok: call is allowed to sleep
2470 *
2471 * Sets the 64-bit inexact-match hash filter for a virtual interface.
2472 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;

	memset(&c, 0, sizeof(c));
	/* NOTE(review): FW_VI_ENABLE_CMD_VIID() is used inside a
	 * FW_VI_MAC_CMD here; presumably both commands place the VI id in
	 * the same bit positions -- confirm against t4fw_api.h.
	 */
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
				    FW_VI_MAC_CMD_HASHUNIEN(ucast) |
				    FW_CMD_LEN16(1));
	/* the 64-bit hash vector itself */
	c.u.hash.hashvec = cpu_to_be64(vec);
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
2487
2488/**
2489 * t4_enable_vi - enable/disable a virtual interface
2490 * @adap: the adapter
2491 * @mbox: mailbox to use for the FW command
2492 * @viid: the VI id
2493 * @rx_en: 1=enable Rx, 0=disable Rx
2494 * @tx_en: 1=enable Tx, 0=disable Tx
2495 *
2496 * Enables/disables a virtual interface.
2497 */
2498int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
2499 bool rx_en, bool tx_en)
2500{
2501 struct fw_vi_enable_cmd c;
2502
2503 memset(&c, 0, sizeof(c));
2504 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2505 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2506 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
2507 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
2508 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2509}
2510
2511/**
2512 * t4_identify_port - identify a VI's port by blinking its LED
2513 * @adap: the adapter
2514 * @mbox: mailbox to use for the FW command
2515 * @viid: the VI id
2516 * @nblinks: how many times to blink LED at 2.5 Hz
2517 *
2518 * Identifies a VI's port by blinking its LED.
2519 */
2520int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
2521 unsigned int nblinks)
2522{
2523 struct fw_vi_enable_cmd c;
2524
2525 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2526 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2527 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
2528 c.blinkdur = htons(nblinks);
2529 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2530}
2531
2532/**
 * t4_iq_free - free an ingress queue and its FLs
2534 * @adap: the adapter
2535 * @mbox: mailbox to use for the FW command
2536 * @pf: the PF owning the queues
2537 * @vf: the VF owning the queues
2538 * @iqtype: the ingress queue type
2539 * @iqid: ingress queue id
2540 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2541 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2542 *
2543 * Frees an ingress queue and its associated FLs, if any.
2544 */
2545int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2546 unsigned int vf, unsigned int iqtype, unsigned int iqid,
2547 unsigned int fl0id, unsigned int fl1id)
2548{
2549 struct fw_iq_cmd c;
2550
2551 memset(&c, 0, sizeof(c));
2552 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2553 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2554 FW_IQ_CMD_VFN(vf));
2555 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
2556 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
2557 c.iqid = htons(iqid);
2558 c.fl0id = htons(fl0id);
2559 c.fl1id = htons(fl1id);
2560 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2561}
2562
2563/**
2564 * t4_eth_eq_free - free an Ethernet egress queue
2565 * @adap: the adapter
2566 * @mbox: mailbox to use for the FW command
2567 * @pf: the PF owning the queue
2568 * @vf: the VF owning the queue
2569 * @eqid: egress queue id
2570 *
2571 * Frees an Ethernet egress queue.
2572 */
2573int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2574 unsigned int vf, unsigned int eqid)
2575{
2576 struct fw_eq_eth_cmd c;
2577
2578 memset(&c, 0, sizeof(c));
2579 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2580 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
2581 FW_EQ_ETH_CMD_VFN(vf));
2582 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
2583 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
2584 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2585}
2586
2587/**
2588 * t4_ctrl_eq_free - free a control egress queue
2589 * @adap: the adapter
2590 * @mbox: mailbox to use for the FW command
2591 * @pf: the PF owning the queue
2592 * @vf: the VF owning the queue
2593 * @eqid: egress queue id
2594 *
2595 * Frees a control egress queue.
2596 */
2597int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2598 unsigned int vf, unsigned int eqid)
2599{
2600 struct fw_eq_ctrl_cmd c;
2601
2602 memset(&c, 0, sizeof(c));
2603 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2604 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
2605 FW_EQ_CTRL_CMD_VFN(vf));
2606 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
2607 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
2608 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2609}
2610
2611/**
2612 * t4_ofld_eq_free - free an offload egress queue
2613 * @adap: the adapter
2614 * @mbox: mailbox to use for the FW command
2615 * @pf: the PF owning the queue
2616 * @vf: the VF owning the queue
2617 * @eqid: egress queue id
2618 *
2619 * Frees a control egress queue.
2620 */
2621int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2622 unsigned int vf, unsigned int eqid)
2623{
2624 struct fw_eq_ofld_cmd c;
2625
2626 memset(&c, 0, sizeof(c));
2627 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2628 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
2629 FW_EQ_OFLD_CMD_VFN(vf));
2630 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
2631 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
2632 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2633}
2634
/**
 *	t4_handle_fw_rpl - process a FW reply message
 *	@adap: the adapter
 *	@rpl: start of the FW message
 *
 *	Processes a FW message, such as link state change messages.
 *	Messages other than FW_PORT_CMD are silently ignored; always
 *	returns 0.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;	/* opcode is the first byte of every FW message */

	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
		int speed = 0, fc = 0;
		const struct fw_port_cmd *p = (void *)rpl;
		/* translate the HW channel in the message to a port index */
		int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
		int port = adap->chan_map[chan];
		struct port_info *pi = adap2pinfo(adap, port);
		struct link_config *lc = &pi->link_cfg;
		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);

		/* decode pause settings and link speed from the status word */
		if (stat & FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = SPEED_100;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = SPEED_1000;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = SPEED_10000;

		/* notify the OS layer only when the link state really changed */
		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {                    /* something changed */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			t4_os_link_changed(adap, port, link_ok);
		}
		/* likewise for transceiver module insertion/removal */
		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, port);
		}
	}
	return 0;
}
2682
2683static void __devinit get_pci_mode(struct adapter *adapter,
2684 struct pci_params *p)
2685{
2686 u16 val;
2687 u32 pcie_cap = pci_pcie_cap(adapter->pdev);
2688
2689 if (pcie_cap) {
2690 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
2691 &val);
2692 p->speed = val & PCI_EXP_LNKSTA_CLS;
2693 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
2694 }
2695}
2696
2697/**
2698 * init_link_config - initialize a link's SW state
2699 * @lc: structure holding the link state
2700 * @caps: link capabilities
2701 *
2702 * Initializes the SW state maintained for each link, including the link's
2703 * capabilities and default speed/flow-control/autonegotiation settings.
2704 */
2705static void __devinit init_link_config(struct link_config *lc,
2706 unsigned int caps)
2707{
2708 lc->supported = caps;
2709 lc->requested_speed = 0;
2710 lc->speed = 0;
2711 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
2712 if (lc->supported & FW_PORT_CAP_ANEG) {
2713 lc->advertising = lc->supported & ADVERT_MASK;
2714 lc->autoneg = AUTONEG_ENABLE;
2715 lc->requested_fc |= PAUSE_AUTONEG;
2716 } else {
2717 lc->advertising = 0;
2718 lc->autoneg = AUTONEG_DISABLE;
2719 }
2720}
2721
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00002722int t4_wait_dev_ready(struct adapter *adap)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002723{
2724 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
2725 return 0;
2726 msleep(500);
2727 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
2728}
2729
/*
 * Identify the serial flash part attached to the adapter and record its
 * parameters (sector count, total size, FW image start) in adap->params.
 * Only Numonix parts with a power-of-two size between 1MB and 8MB, or
 * exactly 16MB, are accepted; anything else fails with -EINVAL.
 */
static int __devinit get_flash_params(struct adapter *adap)
{
	int ret;
	u32 info;

	/* issue a Read-ID command and collect the 3-byte JEDEC ID reply */
	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adap, 3, 0, 1, &info);
	t4_write_reg(adap, SF_OP, 0);                    /* unlock SF */
	if (ret)
		return ret;

	/* low byte is the manufacturer id; 0x20 is Numonix */
	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
		return -EINVAL;
	info >>= 16;                           /* log2 of size */
	if (info >= 0x14 && info < 0x18)       /* 1MB-8MB: size/64KB sectors */
		adap->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)                 /* 16MB part has 64 sectors */
		adap->params.sf_nsec = 64;
	else
		return -EINVAL;
	adap->params.sf_size = 1 << info;
	/* FW image location in flash comes from the CIM boot config */
	adap->params.sf_fw_start =
		t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
	return 0;
}
2756
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002757/**
2758 * t4_prep_adapter - prepare SW and HW for operation
2759 * @adapter: the adapter
2760 * @reset: if true perform a HW reset
2761 *
2762 * Initialize adapter SW state for the various HW modules, set initial
2763 * values for some adapter tunables, take PHYs out of reset, and
2764 * initialize the MDIO interface.
2765 */
2766int __devinit t4_prep_adapter(struct adapter *adapter)
2767{
2768 int ret;
2769
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00002770 ret = t4_wait_dev_ready(adapter);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002771 if (ret < 0)
2772 return ret;
2773
2774 get_pci_mode(adapter, &adapter->params.pci);
2775 adapter->params.rev = t4_read_reg(adapter, PL_REV);
2776
Dimitris Michailidis900a6592010-06-18 10:05:27 +00002777 ret = get_flash_params(adapter);
2778 if (ret < 0) {
2779 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
2780 return ret;
2781 }
2782
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002783 ret = get_vpd_params(adapter, &adapter->params.vpd);
2784 if (ret < 0)
2785 return ret;
2786
2787 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
2788
2789 /*
2790 * Default port for debugging in case we can't reach FW.
2791 */
2792 adapter->params.nports = 1;
2793 adapter->params.portvec = 1;
2794 return 0;
2795}
2796
/**
 *	t4_port_init - initialize SW state for the adapter's ports
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW commands
 *	@pf: the PF the allocated VIs are associated with
 *	@vf: the VF the allocated VIs are associated with
 *	@return: 0 on success, a negative FW mailbox error otherwise
 *
 *	For each port: queries the FW for the port's info, allocates a
 *	virtual interface on it, records the MAC address, MDIO address,
 *	port/module type and RSS parameters in the port_info, and
 *	initializes the port's link configuration.
 */
int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j = 0;
	struct fw_port_cmd c;
	struct fw_rss_vi_config_cmd rvc;

	memset(&c, 0, sizeof(c));
	memset(&rvc, 0, sizeof(rvc));

	for_each_port(adap, i) {
		unsigned int rss_size;
		struct port_info *p = adap2pinfo(adap, i);

		/* skip to the next channel bit set in the FW port vector */
		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		/* ask the FW for this port's capabilities and status */
		c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_PORT_CMD_PORTID(j));
		c.action_to_len16 = htonl(
			FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
			FW_LEN16(c));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
		if (ret)
			return ret;

		/* on success t4_alloc_vi returns the new VI id */
		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
		if (ret < 0)
			return ret;

		p->viid = ret;
		p->tx_chan = j;
		p->lport = j;
		p->rss_size = rss_size;
		/* the VI's MAC address becomes the net device's address */
		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
		memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
		adap->port[i]->dev_id = j;

		/* extract MDIO address and port type from the FW reply */
		ret = ntohl(c.u.info.lstatus_to_modtype);
		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
			FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
		p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
		/* module type is unknown until the FW reports it */
		p->mod_type = FW_PORT_MOD_TYPE_NA;

		/* read back the VI's RSS configuration */
		rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
		rvc.retval_len16 = htonl(FW_LEN16(rvc));
		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
		if (ret)
			return ret;
		p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);

		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
		j++;
	}
	return 0;
}