1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/init.h>
36#include <linux/delay.h>
37#include "cxgb4.h"
38#include "t4_regs.h"
39#include "t4fw_api.h"
40
41/**
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
50 *
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
55 */
56static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp)
58{
59 while (1) {
60 u32 val = t4_read_reg(adapter, reg);
61
62 if (!!(val & mask) == polarity) {
63 if (valp)
64 *valp = val;
65 return 0;
66 }
67 if (--attempts == 0)
68 return -EAGAIN;
69 if (delay)
70 udelay(delay);
71 }
72}
73
74static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
75 int polarity, int attempts, int delay)
76{
77 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
78 delay, NULL);
79}
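/*
 * Illustrative sketch (not part of the original source): a caller polling a
 * completion bit typically looks like the BIST users further down in this
 * file, e.g.
 *
 *	u32 val;
 *	int ret = t4_wait_op_done_val(adap, MC_BIST_CMD, START_BIST, 0,
 *				      10, 1, &val);
 *	if (ret)
 *		return ret;	// -EAGAIN: bit never reached polarity 0
 *	// val holds MC_BIST_CMD as read when START_BIST cleared
 */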
80
81/**
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
87 *
88 * Sets a register field specified by the supplied mask to the
89 * given value.
90 */
91void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
92 u32 val)
93{
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
95
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush */
98}
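/*
 * Example (hedged sketch, the register name is hypothetical): updating a
 * two-bit field at bits 5:4 of some control register to the value 2 without
 * disturbing the rest of the register would be written as
 *
 *	t4_set_reg_field(adap, EXAMPLE_CTRL_REG, 0x30, 0x20);
 *
 * i.e. @mask selects the bits to change and @val supplies their new,
 * already-shifted value.
 */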
99
100/**
101 * t4_read_indirect - read indirectly addressed registers
102 * @adap: the adapter
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
108 *
109 * Reads registers that are accessed indirectly through an address/data
110 * register pair.
111 */
112static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals,
114 unsigned int nregs, unsigned int start_idx)
115{
116 while (nregs--) {
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
119 start_idx++;
120 }
121}
122
123/**
124 * t4_write_indirect - write indirectly addressed registers
125 * @adap: the adapter
126 * @addr_reg: register holding the indirect addresses
127 * @data_reg: register holding the value for the indirect registers
128 * @vals: values to write
129 * @nregs: how many indirect registers to write
130 * @start_idx: address of first indirect register to write
131 *
132 * Writes a sequential block of registers that are accessed indirectly
133 * through an address/data register pair.
134 */
135void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
136 unsigned int data_reg, const u32 *vals,
137 unsigned int nregs, unsigned int start_idx)
138{
139 while (nregs--) {
140 t4_write_reg(adap, addr_reg, start_idx++);
141 t4_write_reg(adap, data_reg, *vals++);
142 }
143}
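/*
 * Usage sketch (register names are hypothetical): reading four consecutive
 * indirectly addressed registers starting at index 8 through an address/data
 * pair would look like
 *
 *	u32 vals[4];
 *	t4_read_indirect(adap, EXAMPLE_ADDR_REG, EXAMPLE_DATA_REG, vals, 4, 8);
 *
 * and t4_write_indirect() mirrors this for the write direction.
 */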
144
145/*
146 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
147 */
148static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
149 u32 mbox_addr)
150{
151 for ( ; nflit; nflit--, mbox_addr += 8)
152 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
153}
154
155/*
156 * Handle a FW assertion reported in a mailbox.
157 */
158static void fw_asrt(struct adapter *adap, u32 mbox_addr)
159{
160 struct fw_debug_cmd asrt;
161
162 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
163 dev_alert(adap->pdev_dev,
164 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
165 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
166 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
167}
168
169static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
170{
171 dev_err(adap->pdev_dev,
172 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
173 (unsigned long long)t4_read_reg64(adap, data_reg),
174 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
175 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
176 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
177 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
178 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
179 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
180 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
181}
182
183/**
184 * t4_wr_mbox_meat - send a command to FW through the given mailbox
185 * @adap: the adapter
186 * @mbox: index of the mailbox to use
187 * @cmd: the command to write
188 * @size: command length in bytes
189 * @rpl: where to optionally store the reply
190 * @sleep_ok: if true we may sleep while awaiting command completion
191 *
192 * Sends the given command to FW through the selected mailbox and waits
193 * for the FW to execute the command. If @rpl is not %NULL it is used to
194 * store the FW's reply to the command. The command and its optional
195 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
196 * to respond. @sleep_ok determines whether we may sleep while awaiting
197 * the response. If sleeping is allowed we use progressive backoff;
198 * otherwise we spin.
199 *
200 * The return value is 0 on success or a negative errno on failure. A
201 * failure can happen either because we are not able to execute the
202 * command or FW executes it but signals an error. In the latter case
203 * the return value is the error code indicated by FW (negated).
204 */
205int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
206 void *rpl, bool sleep_ok)
207{
208 static const int delay[] = {
209 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
210 };
211
212 u32 v;
213 u64 res;
214 int i, ms, delay_idx;
215 const __be64 *p = cmd;
216 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
217 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
218
219 if ((size & 15) || size > MBOX_LEN)
220 return -EINVAL;
221
222 /*
223 * If the device is off-line, as in EEH, commands will time out.
224 * Fail them early so we don't waste time waiting.
225 */
226 if (adap->pdev->error_state != pci_channel_io_normal)
227 return -EIO;
228
229 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
230 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
231 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
232
233 if (v != MBOX_OWNER_DRV)
234 return v ? -EBUSY : -ETIMEDOUT;
235
236 for (i = 0; i < size; i += 8)
237 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
238
239 t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
240 t4_read_reg(adap, ctl_reg); /* flush write */
241
242 delay_idx = 0;
243 ms = delay[0];
244
245 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
246 if (sleep_ok) {
247 ms = delay[delay_idx]; /* last element may repeat */
248 if (delay_idx < ARRAY_SIZE(delay) - 1)
249 delay_idx++;
250 msleep(ms);
251 } else
252 mdelay(ms);
253
254 v = t4_read_reg(adap, ctl_reg);
255 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
256 if (!(v & MBMSGVALID)) {
257 t4_write_reg(adap, ctl_reg, 0);
258 continue;
259 }
260
261 res = t4_read_reg64(adap, data_reg);
262 if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
263 fw_asrt(adap, data_reg);
264 res = FW_CMD_RETVAL(EIO);
265 } else if (rpl)
266 get_mbox_rpl(adap, rpl, size / 8, data_reg);
267
268 if (FW_CMD_RETVAL_GET((int)res))
269 dump_mbox(adap, mbox, data_reg);
270 t4_write_reg(adap, ctl_reg, 0);
271 return -FW_CMD_RETVAL_GET((int)res);
272 }
273 }
274
275 dump_mbox(adap, mbox, data_reg);
276 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
277 *(const u8 *)cmd, mbox);
278 return -ETIMEDOUT;
279}
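/*
 * Illustrative sketch: the usual calling pattern is to build a firmware
 * command structure and hand it to t4_wr_mbox(), assumed here to be the
 * sleeping wrapper around this routine, as t4_restart_aneg() does further
 * down in this file:
 *
 *	struct fw_port_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
 *			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
 *	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
 *				  FW_LEN16(c));
 *	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 */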
280
281/**
282 * t4_mc_read - read from MC through backdoor accesses
283 * @adap: the adapter
284 * @addr: address of first byte requested
285 * @data: 64 bytes of data containing the requested address
286 * @ecc: where to store the corresponding 64-bit ECC word
287 *
288 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
289 * that covers the requested address @addr. If @ecc is not %NULL it
290 * is assigned the 64-bit ECC word for the read data.
291 */
292int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
293{
294 int i;
295
296 if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
297 return -EBUSY;
298 t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
299 t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
300 t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
301 t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
302 BIST_CMD_GAP(1));
303 i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
304 if (i)
305 return i;
306
307#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
308
309 for (i = 15; i >= 0; i--)
310 *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
311 if (ecc)
312 *ecc = t4_read_reg64(adap, MC_DATA(16));
313#undef MC_DATA
314 return 0;
315}
316
317/**
318 * t4_edc_read - read from EDC through backdoor accesses
319 * @adap: the adapter
320 * @idx: which EDC to access
321 * @addr: address of first byte requested
322 * @data: 64 bytes of data containing the requested address
323 * @ecc: where to store the corresponding 64-bit ECC word
324 *
325 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
326 * that covers the requested address @addr. If @ecc is not %NULL it
327 * is assigned the 64-bit ECC word for the read data.
328 */
329int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
330{
331 int i;
332
333 idx *= EDC_STRIDE;
334 if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
335 return -EBUSY;
336 t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
337 t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
338 t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
339 t4_write_reg(adap, EDC_BIST_CMD + idx,
340 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
341 i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
342 if (i)
343 return i;
344
345#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
346
347 for (i = 15; i >= 0; i--)
348 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
349 if (ecc)
350 *ecc = t4_read_reg64(adap, EDC_DATA(16));
351#undef EDC_DATA
352 return 0;
353}
354
355/*
356 * t4_mem_win_rw - read/write memory through PCIE memory window
357 * @adap: the adapter
358 * @addr: address of first byte requested
359 * @data: MEMWIN0_APERTURE bytes of data containing the requested address
360 * @dir: direction of transfer 1 => read, 0 => write
361 *
362 * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
363 * MEMWIN0_APERTURE-byte-aligned address that covers the requested
364 * address @addr.
365 */
366static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
367{
368 int i;
369
370 /*
371 * Set up the offset into the PCIE memory window. The address must be a
372 * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to
373 * ensure that changes propagate before we attempt to use the new
374 * values.)
375 */
376 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
377 addr & ~(MEMWIN0_APERTURE - 1));
378 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
379
380 /* Collect data 4 bytes at a time, up to MEMWIN0_APERTURE */
381 for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) {
382 if (dir)
383 *data++ = t4_read_reg(adap, (MEMWIN0_BASE + i));
384 else
385 t4_write_reg(adap, (MEMWIN0_BASE + i), *data++);
386 }
387
388 return 0;
389}
390
391/**
392 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
393 * @adap: the adapter
394 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
395 * @addr: address within indicated memory type
396 * @len: amount of memory to transfer
397 * @buf: host memory buffer
398 * @dir: direction of transfer 1 => read, 0 => write
399 *
400 * Reads/writes an [almost] arbitrary memory region in the firmware: the
401 * firmware memory address, length and host buffer must be aligned on
402 * 32-bit boudaries. The memory is transferred as a raw byte sequence
403 * from/to the firmware's memory. If this memory contains data
404 * structures which contain multi-byte integers, it's the callers
405 * responsibility to perform appropriate byte order conversions.
406 */
407static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
408 __be32 *buf, int dir)
409{
410 u32 pos, start, end, offset, memoffset;
411 int ret;
412
413 /*
414 * Argument sanity checks ...
415 */
416 if ((addr & 0x3) || (len & 0x3))
417 return -EINVAL;
418
419 /*
420 * Offset into the region of memory which is being accessed
421 * MEM_EDC0 = 0
422 * MEM_EDC1 = 1
423 * MEM_MC = 2
424 */
425 memoffset = (mtype * (5 * 1024 * 1024));
426
427 /* Determine the PCIE_MEM_ACCESS_OFFSET */
428 addr = addr + memoffset;
429
430 /*
431 * The underlying EDC/MC read routines read MEMWIN0_APERTURE bytes
432 * at a time so we need to round down the start and round up the end.
433 * We'll start copying out of the first line at (addr - start) a word
434 * at a time.
435 */
436 start = addr & ~(MEMWIN0_APERTURE-1);
437 end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
438 offset = (addr - start)/sizeof(__be32);
439
440 for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
441 __be32 data[MEMWIN0_APERTURE/sizeof(__be32)];
442
443 /*
444 * If we're writing, copy the data from the caller's memory
445 * buffer
446 */
447 if (!dir) {
448 /*
449 * If we're doing a partial write, then we need to do
450 * a read-modify-write ...
451 */
452 if (offset || len < MEMWIN0_APERTURE) {
453 ret = t4_mem_win_rw(adap, pos, data, 1);
454 if (ret)
455 return ret;
456 }
457 while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
458 len > 0) {
459 data[offset++] = *buf++;
460 len -= sizeof(__be32);
461 }
462 }
463
464 /*
465 * Transfer a block of memory and bail if there's an error.
466 */
467 ret = t4_mem_win_rw(adap, pos, data, dir);
468 if (ret)
469 return ret;
470
471 /*
472 * If we're reading, copy the data into the caller's memory
473 * buffer.
474 */
475 if (dir)
476 while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
477 len > 0) {
478 *buf++ = data[offset++];
479 len -= sizeof(__be32);
480 }
481 }
482
483 return 0;
484}
485
486int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
487 __be32 *buf)
488{
489 return t4_memory_rw(adap, mtype, addr, len, buf, 0);
490}
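/*
 * Usage sketch: writing a 32-bit-aligned buffer into EDC 0 and reading it
 * back through the same window (direction 1 => read, 0 => write):
 *
 *	ret = t4_memory_write(adap, MEM_EDC0, addr, len, buf);
 *	if (!ret)
 *		ret = t4_memory_rw(adap, MEM_EDC0, addr, len, buf, 1);
 *
 * addr, len and buf must all be 4-byte aligned, and multi-byte values in the
 * buffer keep whatever byte order the caller put there.
 */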
491
492#define EEPROM_STAT_ADDR 0x7bfc
493#define VPD_BASE 0
494#define VPD_LEN 512
495
496/**
497 * t4_seeprom_wp - enable/disable EEPROM write protection
498 * @adapter: the adapter
499 * @enable: whether to enable or disable write protection
500 *
501 * Enables or disables write protection on the serial EEPROM.
502 */
503int t4_seeprom_wp(struct adapter *adapter, bool enable)
504{
505 unsigned int v = enable ? 0xc : 0;
506 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
507 return ret < 0 ? ret : 0;
508}
509
510/**
511 * get_vpd_params - read VPD parameters from VPD EEPROM
512 * @adapter: adapter to read
513 * @p: where to store the parameters
514 *
515 * Reads card parameters stored in VPD EEPROM.
516 */
517int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
518{
519 u32 cclk_param, cclk_val;
520 int i, ret;
521 int ec, sn;
522 u8 vpd[VPD_LEN], csum;
523 unsigned int vpdr_len, kw_offset, id_len;
524
525 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
526 if (ret < 0)
527 return ret;
528
529 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
530 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
531 return -EINVAL;
532 }
533
534 id_len = pci_vpd_lrdt_size(vpd);
535 if (id_len > ID_LEN)
536 id_len = ID_LEN;
537
538 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
539 if (i < 0) {
540 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
541 return -EINVAL;
542 }
543
544 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
545 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
546 if (vpdr_len + kw_offset > VPD_LEN) {
547 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
548 return -EINVAL;
549 }
550
551#define FIND_VPD_KW(var, name) do { \
552 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
553 if (var < 0) { \
554 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
555 return -EINVAL; \
556 } \
557 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
558} while (0)
559
560 FIND_VPD_KW(i, "RV");
561 for (csum = 0; i >= 0; i--)
562 csum += vpd[i];
563
564 if (csum) {
565 dev_err(adapter->pdev_dev,
566 "corrupted VPD EEPROM, actual csum %u\n", csum);
567 return -EINVAL;
568 }
569
570 FIND_VPD_KW(ec, "EC");
571 FIND_VPD_KW(sn, "SN");
572#undef FIND_VPD_KW
573
574 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
575 strim(p->id);
576 memcpy(p->ec, vpd + ec, EC_LEN);
577 strim(p->ec);
578 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
579 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
580 strim(p->sn);
581
582 /*
583 * Ask firmware for the Core Clock since it knows how to translate the
584 * Reference Clock ('V2') VPD field into a Core Clock value ...
585 */
586 cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
587 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
588 ret = t4_query_params(adapter, adapter->mbox, 0, 0,
589 1, &cclk_param, &cclk_val);
590 if (ret)
591 return ret;
592 p->cclk = cclk_val;
593
594 return 0;
595}
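/*
 * Note on the "RV" check above (informal sketch): the VPD-R section is valid
 * when the byte sum of everything from the start of the VPD up to and
 * including the RV data byte is zero, e.g.
 *
 *	u8 sum = 0;
 *	for (j = 0; j <= rv_offset; j++)	// rv_offset: hypothetical index of the RV byte
 *		sum += vpd[j];
 *	// sum == 0 for a well-formed VPD
 *
 * which is what the FIND_VPD_KW(i, "RV") summation loop verifies.
 */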
596
597/* serial flash and firmware constants */
598enum {
599 SF_ATTEMPTS = 10, /* max retries for SF operations */
600
601 /* flash command opcodes */
602 SF_PROG_PAGE = 2, /* program page */
603 SF_WR_DISABLE = 4, /* disable writes */
604 SF_RD_STATUS = 5, /* read status register */
605 SF_WR_ENABLE = 6, /* enable writes */
606 SF_RD_DATA_FAST = 0xb, /* read flash */
607 SF_RD_ID = 0x9f, /* read ID */
608 SF_ERASE_SECTOR = 0xd8, /* erase sector */
609
610 FW_MAX_SIZE = 512 * 1024,
611};
612
613/**
614 * sf1_read - read data from the serial flash
615 * @adapter: the adapter
616 * @byte_cnt: number of bytes to read
617 * @cont: whether another operation will be chained
618 * @lock: whether to lock SF for PL access only
619 * @valp: where to store the read data
620 *
621 * Reads up to 4 bytes of data from the serial flash. The location of
622 * the read needs to be specified prior to calling this by issuing the
623 * appropriate commands to the serial flash.
624 */
625static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
626 int lock, u32 *valp)
627{
628 int ret;
629
630 if (!byte_cnt || byte_cnt > 4)
631 return -EINVAL;
632 if (t4_read_reg(adapter, SF_OP) & BUSY)
633 return -EBUSY;
634 cont = cont ? SF_CONT : 0;
635 lock = lock ? SF_LOCK : 0;
636 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
637 ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
638 if (!ret)
639 *valp = t4_read_reg(adapter, SF_DATA);
640 return ret;
641}
642
643/**
644 * sf1_write - write data to the serial flash
645 * @adapter: the adapter
646 * @byte_cnt: number of bytes to write
647 * @cont: whether another operation will be chained
648 * @lock: whether to lock SF for PL access only
649 * @val: value to write
650 *
651 * Writes up to 4 bytes of data to the serial flash. The location of
652 * the write needs to be specified prior to calling this by issuing the
653 * appropriate commands to the serial flash.
654 */
655static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
656 int lock, u32 val)
657{
658 if (!byte_cnt || byte_cnt > 4)
659 return -EINVAL;
660 if (t4_read_reg(adapter, SF_OP) & BUSY)
661 return -EBUSY;
662 cont = cont ? SF_CONT : 0;
663 lock = lock ? SF_LOCK : 0;
664 t4_write_reg(adapter, SF_DATA, val);
665 t4_write_reg(adapter, SF_OP, lock |
666 cont | BYTECNT(byte_cnt - 1) | OP_WR);
667 return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
668}
669
670/**
671 * flash_wait_op - wait for a flash operation to complete
672 * @adapter: the adapter
673 * @attempts: max number of polls of the status register
674 * @delay: delay between polls in ms
675 *
676 * Wait for a flash operation to complete by polling the status register.
677 */
678static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
679{
680 int ret;
681 u32 status;
682
683 while (1) {
684 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
685 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
686 return ret;
687 if (!(status & 1))
688 return 0;
689 if (--attempts == 0)
690 return -EAGAIN;
691 if (delay)
692 msleep(delay);
693 }
694}
695
696/**
697 * t4_read_flash - read words from serial flash
698 * @adapter: the adapter
699 * @addr: the start address for the read
700 * @nwords: how many 32-bit words to read
701 * @data: where to store the read data
702 * @byte_oriented: whether to store data as bytes or as words
703 *
704 * Read the specified number of 32-bit words from the serial flash.
705 * If @byte_oriented is set the read data is stored as a byte array
706 * (i.e., big-endian), otherwise as 32-bit words in the platform's
707 * natural endianess.
708 */
709static int t4_read_flash(struct adapter *adapter, unsigned int addr,
710 unsigned int nwords, u32 *data, int byte_oriented)
711{
712 int ret;
713
714 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
715 return -EINVAL;
716
717 addr = swab32(addr) | SF_RD_DATA_FAST;
718
719 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
720 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
721 return ret;
722
723 for ( ; nwords; nwords--, data++) {
724 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
725 if (nwords == 1)
726 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
727 if (ret)
728 return ret;
729 if (byte_oriented)
730 *data = htonl(*data);
731 }
732 return 0;
733}
734
735/**
736 * t4_write_flash - write up to a page of data to the serial flash
737 * @adapter: the adapter
738 * @addr: the start address to write
739 * @n: length of data to write in bytes
740 * @data: the data to write
741 *
742 * Writes up to a page of data (256 bytes) to the serial flash starting
743 * at the given address. All the data must be written to the same page.
744 */
745static int t4_write_flash(struct adapter *adapter, unsigned int addr,
746 unsigned int n, const u8 *data)
747{
748 int ret;
749 u32 buf[64];
750 unsigned int i, c, left, val, offset = addr & 0xff;
751
752 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
753 return -EINVAL;
754
755 val = swab32(addr) | SF_PROG_PAGE;
756
757 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
758 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
759 goto unlock;
760
761 for (left = n; left; left -= c) {
762 c = min(left, 4U);
763 for (val = 0, i = 0; i < c; ++i)
764 val = (val << 8) + *data++;
765
766 ret = sf1_write(adapter, c, c != left, 1, val);
767 if (ret)
768 goto unlock;
769 }
770 ret = flash_wait_op(adapter, 8, 1);
771 if (ret)
772 goto unlock;
773
774 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
775
776 /* Read the page to verify the write succeeded */
777 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
778 if (ret)
779 return ret;
780
781 if (memcmp(data - n, (u8 *)buf + offset, n)) {
782 dev_err(adapter->pdev_dev,
783 "failed to correctly write the flash page at %#x\n",
784 addr);
785 return -EIO;
786 }
787 return 0;
788
789unlock:
790 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
791 return ret;
792}
793
794/**
795 * get_fw_version - read the firmware version
796 * @adapter: the adapter
797 * @vers: where to place the version
798 *
799 * Reads the FW version from flash.
800 */
801static int get_fw_version(struct adapter *adapter, u32 *vers)
802{
803 return t4_read_flash(adapter, adapter->params.sf_fw_start +
804 offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
805}
806
807/**
808 * get_tp_version - read the TP microcode version
809 * @adapter: the adapter
810 * @vers: where to place the version
811 *
812 * Reads the TP microcode version from flash.
813 */
814static int get_tp_version(struct adapter *adapter, u32 *vers)
815{
816 return t4_read_flash(adapter, adapter->params.sf_fw_start +
817 offsetof(struct fw_hdr, tp_microcode_ver),
818 1, vers, 0);
819}
820
821/**
822 * t4_check_fw_version - check if the FW is compatible with this driver
823 * @adapter: the adapter
824 *
825 * Checks if an adapter's FW is compatible with the driver. Returns 0
826 * if there's exact match, a negative error if the version could not be
827 * read or there's a major version mismatch, and a positive value if the
828 * expected major version is found but there's a minor version mismatch.
829 */
830int t4_check_fw_version(struct adapter *adapter)
831{
832 u32 api_vers[2];
833 int ret, major, minor, micro;
834
835 ret = get_fw_version(adapter, &adapter->params.fw_vers);
836 if (!ret)
837 ret = get_tp_version(adapter, &adapter->params.tp_vers);
838 if (!ret)
839 ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
840 offsetof(struct fw_hdr, intfver_nic),
841 2, api_vers, 1);
842 if (ret)
843 return ret;
844
845 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
846 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
847 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
848 memcpy(adapter->params.api_vers, api_vers,
849 sizeof(adapter->params.api_vers));
850
851 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
852 dev_err(adapter->pdev_dev,
853 "card FW has major version %u, driver wants %u\n",
854 major, FW_VERSION_MAJOR);
855 return -EINVAL;
856 }
857
858 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
859 return 0; /* perfect match */
860
861 /* Minor/micro version mismatch. Report it but often it's OK. */
862 return 1;
863}
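/*
 * Interpreting the result (sketch): a negative value means the FW could not
 * be read or its major version is wrong, 0 is an exact match, and a positive
 * value flags a minor/micro mismatch that is usually tolerable:
 *
 *	ret = t4_check_fw_version(adap);
 *	if (ret < 0)
 *		return ret;		// incompatible or unreadable FW
 *	if (ret > 0)
 *		dev_warn(adap->pdev_dev, "FW minor/micro version mismatch\n");
 */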
864
865/**
866 * t4_flash_erase_sectors - erase a range of flash sectors
867 * @adapter: the adapter
868 * @start: the first sector to erase
869 * @end: the last sector to erase
870 *
871 * Erases the sectors in the given inclusive range.
872 */
873static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
874{
875 int ret = 0;
876
877 while (start <= end) {
878 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
879 (ret = sf1_write(adapter, 4, 0, 1,
880 SF_ERASE_SECTOR | (start << 8))) != 0 ||
881 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
882 dev_err(adapter->pdev_dev,
883 "erase of flash sector %d failed, error %d\n",
884 start, ret);
885 break;
886 }
887 start++;
888 }
889 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
890 return ret;
891}
892
893/**
894 * t4_flash_cfg_addr - return the address of the flash configuration file
895 * @adapter: the adapter
896 *
897 * Return the address within the flash where the Firmware Configuration
898 * File is stored.
899 */
900unsigned int t4_flash_cfg_addr(struct adapter *adapter)
901{
902 if (adapter->params.sf_size == 0x100000)
903 return FLASH_FPGA_CFG_START;
904 else
905 return FLASH_CFG_START;
906}
907
908/**
909 * t4_load_cfg - download config file
910 * @adap: the adapter
911 * @cfg_data: the cfg text file to write
912 * @size: text file size
913 *
914 * Write the supplied config text file to the card's serial flash.
915 */
916int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
917{
918 int ret, i, n;
919 unsigned int addr;
920 unsigned int flash_cfg_start_sec;
921 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
922
923 addr = t4_flash_cfg_addr(adap);
924 flash_cfg_start_sec = addr / SF_SEC_SIZE;
925
926 if (size > FLASH_CFG_MAX_SIZE) {
927 dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
928 FLASH_CFG_MAX_SIZE);
929 return -EFBIG;
930 }
931
932 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
933 sf_sec_size);
934 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
935 flash_cfg_start_sec + i - 1);
936 /*
937 * If size == 0 then we're simply erasing the FLASH sectors associated
938 * with the on-adapter Firmware Configuration File.
939 */
940 if (ret || size == 0)
941 goto out;
942
943 /* this will write to the flash up to SF_PAGE_SIZE at a time */
944 for (i = 0; i < size; i += SF_PAGE_SIZE) {
945 if ((size - i) < SF_PAGE_SIZE)
946 n = size - i;
947 else
948 n = SF_PAGE_SIZE;
949 ret = t4_write_flash(adap, addr, n, cfg_data);
950 if (ret)
951 goto out;
952
953 addr += SF_PAGE_SIZE;
954 cfg_data += SF_PAGE_SIZE;
955 }
956
957out:
958 if (ret)
959 dev_err(adap->pdev_dev, "config file %s failed %d\n",
960 (size == 0 ? "clear" : "download"), ret);
961 return ret;
962}
963
964/**
965 * t4_load_fw - download firmware
966 * @adap: the adapter
967 * @fw_data: the firmware image to write
968 * @size: image size
969 *
970 * Write the supplied firmware image to the card's serial flash.
971 */
972int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
973{
974 u32 csum;
975 int ret, addr;
976 unsigned int i;
977 u8 first_page[SF_PAGE_SIZE];
978 const u32 *p = (const u32 *)fw_data;
979 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
980 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
981 unsigned int fw_img_start = adap->params.sf_fw_start;
982 unsigned int fw_start_sec = fw_img_start / sf_sec_size;
983
984 if (!size) {
985 dev_err(adap->pdev_dev, "FW image has no data\n");
986 return -EINVAL;
987 }
988 if (size & 511) {
989 dev_err(adap->pdev_dev,
990 "FW image size not multiple of 512 bytes\n");
991 return -EINVAL;
992 }
993 if (ntohs(hdr->len512) * 512 != size) {
994 dev_err(adap->pdev_dev,
995 "FW image size differs from size in FW header\n");
996 return -EINVAL;
997 }
998 if (size > FW_MAX_SIZE) {
999 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
1000 FW_MAX_SIZE);
1001 return -EFBIG;
1002 }
1003
1004 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1005 csum += ntohl(p[i]);
1006
1007 if (csum != 0xffffffff) {
1008 dev_err(adap->pdev_dev,
1009 "corrupted firmware image, checksum %#x\n", csum);
1010 return -EINVAL;
1011 }
1012
1013 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
1014 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
1015 if (ret)
1016 goto out;
1017
1018 /*
1019 * We write the correct version at the end so the driver can see a bad
1020 * version if the FW write fails. Start by writing a copy of the
1021 * first page with a bad version.
1022 */
1023 memcpy(first_page, fw_data, SF_PAGE_SIZE);
1024 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
1025 ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
1026 if (ret)
1027 goto out;
1028
1029 addr = fw_img_start;
1030 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1031 addr += SF_PAGE_SIZE;
1032 fw_data += SF_PAGE_SIZE;
1033 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
1034 if (ret)
1035 goto out;
1036 }
1037
1038 ret = t4_write_flash(adap,
1039 fw_img_start + offsetof(struct fw_hdr, fw_ver),
1040 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
1041out:
1042 if (ret)
1043 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
1044 ret);
1045 return ret;
1046}
1047
1048#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1049 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
1050
1051/**
1052 * t4_link_start - apply link configuration to MAC/PHY
1053 * @adap: the adapter
1054 * @mbox: mailbox to use for the FW command
 * @port: the port id
1055 * @lc: the requested link configuration
1056 *
1057 * Set up a port's MAC and PHY according to a desired link configuration.
1058 * - If the PHY can auto-negotiate first decide what to advertise, then
1059 * enable/disable auto-negotiation as desired, and reset.
1060 * - If the PHY does not auto-negotiate just reset it.
1061 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1062 * otherwise do it later based on the outcome of auto-negotiation.
1063 */
1064int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1065 struct link_config *lc)
1066{
1067 struct fw_port_cmd c;
1068 unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
1069
1070 lc->link_ok = 0;
1071 if (lc->requested_fc & PAUSE_RX)
1072 fc |= FW_PORT_CAP_FC_RX;
1073 if (lc->requested_fc & PAUSE_TX)
1074 fc |= FW_PORT_CAP_FC_TX;
1075
1076 memset(&c, 0, sizeof(c));
1077 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
1078 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
1079 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1080 FW_LEN16(c));
1081
1082 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1083 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1084 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1085 } else if (lc->autoneg == AUTONEG_DISABLE) {
1086 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1087 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1088 } else
1089 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1090
1091 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1092}
1093
1094/**
1095 * t4_restart_aneg - restart autonegotiation
1096 * @adap: the adapter
1097 * @mbox: mbox to use for the FW command
1098 * @port: the port id
1099 *
1100 * Restarts autonegotiation for the selected port.
1101 */
1102int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1103{
1104 struct fw_port_cmd c;
1105
1106 memset(&c, 0, sizeof(c));
1107 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
1108 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
1109 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1110 FW_LEN16(c));
1111 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1112 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1113}
1114
1115typedef void (*int_handler_t)(struct adapter *adap);
1116
1117struct intr_info {
1118 unsigned int mask; /* bits to check in interrupt status */
1119 const char *msg; /* message to print or NULL */
1120 short stat_idx; /* stat counter to increment or -1 */
1121 unsigned short fatal; /* whether the condition reported is fatal */
1122 int_handler_t int_handler; /* platform-specific int handler */
1123};
1124
1125/**
1126 * t4_handle_intr_status - table driven interrupt handler
1127 * @adapter: the adapter that generated the interrupt
1128 * @reg: the interrupt status register to process
1129 * @acts: table of interrupt actions
1130 *
1131 * A table driven interrupt handler that applies a set of masks to an
1132 * interrupt status word and performs the corresponding actions if the
1133 * interrupts described by the mask have occurred. The actions include
1134 * optionally emitting a warning or alert message. The table is terminated
1135 * by an entry specifying mask 0. Returns the number of fatal interrupt
1136 * conditions.
1137 */
1138static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1139 const struct intr_info *acts)
1140{
1141 int fatal = 0;
1142 unsigned int mask = 0;
1143 unsigned int status = t4_read_reg(adapter, reg);
1144
1145 for ( ; acts->mask; ++acts) {
1146 if (!(status & acts->mask))
1147 continue;
1148 if (acts->fatal) {
1149 fatal++;
1150 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1151 status & acts->mask);
1152 } else if (acts->msg && printk_ratelimit())
1153 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1154 status & acts->mask);
1155 if (acts->int_handler)
1156 acts->int_handler(adapter);
1157 mask |= acts->mask;
1158 }
1159 status &= mask;
1160 if (status) /* clear processed interrupts */
1161 t4_write_reg(adapter, reg, status);
1162 return fatal;
1163}
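/*
 * Sketch of a handler built on this helper: each table entry supplies a mask,
 * an optional message, a stat index (unused here, hence -1), a fatal flag and
 * an optional callback, and the table ends with an all-zero sentinel, e.g.
 *
 *	static const struct intr_info example_intr_info[] = {
 *		{ 0x1, "example parity error", -1, 1 },
 *		{ 0x2, NULL, -1, 0, some_callback },	// hypothetical callback
 *		{ 0 }
 *	};
 *	if (t4_handle_intr_status(adapter, EXAMPLE_INT_CAUSE, example_intr_info))
 *		t4_fatal_err(adapter);
 */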
1164
1165/*
1166 * Interrupt handler for the PCIE module.
1167 */
1168static void pcie_intr_handler(struct adapter *adapter)
1169{
1170 static const struct intr_info sysbus_intr_info[] = {
1171 { RNPP, "RXNP array parity error", -1, 1 },
1172 { RPCP, "RXPC array parity error", -1, 1 },
1173 { RCIP, "RXCIF array parity error", -1, 1 },
1174 { RCCP, "Rx completions control array parity error", -1, 1 },
1175 { RFTP, "RXFT array parity error", -1, 1 },
1176 { 0 }
1177 };
1178 static const struct intr_info pcie_port_intr_info[] = {
1179 { TPCP, "TXPC array parity error", -1, 1 },
1180 { TNPP, "TXNP array parity error", -1, 1 },
1181 { TFTP, "TXFT array parity error", -1, 1 },
1182 { TCAP, "TXCA array parity error", -1, 1 },
1183 { TCIP, "TXCIF array parity error", -1, 1 },
1184 { RCAP, "RXCA array parity error", -1, 1 },
1185 { OTDD, "outbound request TLP discarded", -1, 1 },
1186 { RDPE, "Rx data parity error", -1, 1 },
1187 { TDUE, "Tx uncorrectable data error", -1, 1 },
1188 { 0 }
1189 };
1190 static const struct intr_info pcie_intr_info[] = {
1191 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1192 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1193 { MSIDATAPERR, "MSI data parity error", -1, 1 },
1194 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1195 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1196 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1197 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1198 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1199 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1200 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1201 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1202 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1203 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1204 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1205 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1206 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1207 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1208 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1209 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1210 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1211 { FIDPERR, "PCI FID parity error", -1, 1 },
1212 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1213 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
1214 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1215 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1216 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
1217 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
1218 { PCIESINT, "PCI core secondary fault", -1, 1 },
1219 { PCIEPINT, "PCI core primary fault", -1, 1 },
1220 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
1221 { 0 }
1222 };
1223
1224 int fat;
1225
1226 fat = t4_handle_intr_status(adapter,
1227 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1228 sysbus_intr_info) +
1229 t4_handle_intr_status(adapter,
1230 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1231 pcie_port_intr_info) +
1232 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
1233 if (fat)
1234 t4_fatal_err(adapter);
1235}
1236
1237/*
1238 * TP interrupt handler.
1239 */
1240static void tp_intr_handler(struct adapter *adapter)
1241{
1242 static const struct intr_info tp_intr_info[] = {
1243 { 0x3fffffff, "TP parity error", -1, 1 },
1244 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1245 { 0 }
1246 };
1247
1248 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1249 t4_fatal_err(adapter);
1250}
1251
1252/*
1253 * SGE interrupt handler.
1254 */
1255static void sge_intr_handler(struct adapter *adapter)
1256{
1257 u64 v;
1258
1259 static const struct intr_info sge_intr_info[] = {
1260 { ERR_CPL_EXCEED_IQE_SIZE,
1261 "SGE received CPL exceeding IQE size", -1, 1 },
1262 { ERR_INVALID_CIDX_INC,
1263 "SGE GTS CIDX increment too large", -1, 0 },
1264 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1265 { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
1266 { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
1267 { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
1268 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1269 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1270 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1271 0 },
1272 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1273 0 },
1274 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1275 0 },
1276 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1277 0 },
1278 { ERR_ING_CTXT_PRIO,
1279 "SGE too many priority ingress contexts", -1, 0 },
1280 { ERR_EGR_CTXT_PRIO,
1281 "SGE too many priority egress contexts", -1, 0 },
1282 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1283 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1284 { 0 }
1285 };
1286
1287 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
1288 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
1289 if (v) {
1290 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
1291 (unsigned long long)v);
1292 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1293 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1294 }
1295
1296 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1297 v != 0)
1298 t4_fatal_err(adapter);
1299}
1300
1301/*
1302 * CIM interrupt handler.
1303 */
1304static void cim_intr_handler(struct adapter *adapter)
1305{
1306 static const struct intr_info cim_intr_info[] = {
1307 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1308 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1309 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1310 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1311 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1312 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1313 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1314 { 0 }
1315 };
1316 static const struct intr_info cim_upintr_info[] = {
1317 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1318 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1319 { ILLWRINT, "CIM illegal write", -1, 1 },
1320 { ILLRDINT, "CIM illegal read", -1, 1 },
1321 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1322 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1323 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1324 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1325 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1326 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1327 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1328 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1329 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1330 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1331 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1332 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1333 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1334 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1335 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1336 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1337 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1338 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1339 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1340 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1341 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1342 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1343 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1344 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1345 { 0 }
1346 };
1347
1348 int fat;
1349
1350 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1351 cim_intr_info) +
1352 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1353 cim_upintr_info);
1354 if (fat)
1355 t4_fatal_err(adapter);
1356}
1357
1358/*
1359 * ULP RX interrupt handler.
1360 */
1361static void ulprx_intr_handler(struct adapter *adapter)
1362{
1363 static const struct intr_info ulprx_intr_info[] = {
1364 { 0x1800000, "ULPRX context error", -1, 1 },
1365 { 0x7fffff, "ULPRX parity error", -1, 1 },
1366 { 0 }
1367 };
1368
1369 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1370 t4_fatal_err(adapter);
1371}
1372
1373/*
1374 * ULP TX interrupt handler.
1375 */
1376static void ulptx_intr_handler(struct adapter *adapter)
1377{
1378 static const struct intr_info ulptx_intr_info[] = {
1379 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1380 0 },
1381 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1382 0 },
1383 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1384 0 },
1385 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1386 0 },
1387 { 0xfffffff, "ULPTX parity error", -1, 1 },
1388 { 0 }
1389 };
1390
1391 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1392 t4_fatal_err(adapter);
1393}
1394
1395/*
1396 * PM TX interrupt handler.
1397 */
1398static void pmtx_intr_handler(struct adapter *adapter)
1399{
1400 static const struct intr_info pmtx_intr_info[] = {
1401 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1402 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1403 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1404 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1405 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1406 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1407 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1408 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1409 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1410 { 0 }
1411 };
1412
1413 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1414 t4_fatal_err(adapter);
1415}
1416
1417/*
1418 * PM RX interrupt handler.
1419 */
1420static void pmrx_intr_handler(struct adapter *adapter)
1421{
1422 static const struct intr_info pmrx_intr_info[] = {
1423 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1424 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1425 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1426 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1427 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1428 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1429 { 0 }
1430 };
1431
1432 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1433 t4_fatal_err(adapter);
1434}
1435
1436/*
1437 * CPL switch interrupt handler.
1438 */
1439static void cplsw_intr_handler(struct adapter *adapter)
1440{
1441 static const struct intr_info cplsw_intr_info[] = {
1442 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1443 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1444 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1445 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1446 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1447 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1448 { 0 }
1449 };
1450
1451 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1452 t4_fatal_err(adapter);
1453}
1454
1455/*
1456 * LE interrupt handler.
1457 */
1458static void le_intr_handler(struct adapter *adap)
1459{
1460 static const struct intr_info le_intr_info[] = {
1461 { LIPMISS, "LE LIP miss", -1, 0 },
1462 { LIP0, "LE 0 LIP error", -1, 0 },
1463 { PARITYERR, "LE parity error", -1, 1 },
1464 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1465 { REQQPARERR, "LE request queue parity error", -1, 1 },
1466 { 0 }
1467 };
1468
1469 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1470 t4_fatal_err(adap);
1471}
1472
1473/*
1474 * MPS interrupt handler.
1475 */
1476static void mps_intr_handler(struct adapter *adapter)
1477{
1478 static const struct intr_info mps_rx_intr_info[] = {
1479 { 0xffffff, "MPS Rx parity error", -1, 1 },
1480 { 0 }
1481 };
1482 static const struct intr_info mps_tx_intr_info[] = {
1483 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1484 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1485 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1486 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1487 { BUBBLE, "MPS Tx underflow", -1, 1 },
1488 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1489 { FRMERR, "MPS Tx framing error", -1, 1 },
1490 { 0 }
1491 };
1492 static const struct intr_info mps_trc_intr_info[] = {
1493 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1494 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1495 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1496 { 0 }
1497 };
1498 static const struct intr_info mps_stat_sram_intr_info[] = {
1499 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1500 { 0 }
1501 };
1502 static const struct intr_info mps_stat_tx_intr_info[] = {
1503 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1504 { 0 }
1505 };
1506 static const struct intr_info mps_stat_rx_intr_info[] = {
1507 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1508 { 0 }
1509 };
Joe Perches005b5712010-12-14 21:36:53 +00001510 static const struct intr_info mps_cls_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001511 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1512 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1513 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1514 { 0 }
1515 };
1516
1517 int fat;
1518
1519 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1520 mps_rx_intr_info) +
1521 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1522 mps_tx_intr_info) +
1523 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1524 mps_trc_intr_info) +
1525 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1526 mps_stat_sram_intr_info) +
1527 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1528 mps_stat_tx_intr_info) +
1529 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1530 mps_stat_rx_intr_info) +
1531 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1532 mps_cls_intr_info);
1533
1534 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1535 RXINT | TXINT | STATINT);
1536 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1537 if (fat)
1538 t4_fatal_err(adapter);
1539}
1540
1541#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1542
1543/*
1544 * EDC/MC interrupt handler.
1545 */
1546static void mem_intr_handler(struct adapter *adapter, int idx)
1547{
1548 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
1549
1550 unsigned int addr, cnt_addr, v;
1551
1552 if (idx <= MEM_EDC1) {
1553 addr = EDC_REG(EDC_INT_CAUSE, idx);
1554 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1555 } else {
1556 addr = MC_INT_CAUSE;
1557 cnt_addr = MC_ECC_STATUS;
1558 }
1559
1560 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1561 if (v & PERR_INT_CAUSE)
1562 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1563 name[idx]);
1564 if (v & ECC_CE_INT_CAUSE) {
1565 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1566
1567 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1568 if (printk_ratelimit())
1569 dev_warn(adapter->pdev_dev,
1570 "%u %s correctable ECC data error%s\n",
1571 cnt, name[idx], cnt > 1 ? "s" : "");
1572 }
1573 if (v & ECC_UE_INT_CAUSE)
1574 dev_alert(adapter->pdev_dev,
1575 "%s uncorrectable ECC data error\n", name[idx]);
1576
1577 t4_write_reg(adapter, addr, v);
1578 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1579 t4_fatal_err(adapter);
1580}
1581
1582/*
1583 * MA interrupt handler.
1584 */
1585static void ma_intr_handler(struct adapter *adap)
1586{
1587 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1588
1589 if (status & MEM_PERR_INT_CAUSE)
1590 dev_alert(adap->pdev_dev,
1591 "MA parity error, parity status %#x\n",
1592 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1593 if (status & MEM_WRAP_INT_CAUSE) {
1594 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1595 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1596 "client %u to address %#x\n",
1597 MEM_WRAP_CLIENT_NUM_GET(v),
1598 MEM_WRAP_ADDRESS_GET(v) << 4);
1599 }
1600 t4_write_reg(adap, MA_INT_CAUSE, status);
1601 t4_fatal_err(adap);
1602}
1603
1604/*
1605 * SMB interrupt handler.
1606 */
1607static void smb_intr_handler(struct adapter *adap)
1608{
Joe Perches005b5712010-12-14 21:36:53 +00001609 static const struct intr_info smb_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001610 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1611 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1612 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1613 { 0 }
1614 };
1615
1616 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1617 t4_fatal_err(adap);
1618}
1619
1620/*
1621 * NC-SI interrupt handler.
1622 */
1623static void ncsi_intr_handler(struct adapter *adap)
1624{
Joe Perches005b5712010-12-14 21:36:53 +00001625 static const struct intr_info ncsi_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001626 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1627 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1628 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1629 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1630 { 0 }
1631 };
1632
1633 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1634 t4_fatal_err(adap);
1635}
1636
1637/*
1638 * XGMAC interrupt handler.
1639 */
1640static void xgmac_intr_handler(struct adapter *adap, int port)
1641{
1642 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
1643
1644 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1645 if (!v)
1646 return;
1647
1648 if (v & TXFIFO_PRTY_ERR)
1649 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1650 port);
1651 if (v & RXFIFO_PRTY_ERR)
1652 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1653 port);
1654 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1655 t4_fatal_err(adap);
1656}
1657
1658/*
1659 * PL interrupt handler.
1660 */
1661static void pl_intr_handler(struct adapter *adap)
1662{
Joe Perches005b5712010-12-14 21:36:53 +00001663 static const struct intr_info pl_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001664 { FATALPERR, "T4 fatal parity error", -1, 1 },
1665 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1666 { 0 }
1667 };
1668
1669 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1670 t4_fatal_err(adap);
1671}
1672
Dimitris Michailidis63bccee2010-08-02 13:19:16 +00001673#define PF_INTR_MASK (PFSW)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001674#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1675 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1676 CPL_SWITCH | SGE | ULP_TX)
1677
1678/**
1679 * t4_slow_intr_handler - control path interrupt handler
1680 * @adapter: the adapter
1681 *
1682 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1683 * The designation 'slow' is because it involves register reads, while
1684 * data interrupts typically don't involve any MMIOs.
1685 */
1686int t4_slow_intr_handler(struct adapter *adapter)
1687{
1688 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1689
1690 if (!(cause & GLBL_INTR_MASK))
1691 return 0;
1692 if (cause & CIM)
1693 cim_intr_handler(adapter);
1694 if (cause & MPS)
1695 mps_intr_handler(adapter);
1696 if (cause & NCSI)
1697 ncsi_intr_handler(adapter);
1698 if (cause & PL)
1699 pl_intr_handler(adapter);
1700 if (cause & SMB)
1701 smb_intr_handler(adapter);
1702 if (cause & XGMAC0)
1703 xgmac_intr_handler(adapter, 0);
1704 if (cause & XGMAC1)
1705 xgmac_intr_handler(adapter, 1);
1706 if (cause & XGMAC_KR0)
1707 xgmac_intr_handler(adapter, 2);
1708 if (cause & XGMAC_KR1)
1709 xgmac_intr_handler(adapter, 3);
1710 if (cause & PCIE)
1711 pcie_intr_handler(adapter);
1712 if (cause & MC)
1713 mem_intr_handler(adapter, MEM_MC);
1714 if (cause & EDC0)
1715 mem_intr_handler(adapter, MEM_EDC0);
1716 if (cause & EDC1)
1717 mem_intr_handler(adapter, MEM_EDC1);
1718 if (cause & LE)
1719 le_intr_handler(adapter);
1720 if (cause & TP)
1721 tp_intr_handler(adapter);
1722 if (cause & MA)
1723 ma_intr_handler(adapter);
1724 if (cause & PM_TX)
1725 pmtx_intr_handler(adapter);
1726 if (cause & PM_RX)
1727 pmrx_intr_handler(adapter);
1728 if (cause & ULP_RX)
1729 ulprx_intr_handler(adapter);
1730 if (cause & CPL_SWITCH)
1731 cplsw_intr_handler(adapter);
1732 if (cause & SGE)
1733 sge_intr_handler(adapter);
1734 if (cause & ULP_TX)
1735 ulptx_intr_handler(adapter);
1736
1737 /* Clear the interrupts just processed for which we are the master. */
1738 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1739 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1740 return 1;
1741}
1742
1743/**
1744 * t4_intr_enable - enable interrupts
1745 * @adapter: the adapter whose interrupts should be enabled
1746 *
1747 * Enable PF-specific interrupts for the calling function and the top-level
1748 * interrupt concentrator for global interrupts. Interrupts are already
1749 * enabled at each module; here we just enable the roots of the interrupt
1750 * hierarchies.
1751 *
1752 * Note: this function should be called only when the driver manages
1753 * non PF-specific interrupts from the various HW modules. Only one PCI
1754 * function at a time should be doing this.
1755 */
1756void t4_intr_enable(struct adapter *adapter)
1757{
1758 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1759
1760 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1761 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1762 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1763 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1764 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1765 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1766 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
Vipul Pandya840f3002012-09-05 02:01:55 +00001767 DBFIFO_HP_INT | DBFIFO_LP_INT |
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001768 EGRESS_SIZE_ERR);
1769 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1770 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1771}
1772
1773/**
1774 * t4_intr_disable - disable interrupts
1775 * @adapter: the adapter whose interrupts should be disabled
1776 *
1777 * Disable interrupts. We only disable the top-level interrupt
1778 * concentrators. The caller must be a PCI function managing global
1779 * interrupts.
1780 */
1781void t4_intr_disable(struct adapter *adapter)
1782{
1783 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1784
1785 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1786 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1787}
1788
1789/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001790 * hash_mac_addr - return the hash value of a MAC address
1791 * @addr: the 48-bit Ethernet MAC address
1792 *
1793 * Hashes a MAC address according to the hash function used by HW inexact
1794 * (hash) address matching.
1795 */
1796static int hash_mac_addr(const u8 *addr)
1797{
1798 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1799 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1800 a ^= b;
1801 a ^= (a >> 12);
1802 a ^= (a >> 6);
1803 return a & 0x3f;
1804}
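
/*
 * Worked example (illustrative only, not driver code): for the MAC address
 * 00:11:22:33:44:55 the two 24-bit halves are a = 0x001122 and b = 0x334455,
 * so a ^= b gives 0x335577; folding in (a >> 12) gives 0x335642, folding in
 * (a >> 6) gives 0x339b1b, and masking with 0x3f selects bucket 0x1b (27),
 * i.e. bit 27 of the 64-bit hash filter bitmap consumed by
 * t4_alloc_mac_filt() below.
 */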
1805
1806/**
1807 * t4_config_rss_range - configure a portion of the RSS mapping table
1808 * @adapter: the adapter
1809 * @mbox: mbox to use for the FW command
1810 * @viid: virtual interface whose RSS subtable is to be written
1811 * @start: start entry in the table to write
1812 * @n: how many table entries to write
1813 * @rspq: values for the response queue lookup table
1814 * @nrspq: number of values in @rspq
1815 *
1816 * Programs the selected part of the VI's RSS mapping table with the
1817 * provided values. If @nrspq < @n the supplied values are used repeatedly
1818 * until the full table range is populated.
1819 *
1820 * The caller must ensure the values in @rspq are in the range allowed for
1821 * @viid.
1822 */
1823int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1824 int start, int n, const u16 *rspq, unsigned int nrspq)
1825{
1826 int ret;
1827 const u16 *rsp = rspq;
1828 const u16 *rsp_end = rspq + nrspq;
1829 struct fw_rss_ind_tbl_cmd cmd;
1830
1831 memset(&cmd, 0, sizeof(cmd));
1832 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
1833 FW_CMD_REQUEST | FW_CMD_WRITE |
1834 FW_RSS_IND_TBL_CMD_VIID(viid));
1835 cmd.retval_len16 = htonl(FW_LEN16(cmd));
1836
1837 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
1838 while (n > 0) {
1839 int nq = min(n, 32);
1840 __be32 *qp = &cmd.iq0_to_iq2;
1841
1842 cmd.niqid = htons(nq);
1843 cmd.startidx = htons(start);
1844
1845 start += nq;
1846 n -= nq;
1847
1848 while (nq > 0) {
1849 unsigned int v;
1850
1851 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
1852 if (++rsp >= rsp_end)
1853 rsp = rspq;
1854 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
1855 if (++rsp >= rsp_end)
1856 rsp = rspq;
1857 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
1858 if (++rsp >= rsp_end)
1859 rsp = rspq;
1860
1861 *qp++ = htonl(v);
1862 nq -= 3;
1863 }
1864
1865 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
1866 if (ret)
1867 return ret;
1868 }
1869 return 0;
1870}
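
/*
 * Illustrative sketch only (not part of the driver): fill the first 128
 * entries of a VI's RSS slice with the caller's ingress queue IDs.  The
 * slice size of 128 and the iqs[]/niqs arguments are assumptions made up
 * for the example; t4_config_rss_range() repeats the IDs round-robin until
 * the requested range is full.
 */
static inline int example_write_rss_slice(struct adapter *adap, int mbox,
					  unsigned int viid, const u16 *iqs,
					  unsigned int niqs)
{
	/* start at entry 0 and let the IDs wrap to cover all 128 slots */
	return t4_config_rss_range(adap, mbox, viid, 0, 128, iqs, niqs);
}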
1871
1872/**
1873 * t4_config_glbl_rss - configure the global RSS mode
1874 * @adapter: the adapter
1875 * @mbox: mbox to use for the FW command
1876 * @mode: global RSS mode
1877 * @flags: mode-specific flags
1878 *
1879 * Sets the global RSS mode.
1880 */
1881int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1882 unsigned int flags)
1883{
1884 struct fw_rss_glb_config_cmd c;
1885
1886 memset(&c, 0, sizeof(c));
1887 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
1888 FW_CMD_REQUEST | FW_CMD_WRITE);
1889 c.retval_len16 = htonl(FW_LEN16(c));
1890 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
1891 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1892 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
1893 c.u.basicvirtual.mode_pkd =
1894 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1895 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
1896 } else
1897 return -EINVAL;
1898 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1899}
1900
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001901/**
1902 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
1903 * @adap: the adapter
1904 * @v4: holds the TCP/IP counter values
1905 * @v6: holds the TCP/IPv6 counter values
1906 *
1907 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
1908 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
1909 */
1910void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1911 struct tp_tcp_stats *v6)
1912{
1913 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
1914
1915#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
1916#define STAT(x) val[STAT_IDX(x)]
1917#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
1918
1919 if (v4) {
1920 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1921 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
1922 v4->tcpOutRsts = STAT(OUT_RST);
1923 v4->tcpInSegs = STAT64(IN_SEG);
1924 v4->tcpOutSegs = STAT64(OUT_SEG);
1925 v4->tcpRetransSegs = STAT64(RXT_SEG);
1926 }
1927 if (v6) {
1928 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1929 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
1930 v6->tcpOutRsts = STAT(OUT_RST);
1931 v6->tcpInSegs = STAT64(IN_SEG);
1932 v6->tcpOutSegs = STAT64(OUT_SEG);
1933 v6->tcpRetransSegs = STAT64(RXT_SEG);
1934 }
1935#undef STAT64
1936#undef STAT
1937#undef STAT_IDX
1938}
1939
1940/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001941 * t4_read_mtu_tbl - returns the values in the HW path MTU table
1942 * @adap: the adapter
1943 * @mtus: where to store the MTU values
1944 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
1945 *
1946 * Reads the HW path MTU table.
1947 */
1948void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
1949{
1950 u32 v;
1951 int i;
1952
1953 for (i = 0; i < NMTUS; ++i) {
1954 t4_write_reg(adap, TP_MTU_TABLE,
1955 MTUINDEX(0xff) | MTUVALUE(i));
1956 v = t4_read_reg(adap, TP_MTU_TABLE);
1957 mtus[i] = MTUVALUE_GET(v);
1958 if (mtu_log)
1959 mtu_log[i] = MTUWIDTH_GET(v);
1960 }
1961}
1962
1963/**
Vipul Pandya636f9d32012-09-26 02:39:39 +00001964 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
1965 * @adap: the adapter
1966 * @addr: the indirect TP register address
1967 * @mask: specifies the field within the register to modify
1968 * @val: new value for the field
1969 *
1970 * Sets a field of an indirect TP register to the given value.
1971 */
1972void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
1973 unsigned int mask, unsigned int val)
1974{
1975 t4_write_reg(adap, TP_PIO_ADDR, addr);
1976 val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
1977 t4_write_reg(adap, TP_PIO_DATA, val);
1978}
1979
1980/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001981 * init_cong_ctrl - initialize congestion control parameters
1982 * @a: the alpha values for congestion control
1983 * @b: the beta values for congestion control
1984 *
1985 * Initialize the congestion control parameters.
1986 */
1987static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
1988{
1989 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
1990 a[9] = 2;
1991 a[10] = 3;
1992 a[11] = 4;
1993 a[12] = 5;
1994 a[13] = 6;
1995 a[14] = 7;
1996 a[15] = 8;
1997 a[16] = 9;
1998 a[17] = 10;
1999 a[18] = 14;
2000 a[19] = 17;
2001 a[20] = 21;
2002 a[21] = 25;
2003 a[22] = 30;
2004 a[23] = 35;
2005 a[24] = 45;
2006 a[25] = 60;
2007 a[26] = 80;
2008 a[27] = 100;
2009 a[28] = 200;
2010 a[29] = 300;
2011 a[30] = 400;
2012 a[31] = 500;
2013
2014 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2015 b[9] = b[10] = 1;
2016 b[11] = b[12] = 2;
2017 b[13] = b[14] = b[15] = b[16] = 3;
2018 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2019 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2020 b[28] = b[29] = 6;
2021 b[30] = b[31] = 7;
2022}
2023
2024/* The minimum additive increment value for the congestion control table */
2025#define CC_MIN_INCR 2U
2026
2027/**
2028 * t4_load_mtus - write the MTU and congestion control HW tables
2029 * @adap: the adapter
2030 * @mtus: the values for the MTU table
2031 * @alpha: the values for the congestion control alpha parameter
2032 * @beta: the values for the congestion control beta parameter
2033 *
2034 * Write the HW MTU table with the supplied MTUs and the high-speed
2035 * congestion control table with the supplied alpha, beta, and MTUs.
2036 * We write the two tables together because the additive increments
2037 * depend on the MTUs.
2038 */
2039void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2040 const unsigned short *alpha, const unsigned short *beta)
2041{
2042 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2043 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2044 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2045 28672, 40960, 57344, 81920, 114688, 163840, 229376
2046 };
2047
2048 unsigned int i, w;
2049
2050 for (i = 0; i < NMTUS; ++i) {
2051 unsigned int mtu = mtus[i];
2052 unsigned int log2 = fls(mtu);
2053
2054 if (!(mtu & ((1 << log2) >> 2))) /* round */
2055 log2--;
2056 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
2057 MTUWIDTH(log2) | MTUVALUE(mtu));
2058
2059 for (w = 0; w < NCCTRL_WIN; ++w) {
2060 unsigned int inc;
2061
2062 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2063 CC_MIN_INCR);
2064
2065 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
2066 (w << 16) | (beta[w] << 13) | inc);
2067 }
2068 }
2069}
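
/*
 * Worked example (illustrative): for an MTU of 1500, fls() returns 11 and
 * bit 9 (512) of the MTU is clear, so the "round" step above drops the
 * logged width to 10; for the first congestion control window (alpha = 1,
 * avg_pkts = 2) the additive increment comes out to
 * (1500 - 40) * 1 / 2 = 730, well above CC_MIN_INCR.
 */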
2070
2071/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002072 * get_mps_bg_map - return the buffer groups associated with a port
2073 * @adap: the adapter
2074 * @idx: the port index
2075 *
2076 * Returns a bitmap indicating which MPS buffer groups are associated
2077 * with the given port. Bit i is set if buffer group i is used by the
2078 * port.
2079 */
2080static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2081{
2082 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2083
2084 if (n == 0)
2085 return idx == 0 ? 0xf : 0;
2086 if (n == 1)
2087 return idx < 2 ? (3 << (2 * idx)) : 0;
2088 return 1 << idx;
2089}
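
/*
 * In other words: when the NUMPORTS field reads 0, port 0 owns all four
 * buffer groups (0xf); when it reads 1, port 0 owns groups 0-1 (0x3) and
 * port 1 owns groups 2-3 (0xc); otherwise each port owns only its own
 * buffer group (1 << idx).
 */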
2090
2091/**
2092 * t4_get_port_stats - collect port statistics
2093 * @adap: the adapter
2094 * @idx: the port index
2095 * @p: the stats structure to fill
2096 *
2097 * Collect statistics related to the given port from HW.
2098 */
2099void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2100{
2101 u32 bgmap = get_mps_bg_map(adap, idx);
2102
2103#define GET_STAT(name) \
2104 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
2105#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2106
2107 p->tx_octets = GET_STAT(TX_PORT_BYTES);
2108 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
2109 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
2110 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
2111 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
2112 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
2113 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
2114 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
2115 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
2116 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
2117 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
2118 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2119 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
2120 p->tx_drop = GET_STAT(TX_PORT_DROP);
2121 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
2122 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
2123 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
2124 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
2125 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
2126 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
2127 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
2128 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
2129 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
2130
2131 p->rx_octets = GET_STAT(RX_PORT_BYTES);
2132 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
2133 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
2134 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
2135 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
2136 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
2137 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2138 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
2139 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
2140 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
2141 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
2142 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
2143 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
2144 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
2145 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
2146 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
2147 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2148 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
2149 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
2150 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
2151 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
2152 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
2153 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
2154 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
2155 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
2156 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
2157 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
2158
2159 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2160 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2161 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2162 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2163 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2164 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2165 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2166 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2167
2168#undef GET_STAT
2169#undef GET_STAT_COM
2170}
2171
2172/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002173 * t4_wol_magic_enable - enable/disable magic packet WoL
2174 * @adap: the adapter
2175 * @port: the physical port index
2176 * @addr: MAC address expected in magic packets, %NULL to disable
2177 *
2178 * Enables/disables magic packet wake-on-LAN for the selected port.
2179 */
2180void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2181 const u8 *addr)
2182{
2183 if (addr) {
2184 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
2185 (addr[2] << 24) | (addr[3] << 16) |
2186 (addr[4] << 8) | addr[5]);
2187 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
2188 (addr[0] << 8) | addr[1]);
2189 }
2190 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
2191 addr ? MAGICEN : 0);
2192}
2193
2194/**
2195 * t4_wol_pat_enable - enable/disable pattern-based WoL
2196 * @adap: the adapter
2197 * @port: the physical port index
2198 * @map: bitmap of which HW pattern filters to set
2199 * @mask0: byte mask for bytes 0-63 of a packet
2200 * @mask1: byte mask for bytes 64-127 of a packet
2201 * @crc: Ethernet CRC for selected bytes
2202 * @enable: enable/disable switch
2203 *
2204 * Sets the pattern filters indicated in @map to mask out the bytes
2205 * specified in @mask0/@mask1 in received packets and compare the CRC of
2206 * the resulting packet against @crc. If @enable is %true pattern-based
2207 * WoL is enabled, otherwise disabled.
2208 */
2209int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2210 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2211{
2212 int i;
2213
2214 if (!enable) {
2215 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
2216 PATEN, 0);
2217 return 0;
2218 }
2219 if (map > 0xff)
2220 return -EINVAL;
2221
2222#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
2223
2224 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2225 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2226 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2227
2228 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2229 if (!(map & 1))
2230 continue;
2231
2232 /* write byte masks */
2233 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2234 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2235 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2236 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2237 return -ETIMEDOUT;
2238
2239 /* write CRC */
2240 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2241 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2242 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2243 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2244 return -ETIMEDOUT;
2245 }
2246#undef EPIO_REG
2247
2248 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
2249 return 0;
2250}
2251
2252#define INIT_CMD(var, cmd, rd_wr) do { \
2253 (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
2254 FW_CMD_REQUEST | FW_CMD_##rd_wr); \
2255 (var).retval_len16 = htonl(FW_LEN16(var)); \
2256} while (0)
2257
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302258int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2259 u32 addr, u32 val)
2260{
2261 struct fw_ldst_cmd c;
2262
2263 memset(&c, 0, sizeof(c));
Vipul Pandya636f9d32012-09-26 02:39:39 +00002264 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2265 FW_CMD_WRITE |
2266 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302267 c.cycles_to_len16 = htonl(FW_LEN16(c));
2268 c.u.addrval.addr = htonl(addr);
2269 c.u.addrval.val = htonl(val);
2270
2271 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2272}
2273
Ben Hutchings49ce9c22012-07-10 10:56:00 +00002274/**
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302275 * t4_mem_win_read_len - read memory through PCIE memory window
2276 * @adap: the adapter
2277 * @addr: address of the first byte requested, aligned on a 32-bit boundary
2278 * @data: buffer to hold the len bytes of data read
2279 * @len: amount of data to read from window. Must be <=
2280 * MEMWIN0_APERTURE after adjusting for the 16B alignment
2281 * requirements of the memory window.
2282 *
2283 * Read len bytes of data from MC starting at @addr.
2284 */
2285int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
2286{
2287 int i;
2288 int off;
2289
2290 /*
2291 * Align on a 16B boundary.
2292 */
2293 off = addr & 15;
2294 if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
2295 return -EINVAL;
2296
Vipul Pandya840f3002012-09-05 02:01:55 +00002297 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15);
2298 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302299
2300 for (i = 0; i < len; i += 4)
2301 *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i));
2302
2303 return 0;
2304}
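
/*
 * Worked example (illustrative): a request for addr 0x1234 programs 0x1230
 * (the 16-byte-aligned base) into PCIE_MEM_ACCESS_OFFSET, sets off to 4,
 * and then reads the data back from MEMWIN0_BASE + 4 onwards, 32 bits at
 * a time.
 */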
2305
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002306/**
2307 * t4_mdio_rd - read a PHY register through MDIO
2308 * @adap: the adapter
2309 * @mbox: mailbox to use for the FW command
2310 * @phy_addr: the PHY address
2311 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2312 * @reg: the register to read
2313 * @valp: where to store the value
2314 *
2315 * Issues a FW command through the given mailbox to read a PHY register.
2316 */
2317int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2318 unsigned int mmd, unsigned int reg, u16 *valp)
2319{
2320 int ret;
2321 struct fw_ldst_cmd c;
2322
2323 memset(&c, 0, sizeof(c));
2324 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2325 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2326 c.cycles_to_len16 = htonl(FW_LEN16(c));
2327 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2328 FW_LDST_CMD_MMD(mmd));
2329 c.u.mdio.raddr = htons(reg);
2330
2331 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2332 if (ret == 0)
2333 *valp = ntohs(c.u.mdio.rval);
2334 return ret;
2335}
2336
2337/**
2338 * t4_mdio_wr - write a PHY register through MDIO
2339 * @adap: the adapter
2340 * @mbox: mailbox to use for the FW command
2341 * @phy_addr: the PHY address
2342 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2343 * @reg: the register to write
2344 * @val: value to write
2345 *
2346 * Issues a FW command through the given mailbox to write a PHY register.
2347 */
2348int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2349 unsigned int mmd, unsigned int reg, u16 val)
2350{
2351 struct fw_ldst_cmd c;
2352
2353 memset(&c, 0, sizeof(c));
2354 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2355 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2356 c.cycles_to_len16 = htonl(FW_LEN16(c));
2357 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2358 FW_LDST_CMD_MMD(mmd));
2359 c.u.mdio.raddr = htons(reg);
2360 c.u.mdio.rval = htons(val);
2361
2362 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2363}
2364
2365/**
Vipul Pandya636f9d32012-09-26 02:39:39 +00002366 * t4_fw_hello - establish communication with FW
2367 * @adap: the adapter
2368 * @mbox: mailbox to use for the FW command
2369 * @evt_mbox: mailbox to receive async FW events
2370 * @master: specifies the caller's willingness to be the device master
2371 * @state: returns the current device state (if non-NULL)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002372 *
Vipul Pandya636f9d32012-09-26 02:39:39 +00002373 * Issues a command to establish communication with FW. Returns either
2374 * an error (negative integer) or the mailbox of the Master PF.
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002375 */
2376int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2377 enum dev_master master, enum dev_state *state)
2378{
2379 int ret;
2380 struct fw_hello_cmd c;
Vipul Pandya636f9d32012-09-26 02:39:39 +00002381 u32 v;
2382 unsigned int master_mbox;
2383 int retries = FW_CMD_HELLO_RETRIES;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002384
Vipul Pandya636f9d32012-09-26 02:39:39 +00002385retry:
2386 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002387 INIT_CMD(c, HELLO, WRITE);
2388 c.err_to_mbasyncnot = htonl(
2389 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2390 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
Vipul Pandya636f9d32012-09-26 02:39:39 +00002391 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
2392 FW_HELLO_CMD_MBMASTER_MASK) |
2393 FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
2394 FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
2395 FW_HELLO_CMD_CLEARINIT);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002396
Vipul Pandya636f9d32012-09-26 02:39:39 +00002397 /*
2398 * Issue the HELLO command to the firmware. If it's not successful
2399 * but indicates that we got a "busy" or "timeout" condition, retry
2400 * the HELLO until we exhaust our retry limit.
2401 */
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002402 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
Vipul Pandya636f9d32012-09-26 02:39:39 +00002403 if (ret < 0) {
2404 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2405 goto retry;
2406 return ret;
2407 }
2408
2409 v = ntohl(c.err_to_mbasyncnot);
2410 master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
2411 if (state) {
2412 if (v & FW_HELLO_CMD_ERR)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002413 *state = DEV_STATE_ERR;
Vipul Pandya636f9d32012-09-26 02:39:39 +00002414 else if (v & FW_HELLO_CMD_INIT)
2415 *state = DEV_STATE_INIT;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002416 else
2417 *state = DEV_STATE_UNINIT;
2418 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00002419
2420 /*
2421 * If we're not the Master PF then we need to wait around for the
2422 * Master PF Driver to finish setting up the adapter.
2423 *
2424 * Note that we also do this wait if we're a non-Master-capable PF and
2425 * there is no current Master PF; a Master PF may show up momentarily
2426 * and we wouldn't want to fail pointlessly. (This can happen when an
2427 * OS loads lots of different drivers rapidly at the same time). In
2428 * this case, the Master PF returned by the firmware will be
2429 * FW_PCIE_FW_MASTER_MASK so the test below will work ...
2430 */
2431 if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
2432 master_mbox != mbox) {
2433 int waiting = FW_CMD_HELLO_TIMEOUT;
2434
2435 /*
2436 * Wait for the firmware to either indicate an error or
2437 * initialized state. If we see either of these we bail out
2438 * and report the issue to the caller. If we exhaust the
2439 * "hello timeout" and we haven't exhausted our retries, try
2440 * again. Otherwise bail with a timeout error.
2441 */
2442 for (;;) {
2443 u32 pcie_fw;
2444
2445 msleep(50);
2446 waiting -= 50;
2447
2448 /*
2449 * If neither Error nor Initialized is indicated
2450 * by the firmware, keep waiting till we exhaust our
2451 * timeout ... and then retry if we haven't exhausted
2452 * our retries ...
2453 */
2454 pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
2455 if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
2456 if (waiting <= 0) {
2457 if (retries-- > 0)
2458 goto retry;
2459
2460 return -ETIMEDOUT;
2461 }
2462 continue;
2463 }
2464
2465 /*
2466 * We have either an Error or an Initialized condition;
2467 * report errors preferentially.
2468 */
2469 if (state) {
2470 if (pcie_fw & FW_PCIE_FW_ERR)
2471 *state = DEV_STATE_ERR;
2472 else if (pcie_fw & FW_PCIE_FW_INIT)
2473 *state = DEV_STATE_INIT;
2474 }
2475
2476 /*
2477 * If we arrived before a Master PF was selected and
2478 * there's now a valid Master PF, grab its identity
2479 * for our caller.
2480 */
2481 if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
2482 (pcie_fw & FW_PCIE_FW_MASTER_VLD))
2483 master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
2484 break;
2485 }
2486 }
2487
2488 return master_mbox;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002489}
2490
2491/**
2492 * t4_fw_bye - end communication with FW
2493 * @adap: the adapter
2494 * @mbox: mailbox to use for the FW command
2495 *
2496 * Issues a command to terminate communication with FW.
2497 */
2498int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2499{
2500 struct fw_bye_cmd c;
2501
2502 INIT_CMD(c, BYE, WRITE);
2503 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2504}
2505
2506/**
2507 * t4_early_init - ask FW to initialize the device
2508 * @adap: the adapter
2509 * @mbox: mailbox to use for the FW command
2510 *
2511 * Issues a command to FW to partially initialize the device. This
2512 * performs initialization that generally doesn't depend on user input.
2513 */
2514int t4_early_init(struct adapter *adap, unsigned int mbox)
2515{
2516 struct fw_initialize_cmd c;
2517
2518 INIT_CMD(c, INITIALIZE, WRITE);
2519 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2520}
2521
2522/**
2523 * t4_fw_reset - issue a reset to FW
2524 * @adap: the adapter
2525 * @mbox: mailbox to use for the FW command
2526 * @reset: specifies the type of reset to perform
2527 *
2528 * Issues a reset command of the specified type to FW.
2529 */
2530int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2531{
2532 struct fw_reset_cmd c;
2533
2534 INIT_CMD(c, RESET, WRITE);
2535 c.val = htonl(reset);
2536 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2537}
2538
2539/**
Vipul Pandya26f7cbc2012-09-26 02:39:42 +00002540 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
2541 * @adap: the adapter
2542 * @mbox: mailbox to use for the FW RESET command (if desired)
2543 * @force: force uP into RESET even if FW RESET command fails
2544 *
2545 * Issues a RESET command to firmware (if desired) with a HALT indication
2546 * and then puts the microprocessor into RESET state. The RESET command
2547 * will only be issued if a legitimate mailbox is provided (mbox <=
2548 * FW_PCIE_FW_MASTER_MASK).
2549 *
2550 * This is generally used in order for the host to safely manipulate the
2551 * adapter without fear of conflicting with whatever the firmware might
2552 * be doing. The only way out of this state is to RESTART the firmware
2553 * ...
2554 */
2555int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
2556{
2557 int ret = 0;
2558
2559 /*
2560 * If a legitimate mailbox is provided, issue a RESET command
2561 * with a HALT indication.
2562 */
2563 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2564 struct fw_reset_cmd c;
2565
2566 memset(&c, 0, sizeof(c));
2567 INIT_CMD(c, RESET, WRITE);
2568 c.val = htonl(PIORST | PIORSTMODE);
2569 c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
2570 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2571 }
2572
2573 /*
2574 * Normally we won't complete the operation if the firmware RESET
2575 * command fails but if our caller insists we'll go ahead and put the
2576 * uP into RESET. This can be useful if the firmware is hung or even
2577 * missing ... We'll have to take the risk of putting the uP into
2578 * RESET without the cooperation of firmware in that case.
2579 *
2580 * We also force the firmware's HALT flag to be on in case we bypassed
2581 * the firmware RESET command above or we're dealing with old firmware
2582 * which doesn't have the HALT capability. This will serve as a flag
2583 * for the incoming firmware to know that it's coming out of a HALT
2584 * rather than a RESET ... if it's new enough to understand that ...
2585 */
2586 if (ret == 0 || force) {
2587 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
2588 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
2589 FW_PCIE_FW_HALT);
2590 }
2591
2592 /*
2593 * And we always return the result of the firmware RESET command
2594 * even when we force the uP into RESET ...
2595 */
2596 return ret;
2597}
2598
2599/**
2600 * t4_fw_restart - restart the firmware by taking the uP out of RESET
2601 * @adap: the adapter
2602 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @reset: if we want to do a RESET to restart things
2603 *
2604 * Restart firmware previously halted by t4_fw_halt(). On successful
2605 * return the previous PF Master remains as the new PF Master and there
2606 * is no need to issue a new HELLO command, etc.
2607 *
2608 * We do this in two ways:
2609 *
2610 * 1. If we're dealing with newer firmware we'll simply want to take
2611 * the chip's microprocessor out of RESET. This will cause the
2612 * firmware to start up from its start vector. And then we'll loop
2613 * until the firmware indicates it's started again (PCIE_FW.HALT
2614 * reset to 0) or we timeout.
2615 *
2616 * 2. If we're dealing with older firmware then we'll need to RESET
2617 * the chip since older firmware won't recognize the PCIE_FW.HALT
2618 * flag and automatically RESET itself on startup.
2619 */
2620int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
2621{
2622 if (reset) {
2623 /*
2624 * Since we're directing the RESET instead of the firmware
2625 * doing it automatically, we need to clear the PCIE_FW.HALT
2626 * bit.
2627 */
2628 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
2629
2630 /*
2631 * If we've been given a valid mailbox, first try to get the
2632 * firmware to do the RESET. If that works, great and we can
2633 * return success. Otherwise, if we haven't been given a
2634 * valid mailbox or the RESET command failed, fall back to
2635 * hitting the chip with a hammer.
2636 */
2637 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2638 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
2639 msleep(100);
2640 if (t4_fw_reset(adap, mbox,
2641 PIORST | PIORSTMODE) == 0)
2642 return 0;
2643 }
2644
2645 t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
2646 msleep(2000);
2647 } else {
2648 int ms;
2649
2650 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
2651 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
2652 if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
2653 return 0;
2654 msleep(100);
2655 ms += 100;
2656 }
2657 return -ETIMEDOUT;
2658 }
2659 return 0;
2660}
2661
2662/**
2663 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
2664 * @adap: the adapter
2665 * @mbox: mailbox to use for the FW RESET command (if desired)
2666 * @fw_data: the firmware image to write
2667 * @size: image size
2668 * @force: force upgrade even if firmware doesn't cooperate
2669 *
2670 * Perform all of the steps necessary for upgrading an adapter's
2671 * firmware image. Normally this requires the cooperation of the
2672 * existing firmware in order to halt all existing activities
2673 * but if an invalid mailbox token is passed in we skip that step
2674 * (though we'll still put the adapter microprocessor into RESET in
2675 * that case).
2676 *
2677 * On successful return the new firmware will have been loaded and
2678 * the adapter will have been fully RESET losing all previous setup
2679 * state. On unsuccessful return the adapter may be completely hosed ...
2680 * positive errno indicates that the adapter is ~probably~ intact, a
2681 * negative errno indicates that things are looking bad ...
2682 */
2683int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
2684 const u8 *fw_data, unsigned int size, int force)
2685{
2686 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
2687 int reset, ret;
2688
2689 ret = t4_fw_halt(adap, mbox, force);
2690 if (ret < 0 && !force)
2691 return ret;
2692
2693 ret = t4_load_fw(adap, fw_data, size);
2694 if (ret < 0)
2695 return ret;
2696
2697 /*
2698 * Older versions of the firmware don't understand the new
2699 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
2700 * restart. So for newly loaded older firmware we'll have to do the
2701 * RESET for it so it starts up on a clean slate. We can tell if
2702 * the newly loaded firmware will handle this right by checking
2703 * its header flags to see if it advertises the capability.
2704 */
2705 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
2706 return t4_fw_restart(adap, mbox, reset);
2707}
2708
2709
2710/**
Vipul Pandya636f9d32012-09-26 02:39:39 +00002711 * t4_fw_config_file - setup an adapter via a Configuration File
2712 * @adap: the adapter
2713 * @mbox: mailbox to use for the FW command
2714 * @mtype: the memory type where the Configuration File is located
2715 * @maddr: the memory address where the Configuration File is located
2716 * @finiver: return value for CF [fini] version
2717 * @finicsum: return value for CF [fini] checksum
2718 * @cfcsum: return value for CF computed checksum
2719 *
2720 * Issue a command to get the firmware to process the Configuration
2721 * File located at the specified mtype/maddr.  If the Configuration
2722 * File is processed successfully and return value pointers are
2723 * provided, the Configuration File's [fini] section version and
2724 * checksum values will be returned along with the computed checksum.
2725 * It's up to the caller to decide how it wants to respond to the
2726 * checksums not matching, but it is recommended that a prominent warning
2727 * be emitted in order to help people rapidly identify changed or
2728 * corrupted Configuration Files.
2729 *
2730 * Also note that it's possible to modify things like "niccaps",
2731 * "toecaps",etc. between processing the Configuration File and telling
2732 * the firmware to use the new configuration. Callers which want to
2733 * do this will need to "hand-roll" their own CAPS_CONFIGS commands for
2734 * Configuration Files if they want to do this.
2735 */
2736int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
2737 unsigned int mtype, unsigned int maddr,
2738 u32 *finiver, u32 *finicsum, u32 *cfcsum)
2739{
2740 struct fw_caps_config_cmd caps_cmd;
2741 int ret;
2742
2743 /*
2744 * Tell the firmware to process the indicated Configuration File.
2745 * If there are no errors and the caller has provided return value
2746 * pointers for the [fini] section version, checksum and computed
2747 * checksum, pass those back to the caller.
2748 */
2749 memset(&caps_cmd, 0, sizeof(caps_cmd));
2750 caps_cmd.op_to_write =
2751 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2752 FW_CMD_REQUEST |
2753 FW_CMD_READ);
2754 caps_cmd.retval_len16 =
2755 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
2756 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2757 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
2758 FW_LEN16(caps_cmd));
2759 ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd);
2760 if (ret < 0)
2761 return ret;
2762
2763 if (finiver)
2764 *finiver = ntohl(caps_cmd.finiver);
2765 if (finicsum)
2766 *finicsum = ntohl(caps_cmd.finicsum);
2767 if (cfcsum)
2768 *cfcsum = ntohl(caps_cmd.cfcsum);
2769
2770 /*
2771 * And now tell the firmware to use the configuration we just loaded.
2772 */
2773 caps_cmd.op_to_write =
2774 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2775 FW_CMD_REQUEST |
2776 FW_CMD_WRITE);
2777 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
2778 return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
2779}
2780
2781/**
2782 * t4_fixup_host_params - fix up host-dependent parameters
2783 * @adap: the adapter
2784 * @page_size: the host's Base Page Size
2785 * @cache_line_size: the host's Cache Line Size
2786 *
2787 * Various registers in T4 contain values which are dependent on the
2788 * host's Base Page and Cache Line Sizes. This function will fix all of
2789 * those registers with the appropriate values as passed in ...
2790 */
2791int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
2792 unsigned int cache_line_size)
2793{
2794 unsigned int page_shift = fls(page_size) - 1;
2795 unsigned int sge_hps = page_shift - 10;
2796 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
2797 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
2798 unsigned int fl_align_log = fls(fl_align) - 1;
2799
2800 t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
2801 HOSTPAGESIZEPF0(sge_hps) |
2802 HOSTPAGESIZEPF1(sge_hps) |
2803 HOSTPAGESIZEPF2(sge_hps) |
2804 HOSTPAGESIZEPF3(sge_hps) |
2805 HOSTPAGESIZEPF4(sge_hps) |
2806 HOSTPAGESIZEPF5(sge_hps) |
2807 HOSTPAGESIZEPF6(sge_hps) |
2808 HOSTPAGESIZEPF7(sge_hps));
2809
2810 t4_set_reg_field(adap, SGE_CONTROL,
2811 INGPADBOUNDARY(INGPADBOUNDARY_MASK) |
2812 EGRSTATUSPAGESIZE_MASK,
2813 INGPADBOUNDARY(fl_align_log - 5) |
2814 EGRSTATUSPAGESIZE(stat_len != 64));
2815
2816 /*
2817 * Adjust various SGE Free List Host Buffer Sizes.
2818 *
2819 * This is something of a crock since we're using fixed indices into
2820 * the array which are also known by the sge.c code and the T4
2821 * Firmware Configuration File. We need to come up with a much better
2822 * approach to managing this array. For now, the first four entries
2823 * are:
2824 *
2825 * 0: Host Page Size
2826 * 1: 64KB
2827 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
2828 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
2829 *
2830 * For the single-MTU buffers in unpacked mode we need to include
2831 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
2832 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
2833 * Padding boundary. All of these are accommodated in the Factory
2834 * Default Firmware Configuration File but we need to adjust it for
2835 * this host's cache line size.
2836 */
2837 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
2838 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
2839 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
2840 & ~(fl_align-1));
2841 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
2842 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
2843 & ~(fl_align-1));
2844
2845 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
2846
2847 return 0;
2848}
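
/*
 * Worked example (illustrative): with a 4KB host page size, page_shift is
 * 12 and sge_hps is 2 (the SGE encodes page sizes as log2(size) - 10);
 * with a 64-byte cache line, stat_len and fl_align are both 64, so
 * fl_align_log is 6, the ingress padding boundary field is written as
 * 6 - 5 = 1 and EGRSTATUSPAGESIZE stays 0.
 */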
2849
2850/**
2851 * t4_fw_initialize - ask FW to initialize the device
2852 * @adap: the adapter
2853 * @mbox: mailbox to use for the FW command
2854 *
2855 * Issues a command to FW to partially initialize the device. This
2856 * performs initialization that generally doesn't depend on user input.
2857 */
2858int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
2859{
2860 struct fw_initialize_cmd c;
2861
2862 memset(&c, 0, sizeof(c));
2863 INIT_CMD(c, INITIALIZE, WRITE);
2864 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2865}
2866
2867/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002868 * t4_query_params - query FW or device parameters
2869 * @adap: the adapter
2870 * @mbox: mailbox to use for the FW command
2871 * @pf: the PF
2872 * @vf: the VF
2873 * @nparams: the number of parameters
2874 * @params: the parameter names
2875 * @val: the parameter values
2876 *
2877 * Reads the value of FW or device parameters. Up to 7 parameters can be
2878 * queried at once.
2879 */
2880int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2881 unsigned int vf, unsigned int nparams, const u32 *params,
2882 u32 *val)
2883{
2884 int i, ret;
2885 struct fw_params_cmd c;
2886 __be32 *p = &c.param[0].mnem;
2887
2888 if (nparams > 7)
2889 return -EINVAL;
2890
2891 memset(&c, 0, sizeof(c));
2892 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2893 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2894 FW_PARAMS_CMD_VFN(vf));
2895 c.retval_len16 = htonl(FW_LEN16(c));
2896 for (i = 0; i < nparams; i++, p += 2)
2897 *p = htonl(*params++);
2898
2899 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2900 if (ret == 0)
2901 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2902 *val++ = ntohl(*p);
2903 return ret;
2904}
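
/*
 * Illustrative sketch only (not part of the driver): read one device
 * parameter, here the port vector, via t4_query_params().  This assumes
 * the FW_PARAMS_* encoding macros and the DEV/PORTVEC mnemonics provided
 * by t4fw_api.h; the mbox/pf/vf values are whatever the caller already
 * uses for firmware commands.
 */
static inline int example_get_portvec(struct adapter *adap, unsigned int mbox,
				      unsigned int pf, unsigned int vf,
				      u32 *portvec)
{
	u32 param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);

	/* a single parameter/value pair; up to 7 may be queried at once */
	return t4_query_params(adap, mbox, pf, vf, 1, &param, portvec);
}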
2905
2906/**
2907 * t4_set_params - sets FW or device parameters
2908 * @adap: the adapter
2909 * @mbox: mailbox to use for the FW command
2910 * @pf: the PF
2911 * @vf: the VF
2912 * @nparams: the number of parameters
2913 * @params: the parameter names
2914 * @val: the parameter values
2915 *
2916 * Sets the value of FW or device parameters. Up to 7 parameters can be
2917 * specified at once.
2918 */
2919int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2920 unsigned int vf, unsigned int nparams, const u32 *params,
2921 const u32 *val)
2922{
2923 struct fw_params_cmd c;
2924 __be32 *p = &c.param[0].mnem;
2925
2926 if (nparams > 7)
2927 return -EINVAL;
2928
2929 memset(&c, 0, sizeof(c));
2930 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2931 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2932 FW_PARAMS_CMD_VFN(vf));
2933 c.retval_len16 = htonl(FW_LEN16(c));
2934 while (nparams--) {
2935 *p++ = htonl(*params++);
2936 *p++ = htonl(*val++);
2937 }
2938
2939 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2940}
2941
2942/**
2943 * t4_cfg_pfvf - configure PF/VF resource limits
2944 * @adap: the adapter
2945 * @mbox: mailbox to use for the FW command
2946 * @pf: the PF being configured
2947 * @vf: the VF being configured
2948 * @txq: the max number of egress queues
2949 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
2950 * @rxqi: the max number of interrupt-capable ingress queues
2951 * @rxq: the max number of interruptless ingress queues
2952 * @tc: the PCI traffic class
2953 * @vi: the max number of virtual interfaces
2954 * @cmask: the channel access rights mask for the PF/VF
2955 * @pmask: the port access rights mask for the PF/VF
2956 * @nexact: the maximum number of exact MPS filters
2957 * @rcaps: read capabilities
2958 * @wxcaps: write/execute capabilities
2959 *
2960 * Configures resource limits and capabilities for a physical or virtual
2961 * function.
2962 */
2963int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
2964 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
2965 unsigned int rxqi, unsigned int rxq, unsigned int tc,
2966 unsigned int vi, unsigned int cmask, unsigned int pmask,
2967 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
2968{
2969 struct fw_pfvf_cmd c;
2970
2971 memset(&c, 0, sizeof(c));
2972 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
2973 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
2974 FW_PFVF_CMD_VFN(vf));
2975 c.retval_len16 = htonl(FW_LEN16(c));
2976 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
2977 FW_PFVF_CMD_NIQ(rxq));
Casey Leedom81323b72010-06-25 12:10:32 +00002978 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002979 FW_PFVF_CMD_PMASK(pmask) |
2980 FW_PFVF_CMD_NEQ(txq));
2981 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
2982 FW_PFVF_CMD_NEXACTF(nexact));
2983 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
2984 FW_PFVF_CMD_WX_CAPS(wxcaps) |
2985 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
2986 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2987}
2988
2989/**
2990 * t4_alloc_vi - allocate a virtual interface
2991 * @adap: the adapter
2992 * @mbox: mailbox to use for the FW command
2993 * @port: physical port associated with the VI
2994 * @pf: the PF owning the VI
2995 * @vf: the VF owning the VI
2996 * @nmac: number of MAC addresses needed (1 to 5)
2997 * @mac: the MAC addresses of the VI
2998 * @rss_size: size of RSS table slice associated with this VI
2999 *
3000 * Allocates a virtual interface for the given physical port. If @mac is
3001 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
3002 * @mac should be large enough to hold @nmac Ethernet addresses, they are
3003 * stored consecutively so the space needed is @nmac * 6 bytes.
3004 * Returns a negative error number or the non-negative VI id.
3005 */
3006int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
3007 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
3008 unsigned int *rss_size)
3009{
3010 int ret;
3011 struct fw_vi_cmd c;
3012
3013 memset(&c, 0, sizeof(c));
3014 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
3015 FW_CMD_WRITE | FW_CMD_EXEC |
3016 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
3017 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
3018 c.portid_pkd = FW_VI_CMD_PORTID(port);
3019 c.nmac = nmac - 1;
3020
3021 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3022 if (ret)
3023 return ret;
3024
3025 if (mac) {
3026 memcpy(mac, c.mac, sizeof(c.mac));
3027 switch (nmac) {
3028 case 5:
3029 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
3030 case 4:
3031 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
3032 case 3:
3033 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
3034 case 2:
3035 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
3036 }
3037 }
3038 if (rss_size)
3039 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00003040 return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003041}

/**
 * t4_set_rxmode - set Rx properties of a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @mtu: the new MTU or -1
 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sets Rx properties of a virtual interface.
 */
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
                  int mtu, int promisc, int all_multi, int bcast, int vlanex,
                  bool sleep_ok)
{
        struct fw_vi_rxmode_cmd c;

        /* convert to FW values */
        if (mtu < 0)
                mtu = FW_RXMODE_MTU_NO_CHG;
        if (promisc < 0)
                promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
        if (all_multi < 0)
                all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
        if (bcast < 0)
                bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
        if (vlanex < 0)
                vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;

        memset(&c, 0, sizeof(c));
        c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
                             FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
        c.retval_len16 = htonl(FW_LEN16(c));
        c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
                                  FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
                                  FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
                                  FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
                                  FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
        return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
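
/*
 * Illustrative sketch (not part of the driver): t4_set_rxmode() treats -1 as
 * "no change", so an individual Rx property can be flipped without knowing
 * the current value of the others.  The helper name below is hypothetical.
 */
#if 0   /* example only - not built */
static int example_set_promisc(struct adapter *adap, unsigned int mbox,
                               unsigned int viid, bool on)
{
        /* change only the promiscuous bit; MTU, all-multi, broadcast and
         * VLAN extraction keep their current settings
         */
        return t4_set_rxmode(adap, mbox, viid, -1, on, -1, -1, -1, true);
}
#endif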

/**
 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @free: if true any existing filters for this VI id are first removed
 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
 * @addr: the MAC address(es)
 * @idx: where to store the index of each allocated filter
 * @hash: pointer to hash address filter bitmap
 * @sleep_ok: call is allowed to sleep
 *
 * Allocates an exact-match filter for each of the supplied addresses and
 * sets it to the corresponding address.  If @idx is not %NULL it should
 * have at least @naddr entries, each of which will be set to the index of
 * the filter allocated for the corresponding MAC address.  If a filter
 * could not be allocated for an address its index is set to 0xffff.
 * If @hash is not %NULL, addresses that fail to allocate an exact filter
 * are hashed and the corresponding bit is set in the hash filter bitmap
 * pointed at by @hash.
 *
 * Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
                      unsigned int viid, bool free, unsigned int naddr,
                      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
        int i, ret;
        struct fw_vi_mac_cmd c;
        struct fw_vi_mac_exact *p;

        if (naddr > 7)
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
                             FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
                             FW_VI_MAC_CMD_VIID(viid));
        c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
                                    FW_CMD_LEN16((naddr + 2) / 2));

        for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
                p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
                                        FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
                memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
        }

        ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
        if (ret)
                return ret;

        for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
                u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));

                if (idx)
                        idx[i] = index >= NEXACT_MAC ? 0xffff : index;
                if (index < NEXACT_MAC)
                        ret++;
                else if (hash)
                        *hash |= (1ULL << hash_mac_addr(addr[i]));
        }
        return ret;
}
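
/*
 * Illustrative sketch (not part of the driver): programming a unicast
 * address list with t4_alloc_mac_filt() and falling back to the inexact
 * hash for addresses that did not get an exact-match slot.  Names and the
 * fallback policy are hypothetical.
 */
#if 0   /* example only - not built */
static int example_program_uc_list(struct adapter *adap, unsigned int mbox,
                                   unsigned int viid, const u8 **addrs,
                                   unsigned int naddr)
{
        u64 hash = 0;
        int ret;

        /* free = true wipes the VI's existing exact-match filters first;
         * naddr must be <= 7 per the function above
         */
        ret = t4_alloc_mac_filt(adap, mbox, viid, true, naddr, addrs,
                                NULL, &hash, true);
        if (ret < 0)
                return ret;

        /* addresses beyond the exact-match capacity ended up in @hash */
        if (hash)
                ret = t4_set_addr_hash(adap, mbox, viid, true, hash, true);
        return ret;
}
#endif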

/**
 * t4_change_mac - modifies the exact-match filter for a MAC address
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: whether a new MAC allocation should be persistent
 * @add_smt: if true also add the address to the HW SMT
 *
 * Modifies an exact-match filter and sets it to the new MAC address.
 * Note that in general it is not possible to modify the value of a given
 * filter, so the generic way to modify an address filter is to free the
 * one being used by the old address value and allocate a new filter for
 * the new address value.  @idx can be -1 if the address is a new addition.
 *
 * Returns a negative error number or the index of the filter with the new
 * MAC value.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
                  int idx, const u8 *addr, bool persist, bool add_smt)
{
        int ret, mode;
        struct fw_vi_mac_cmd c;
        struct fw_vi_mac_exact *p = c.u.exact;

        if (idx < 0)                            /* new allocation */
                idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
        mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

        memset(&c, 0, sizeof(c));
        c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
                             FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
        c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
        p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
                                FW_VI_MAC_CMD_SMAC_RESULT(mode) |
                                FW_VI_MAC_CMD_IDX(idx));
        memcpy(p->macaddr, addr, sizeof(p->macaddr));

        ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
        if (ret == 0) {
                ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
                if (ret >= NEXACT_MAC)
                        ret = -ENOMEM;
        }
        return ret;
}
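
/*
 * Illustrative sketch (not part of the driver): the usual pattern around
 * t4_change_mac() is to remember the index it returns and hand it back the
 * next time the address changes, so the old exact-match slot is reused.
 * The helper and the caller-owned index variable are hypothetical.
 */
#if 0   /* example only - not built */
static int example_update_mac(struct adapter *adap, unsigned int mbox,
                              unsigned int viid, int *filt_idx,
                              const u8 *new_addr)
{
        int ret;

        /* *filt_idx is -1 on the first call, the stored index afterwards */
        ret = t4_change_mac(adap, mbox, viid, *filt_idx, new_addr,
                            true, true);
        if (ret >= 0) {
                *filt_idx = ret;        /* remember the slot for next time */
                ret = 0;
        }
        return ret;
}
#endif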

/**
 * t4_set_addr_hash - program the MAC inexact-match hash filter
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @ucast: whether the hash filter should also match unicast addresses
 * @vec: the value to be written to the hash filter
 * @sleep_ok: call is allowed to sleep
 *
 * Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
                     bool ucast, u64 vec, bool sleep_ok)
{
        struct fw_vi_mac_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
                             FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
        c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
                                    FW_VI_MAC_CMD_HASHUNIEN(ucast) |
                                    FW_CMD_LEN16(1));
        c.u.hash.hashvec = cpu_to_be64(vec);
        return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
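
/*
 * Illustrative sketch (not part of the driver): @vec is a 64-bit bitmap
 * indexed by hash_mac_addr(), the same helper t4_alloc_mac_filt() uses for
 * its overflow case, so a caller can build the vector directly from an
 * address list.  The helper name and the ucast choice are hypothetical.
 */
#if 0   /* example only - not built */
static int example_hash_filter(struct adapter *adap, unsigned int mbox,
                               unsigned int viid, const u8 **addrs,
                               unsigned int naddr)
{
        u64 vec = 0;
        unsigned int i;

        for (i = 0; i < naddr; i++)
                vec |= 1ULL << hash_mac_addr(addrs[i]);

        /* ucast = false: only multicast frames consult the hash filter */
        return t4_set_addr_hash(adap, mbox, viid, false, vec, true);
}
#endif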

/**
 * t4_enable_vi - enable/disable a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 *
 * Enables/disables a virtual interface.
 */
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
                 bool rx_en, bool tx_en)
{
        struct fw_vi_enable_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
                             FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
        c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
                               FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_identify_port - identify a VI's port by blinking its LED
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @nblinks: how many times to blink LED at 2.5 Hz
 *
 * Identifies a VI's port by blinking its LED.
 */
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
                     unsigned int nblinks)
{
        struct fw_vi_enable_cmd c;

        memset(&c, 0, sizeof(c));       /* don't send uninitialized stack data to FW */
        c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
                             FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
        c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
        c.blinkdur = htons(nblinks);
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_iq_free - free an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqtype: the ingress queue type
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Frees an ingress queue and its associated FLs, if any.
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
               unsigned int vf, unsigned int iqtype, unsigned int iqid,
               unsigned int fl0id, unsigned int fl1id)
{
        struct fw_iq_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
                            FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
                            FW_IQ_CMD_VFN(vf));
        c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
        c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
        c.iqid = htons(iqid);
        c.fl0id = htons(fl0id);
        c.fl1id = htons(fl1id);
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
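
/*
 * Illustrative sketch (not part of the driver): freeing an ingress queue
 * that has a single free list attached; 0xffff marks the absent FL1, as
 * described above.  The FW_IQ_TYPE_FL_INT_CAP ingress-queue type is assumed
 * to come from t4fw_api.h; the queue ids and helper name are hypothetical.
 */
#if 0   /* example only - not built */
static int example_free_rxq(struct adapter *adap, unsigned int mbox,
                            unsigned int pf, unsigned int vf,
                            unsigned int iqid, unsigned int flid)
{
        return t4_iq_free(adap, mbox, pf, vf, FW_IQ_TYPE_FL_INT_CAP,
                          iqid, flid, 0xffff);
}
#endif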

/**
 * t4_eth_eq_free - free an Ethernet egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                   unsigned int vf, unsigned int eqid)
{
        struct fw_eq_eth_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
                            FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
                            FW_EQ_ETH_CMD_VFN(vf));
        c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
        c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ctrl_eq_free - free a control egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees a control egress queue.
 */
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid)
{
        struct fw_eq_ctrl_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
                            FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
                            FW_EQ_CTRL_CMD_VFN(vf));
        c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
        c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ofld_eq_free - free an offload egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid)
{
        struct fw_eq_ofld_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
                            FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
                            FW_EQ_OFLD_CMD_VFN(vf));
        c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
        c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
        u8 opcode = *(const u8 *)rpl;

        if (opcode == FW_PORT_CMD) {    /* link/module state change message */
                int speed = 0, fc = 0;
                const struct fw_port_cmd *p = (void *)rpl;
                int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
                int port = adap->chan_map[chan];
                struct port_info *pi = adap2pinfo(adap, port);
                struct link_config *lc = &pi->link_cfg;
                u32 stat = ntohl(p->u.info.lstatus_to_modtype);
                int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
                u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);

                if (stat & FW_PORT_CMD_RXPAUSE)
                        fc |= PAUSE_RX;
                if (stat & FW_PORT_CMD_TXPAUSE)
                        fc |= PAUSE_TX;
                if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
                        speed = SPEED_100;
                else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
                        speed = SPEED_1000;
                else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
                        speed = SPEED_10000;

                if (link_ok != lc->link_ok || speed != lc->speed ||
                    fc != lc->fc) {                     /* something changed */
                        lc->link_ok = link_ok;
                        lc->speed = speed;
                        lc->fc = fc;
                        t4_os_link_changed(adap, port, link_ok);
                }
                if (mod != pi->mod_type) {
                        pi->mod_type = mod;
                        t4_os_portmod_changed(adap, port);
                }
        }
        return 0;
}
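
/*
 * Illustrative sketch (not part of the driver): t4_handle_fw_rpl() only
 * needs the raw 64-bit words of a FW reply and checks the opcode itself, so
 * a FW event handler can simply forward each message it pulls off its event
 * queue and then act on the refreshed per-port state.  The wrapper, its
 * @port argument and the log message are hypothetical.
 */
#if 0   /* example only - not built */
static void example_fw_event(struct adapter *adap, int port,
                             const __be64 *msg)
{
        struct port_info *pi = adap2pinfo(adap, port);

        t4_handle_fw_rpl(adap, msg);

        /* after the call, pi->link_cfg reflects the state FW reported */
        if (!pi->link_cfg.link_ok)
                dev_info(adap->pdev_dev, "port %d link is down\n", port);
}
#endif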

static void __devinit get_pci_mode(struct adapter *adapter,
                                   struct pci_params *p)
{
        u16 val;
        u32 pcie_cap = pci_pcie_cap(adapter->pdev);

        if (pcie_cap) {
                pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
                                     &val);
                p->speed = val & PCI_EXP_LNKSTA_CLS;
                p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
        }
}

/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: link capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void __devinit init_link_config(struct link_config *lc,
                                       unsigned int caps)
{
        lc->supported = caps;
        lc->requested_speed = 0;
        lc->speed = 0;
        lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
        if (lc->supported & FW_PORT_CAP_ANEG) {
                lc->advertising = lc->supported & ADVERT_MASK;
                lc->autoneg = AUTONEG_ENABLE;
                lc->requested_fc |= PAUSE_AUTONEG;
        } else {
                lc->advertising = 0;
                lc->autoneg = AUTONEG_DISABLE;
        }
}

int t4_wait_dev_ready(struct adapter *adap)
{
        if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
                return 0;
        msleep(500);
        return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
}

static int __devinit get_flash_params(struct adapter *adap)
{
        int ret;
        u32 info;

        ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
        if (!ret)
                ret = sf1_read(adap, 3, 0, 1, &info);
        t4_write_reg(adap, SF_OP, 0);           /* unlock SF */
        if (ret)
                return ret;

        if ((info & 0xff) != 0x20)              /* not a Numonix flash */
                return -EINVAL;
        info >>= 16;                            /* log2 of size */
        if (info >= 0x14 && info < 0x18)
                adap->params.sf_nsec = 1 << (info - 16);
        else if (info == 0x18)
                adap->params.sf_nsec = 64;
        else
                return -EINVAL;
        adap->params.sf_size = 1 << info;
        adap->params.sf_fw_start =
                t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
        return 0;
}

/**
 * t4_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
 */
int __devinit t4_prep_adapter(struct adapter *adapter)
{
        int ret;

        ret = t4_wait_dev_ready(adapter);
        if (ret < 0)
                return ret;

        get_pci_mode(adapter, &adapter->params.pci);
        adapter->params.rev = t4_read_reg(adapter, PL_REV);

        ret = get_flash_params(adapter);
        if (ret < 0) {
                dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
                return ret;
        }

        init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

        /*
         * Default port for debugging in case we can't reach FW.
         */
        adapter->params.nports = 1;
        adapter->params.portvec = 1;
        adapter->params.vpd.cclk = 50000;
        return 0;
}
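
/*
 * Illustrative sketch (not part of the driver): the expected probe-time
 * ordering is t4_prep_adapter() first, so the flash, PCI and clock defaults
 * are known, then firmware and queue initialisation, and finally
 * t4_port_init() below to create one VI per physical port.  The helper
 * compresses that sequence and is hypothetical.
 */
#if 0   /* example only - not built */
static int example_bring_up(struct adapter *adap, int mbox, int pf, int vf)
{
        int ret;

        ret = t4_prep_adapter(adap);
        if (ret)
                return ret;

        /* ... contact FW, configure RSS and queues, etc. (omitted) ... */

        return t4_port_init(adap, mbox, pf, vf);
}
#endif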

int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
        u8 addr[6];
        int ret, i, j = 0;
        struct fw_port_cmd c;
        struct fw_rss_vi_config_cmd rvc;

        memset(&c, 0, sizeof(c));
        memset(&rvc, 0, sizeof(rvc));

        for_each_port(adap, i) {
                unsigned int rss_size;
                struct port_info *p = adap2pinfo(adap, i);

                while ((adap->params.portvec & (1 << j)) == 0)
                        j++;

                c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
                                       FW_CMD_REQUEST | FW_CMD_READ |
                                       FW_PORT_CMD_PORTID(j));
                c.action_to_len16 = htonl(
                        FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
                        FW_LEN16(c));
                ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
                if (ret)
                        return ret;

                ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
                if (ret < 0)
                        return ret;

                p->viid = ret;
                p->tx_chan = j;
                p->lport = j;
                p->rss_size = rss_size;
                memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
                memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
                adap->port[i]->dev_id = j;

                ret = ntohl(c.u.info.lstatus_to_modtype);
                p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
                        FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
                p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
                p->mod_type = FW_PORT_MOD_TYPE_NA;

                rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
                                       FW_CMD_REQUEST | FW_CMD_READ |
                                       FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
                rvc.retval_len16 = htonl(FW_LEN16(rvc));
                ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
                if (ret)
                        return ret;
                p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);

                init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
                j++;
        }
        return 0;
}