/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/init.h>
36#include <linux/delay.h>
37#include "cxgb4.h"
38#include "t4_regs.h"
39#include "t4fw_api.h"
40
41/**
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
50 *
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
55 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
59 while (1) {
60 u32 val = t4_read_reg(adapter, reg);
61
62 if (!!(val & mask) == polarity) {
63 if (valp)
64 *valp = val;
65 return 0;
66 }
67 if (--attempts == 0)
68 return -EAGAIN;
69 if (delay)
70 udelay(delay);
71 }
72}
73
74static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
75 int polarity, int attempts, int delay)
76{
77 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
78 delay, NULL);
79}
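/*
 * Usage sketch (illustrative only; the example_* helper below is not part of
 * any driver call path): most polling in this file reduces to one call of the
 * helpers above.  Waiting for the serial flash interface to go idle, as
 * sf1_read()/sf1_write() do further down, is a polarity-0 wait on the BUSY
 * bit of SF_OP.
 */
static inline int example_wait_sf_idle(struct adapter *adapter)
{
	/* up to 10 polls of SF_OP, 5 us apart, until BUSY reads as 0 */
	return t4_wait_op_done(adapter, SF_OP, BUSY, 0, 10, 5);
}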
80
81/**
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
87 *
88 * Sets a register field specified by the supplied mask to the
89 * given value.
90 */
91void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
92 u32 val)
93{
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
95
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush */
98}
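/*
 * Usage sketch (illustrative only; the example_* helper is not used by the
 * driver and assumes BYTECNT() encodes the 2-bit byte-count field the same
 * way sf1_read()/sf1_write() below compose it): replacing one field of a
 * register is a single read-modify-write call.
 */
static inline void example_set_sf_bytecnt(struct adapter *adap, int byte_cnt)
{
	/* clear the whole BYTECNT field, then program the new count */
	t4_set_reg_field(adap, SF_OP, BYTECNT(3), BYTECNT(byte_cnt - 1));
}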
99
100/**
101 * t4_read_indirect - read indirectly addressed registers
102 * @adap: the adapter
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
108 *
109 * Reads registers that are accessed indirectly through an address/data
110 * register pair.
111 */
static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
116 while (nregs--) {
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
119 start_idx++;
120 }
121}
122
/**
124 * t4_write_indirect - write indirectly addressed registers
125 * @adap: the adapter
126 * @addr_reg: register holding the indirect addresses
127 * @data_reg: register holding the value for the indirect registers
128 * @vals: values to write
129 * @nregs: how many indirect registers to write
130 * @start_idx: address of first indirect register to write
131 *
132 * Writes a sequential block of registers that are accessed indirectly
133 * through an address/data register pair.
134 */
135void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
136 unsigned int data_reg, const u32 *vals,
137 unsigned int nregs, unsigned int start_idx)
138{
139 while (nregs--) {
140 t4_write_reg(adap, addr_reg, start_idx++);
141 t4_write_reg(adap, data_reg, *vals++);
142 }
143}
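/*
 * Usage sketch (illustrative only; the example_* helper is not used by the
 * driver, and the address/data register offsets are left as parameters
 * rather than naming a real register pair): reading a block of indirectly
 * addressed registers is one call once the pair is known.
 */
static inline void example_read_indirect_block(struct adapter *adap,
					       unsigned int addr_reg,
					       unsigned int data_reg,
					       u32 *vals)
{
	/* fetch four consecutive indirect registers starting at index 0 */
	t4_read_indirect(adap, addr_reg, data_reg, vals, 4, 0);
}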
144
/*
146 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
147 */
148static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
149 u32 mbox_addr)
150{
151 for ( ; nflit; nflit--, mbox_addr += 8)
152 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
153}
154
155/*
156 * Handle a FW assertion reported in a mailbox.
157 */
158static void fw_asrt(struct adapter *adap, u32 mbox_addr)
159{
160 struct fw_debug_cmd asrt;
161
162 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
163 dev_alert(adap->pdev_dev,
164 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
165 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
166 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
167}
168
169static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
170{
171 dev_err(adap->pdev_dev,
172 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
173 (unsigned long long)t4_read_reg64(adap, data_reg),
174 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
175 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
176 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
177 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
178 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
179 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
180 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
181}
182
183/**
184 * t4_wr_mbox_meat - send a command to FW through the given mailbox
185 * @adap: the adapter
186 * @mbox: index of the mailbox to use
187 * @cmd: the command to write
188 * @size: command length in bytes
189 * @rpl: where to optionally store the reply
190 * @sleep_ok: if true we may sleep while awaiting command completion
191 *
192 * Sends the given command to FW through the selected mailbox and waits
193 * for the FW to execute the command. If @rpl is not %NULL it is used to
194 * store the FW's reply to the command. The command and its optional
195 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
196 * to respond. @sleep_ok determines whether we may sleep while awaiting
197 * the response. If sleeping is allowed we use progressive backoff
198 * otherwise we spin.
199 *
200 * The return value is 0 on success or a negative errno on failure. A
201 * failure can happen either because we are not able to execute the
202 * command or FW executes it but signals an error. In the latter case
203 * the return value is the error code indicated by FW (negated).
204 */
205int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
206 void *rpl, bool sleep_ok)
207{
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
210 };
211
212 u32 v;
213 u64 res;
214 int i, ms, delay_idx;
215 const __be64 *p = cmd;
216 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
217 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
218
219 if ((size & 15) || size > MBOX_LEN)
220 return -EINVAL;
221
	/*
223 * If the device is off-line, as in EEH, commands will time out.
224 * Fail them early so we don't waste time waiting.
225 */
226 if (adap->pdev->error_state != pci_channel_io_normal)
227 return -EIO;
228
	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
230 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
231 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
232
233 if (v != MBOX_OWNER_DRV)
234 return v ? -EBUSY : -ETIMEDOUT;
235
236 for (i = 0; i < size; i += 8)
237 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
238
239 t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
240 t4_read_reg(adap, ctl_reg); /* flush write */
241
242 delay_idx = 0;
243 ms = delay[0];
244
245 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
246 if (sleep_ok) {
247 ms = delay[delay_idx]; /* last element may repeat */
248 if (delay_idx < ARRAY_SIZE(delay) - 1)
249 delay_idx++;
250 msleep(ms);
251 } else
252 mdelay(ms);
253
254 v = t4_read_reg(adap, ctl_reg);
255 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
256 if (!(v & MBMSGVALID)) {
257 t4_write_reg(adap, ctl_reg, 0);
258 continue;
259 }
260
261 res = t4_read_reg64(adap, data_reg);
262 if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
263 fw_asrt(adap, data_reg);
264 res = FW_CMD_RETVAL(EIO);
265 } else if (rpl)
266 get_mbox_rpl(adap, rpl, size / 8, data_reg);
267
268 if (FW_CMD_RETVAL_GET((int)res))
269 dump_mbox(adap, mbox, data_reg);
270 t4_write_reg(adap, ctl_reg, 0);
271 return -FW_CMD_RETVAL_GET((int)res);
272 }
273 }
274
275 dump_mbox(adap, mbox, data_reg);
276 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
277 *(const u8 *)cmd, mbox);
278 return -ETIMEDOUT;
279}
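/*
 * Usage sketch (illustrative only; the example_* helper is not used by the
 * driver and simply mirrors what t4_restart_aneg() does later in this file):
 * a typical caller fills in a firmware command structure, encodes the opcode
 * and length in its header words, and hands it to t4_wr_mbox(), the sleeping
 * wrapper around the routine above.
 */
static inline int example_restart_aneg_cmd(struct adapter *adap,
					   unsigned int mbox,
					   unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	/* a negative return is either a transport error or -(FW error code) */
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}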
280
281/**
282 * t4_mc_read - read from MC through backdoor accesses
283 * @adap: the adapter
284 * @addr: address of first byte requested
285 * @data: 64 bytes of data containing the requested address
286 * @ecc: where to store the corresponding 64-bit ECC word
287 *
288 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
289 * that covers the requested address @addr. If @parity is not %NULL it
290 * is assigned the 64-bit ECC word for the read data.
291 */
292int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
293{
294 int i;
295
296 if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
297 return -EBUSY;
298 t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
299 t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
300 t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
301 t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
302 BIST_CMD_GAP(1));
303 i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
304 if (i)
305 return i;
306
307#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
308
309 for (i = 15; i >= 0; i--)
310 *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
311 if (ecc)
312 *ecc = t4_read_reg64(adap, MC_DATA(16));
313#undef MC_DATA
314 return 0;
315}
316
317/**
318 * t4_edc_read - read from EDC through backdoor accesses
319 * @adap: the adapter
320 * @idx: which EDC to access
321 * @addr: address of first byte requested
322 * @data: 64 bytes of data containing the requested address
323 * @ecc: where to store the corresponding 64-bit ECC word
324 *
325 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
327 * is assigned the 64-bit ECC word for the read data.
328 */
329int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
330{
331 int i;
332
333 idx *= EDC_STRIDE;
334 if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
335 return -EBUSY;
336 t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
337 t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
338 t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
339 t4_write_reg(adap, EDC_BIST_CMD + idx,
340 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
341 i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
342 if (i)
343 return i;
344
345#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
346
347 for (i = 15; i >= 0; i--)
348 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
349 if (ecc)
350 *ecc = t4_read_reg64(adap, EDC_DATA(16));
351#undef EDC_DATA
352 return 0;
353}
354
/*
356 * t4_mem_win_rw - read/write memory through PCIE memory window
357 * @adap: the adapter
358 * @addr: address of first byte requested
359 * @data: MEMWIN0_APERTURE bytes of data containing the requested address
360 * @dir: direction of transfer 1 => read, 0 => write
361 *
362 * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
363 * MEMWIN0_APERTURE-byte-aligned address that covers the requested
364 * address @addr.
365 */
366static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
367{
368 int i;
369
370 /*
371 * Setup offset into PCIE memory window. Address must be a
372 * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to
373 * ensure that changes propagate before we attempt to use the new
374 * values.)
375 */
376 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
377 addr & ~(MEMWIN0_APERTURE - 1));
378 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
379
	/* Collecting data 4 bytes at a time up to MEMWIN0_APERTURE */
381 for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) {
382 if (dir)
383 *data++ = t4_read_reg(adap, (MEMWIN0_BASE + i));
384 else
385 t4_write_reg(adap, (MEMWIN0_BASE + i), *data++);
386 }
387
388 return 0;
389}
390
391/**
392 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
393 * @adap: the adapter
394 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
395 * @addr: address within indicated memory type
396 * @len: amount of memory to transfer
397 * @buf: host memory buffer
398 * @dir: direction of transfer 1 => read, 0 => write
399 *
400 * Reads/writes an [almost] arbitrary memory region in the firmware: the
401 * firmware memory address, length and host buffer must be aligned on
402 * 32-bit boudaries. The memory is transferred as a raw byte sequence
403 * from/to the firmware's memory. If this memory contains data
404 * structures which contain multi-byte integers, it's the callers
405 * responsibility to perform appropriate byte order conversions.
406 */
407static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
408 __be32 *buf, int dir)
409{
410 u32 pos, start, end, offset, memoffset;
411 int ret;
412
413 /*
414 * Argument sanity checks ...
415 */
416 if ((addr & 0x3) || (len & 0x3))
417 return -EINVAL;
418
419 /*
420 * Offset into the region of memory which is being accessed
421 * MEM_EDC0 = 0
422 * MEM_EDC1 = 1
423 * MEM_MC = 2
424 */
425 memoffset = (mtype * (5 * 1024 * 1024));
426
427 /* Determine the PCIE_MEM_ACCESS_OFFSET */
428 addr = addr + memoffset;
429
430 /*
 * The underlying EDC/MC read routines read MEMWIN0_APERTURE bytes
432 * at a time so we need to round down the start and round up the end.
433 * We'll start copying out of the first line at (addr - start) a word
434 * at a time.
435 */
436 start = addr & ~(MEMWIN0_APERTURE-1);
437 end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
438 offset = (addr - start)/sizeof(__be32);
439
440 for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
441 __be32 data[MEMWIN0_APERTURE/sizeof(__be32)];
442
443 /*
444 * If we're writing, copy the data from the caller's memory
445 * buffer
446 */
447 if (!dir) {
448 /*
449 * If we're doing a partial write, then we need to do
450 * a read-modify-write ...
451 */
452 if (offset || len < MEMWIN0_APERTURE) {
453 ret = t4_mem_win_rw(adap, pos, data, 1);
454 if (ret)
455 return ret;
456 }
457 while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
458 len > 0) {
459 data[offset++] = *buf++;
460 len -= sizeof(__be32);
461 }
462 }
463
464 /*
465 * Transfer a block of memory and bail if there's an error.
466 */
467 ret = t4_mem_win_rw(adap, pos, data, dir);
468 if (ret)
469 return ret;
470
471 /*
472 * If we're reading, copy the data into the caller's memory
473 * buffer.
474 */
475 if (dir)
476 while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
477 len > 0) {
478 *buf++ = data[offset++];
479 len -= sizeof(__be32);
480 }
481 }
482
483 return 0;
484}
485
486int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
487 __be32 *buf)
488{
489 return t4_memory_rw(adap, mtype, addr, len, buf, 0);
490}
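/*
 * Usage sketch (illustrative only; the example_* helper is not used by the
 * driver): reading adapter memory through the window is the mirror image of
 * t4_memory_write() above.  Both the adapter address and the length must be
 * multiples of 4, and the data comes back as raw big-endian words.
 */
static inline int example_read_edc0_word(struct adapter *adap, u32 addr,
					 __be32 *word)
{
	/* dir == 1 selects a read in t4_memory_rw() */
	return t4_memory_rw(adap, MEM_EDC0, addr, sizeof(*word), word, 1);
}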
491
#define EEPROM_STAT_ADDR 0x7bfc
#define VPD_BASE 0
#define VPD_LEN 512

/**
497 * t4_seeprom_wp - enable/disable EEPROM write protection
498 * @adapter: the adapter
499 * @enable: whether to enable or disable write protection
500 *
501 * Enables or disables write protection on the serial EEPROM.
502 */
503int t4_seeprom_wp(struct adapter *adapter, bool enable)
504{
505 unsigned int v = enable ? 0xc : 0;
506 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
507 return ret < 0 ? ret : 0;
508}
509
510/**
511 * get_vpd_params - read VPD parameters from VPD EEPROM
512 * @adapter: adapter to read
513 * @p: where to store the parameters
514 *
515 * Reads card parameters stored in VPD EEPROM.
516 */
int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	u32 cclk_param, cclk_val;
	int i, ret;
	int ec, sn;
	u8 vpd[VPD_LEN], csum;
	unsigned int vpdr_len, kw_offset, id_len;

	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
	if (ret < 0)
527 return ret;
528
	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
530 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
531 return -EINVAL;
532 }
533
534 id_len = pci_vpd_lrdt_size(vpd);
535 if (id_len > ID_LEN)
536 id_len = ID_LEN;
537
538 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
539 if (i < 0) {
540 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
541 return -EINVAL;
542 }
543
544 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
545 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
546 if (vpdr_len + kw_offset > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
548 return -EINVAL;
549 }
550
551#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
	if (var < 0) { \
554 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
555 return -EINVAL; \
556 } \
557 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
558} while (0)
559
560 FIND_VPD_KW(i, "RV");
561 for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
565 dev_err(adapter->pdev_dev,
566 "corrupted VPD EEPROM, actual csum %u\n", csum);
567 return -EINVAL;
568 }
569
	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);

	/*
583 * Ask firmware for the Core Clock since it knows how to translate the
584 * Reference Clock ('V2') VPD field into a Core Clock value ...
585 */
586 cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
587 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
588 ret = t4_query_params(adapter, adapter->mbox, 0, 0,
589 1, &cclk_param, &cclk_val);
590 if (ret)
591 return ret;
592 p->cclk = cclk_val;
593
	return 0;
595}
596
597/* serial flash and firmware constants */
598enum {
599 SF_ATTEMPTS = 10, /* max retries for SF operations */
600
601 /* flash command opcodes */
602 SF_PROG_PAGE = 2, /* program page */
603 SF_WR_DISABLE = 4, /* disable writes */
604 SF_RD_STATUS = 5, /* read status register */
605 SF_WR_ENABLE = 6, /* enable writes */
606 SF_RD_DATA_FAST = 0xb, /* read flash */
	SF_RD_ID = 0x9f,          /* read ID */
	SF_ERASE_SECTOR = 0xd8,   /* erase sector */

	FW_MAX_SIZE = 512 * 1024,
};
612
613/**
614 * sf1_read - read data from the serial flash
615 * @adapter: the adapter
616 * @byte_cnt: number of bytes to read
617 * @cont: whether another operation will be chained
618 * @lock: whether to lock SF for PL access only
619 * @valp: where to store the read data
620 *
621 * Reads up to 4 bytes of data from the serial flash. The location of
622 * the read needs to be specified prior to calling this by issuing the
623 * appropriate commands to the serial flash.
624 */
625static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
626 int lock, u32 *valp)
627{
628 int ret;
629
630 if (!byte_cnt || byte_cnt > 4)
631 return -EINVAL;
632 if (t4_read_reg(adapter, SF_OP) & BUSY)
633 return -EBUSY;
634 cont = cont ? SF_CONT : 0;
635 lock = lock ? SF_LOCK : 0;
636 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
637 ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
638 if (!ret)
639 *valp = t4_read_reg(adapter, SF_DATA);
640 return ret;
641}
642
643/**
644 * sf1_write - write data to the serial flash
645 * @adapter: the adapter
646 * @byte_cnt: number of bytes to write
647 * @cont: whether another operation will be chained
648 * @lock: whether to lock SF for PL access only
649 * @val: value to write
650 *
651 * Writes up to 4 bytes of data to the serial flash. The location of
652 * the write needs to be specified prior to calling this by issuing the
653 * appropriate commands to the serial flash.
654 */
655static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
656 int lock, u32 val)
657{
658 if (!byte_cnt || byte_cnt > 4)
659 return -EINVAL;
660 if (t4_read_reg(adapter, SF_OP) & BUSY)
661 return -EBUSY;
662 cont = cont ? SF_CONT : 0;
663 lock = lock ? SF_LOCK : 0;
664 t4_write_reg(adapter, SF_DATA, val);
665 t4_write_reg(adapter, SF_OP, lock |
666 cont | BYTECNT(byte_cnt - 1) | OP_WR);
667 return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
668}
669
670/**
671 * flash_wait_op - wait for a flash operation to complete
672 * @adapter: the adapter
673 * @attempts: max number of polls of the status register
674 * @delay: delay between polls in ms
675 *
676 * Wait for a flash operation to complete by polling the status register.
677 */
678static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
679{
680 int ret;
681 u32 status;
682
683 while (1) {
684 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
685 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
686 return ret;
687 if (!(status & 1))
688 return 0;
689 if (--attempts == 0)
690 return -EAGAIN;
691 if (delay)
692 msleep(delay);
693 }
694}
695
696/**
697 * t4_read_flash - read words from serial flash
698 * @adapter: the adapter
699 * @addr: the start address for the read
700 * @nwords: how many 32-bit words to read
701 * @data: where to store the read data
702 * @byte_oriented: whether to store data as bytes or as words
703 *
704 * Read the specified number of 32-bit words from the serial flash.
705 * If @byte_oriented is set the read data is stored as a byte array
706 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
708 */
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
712 int ret;
713
	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;
716
717 addr = swab32(addr) | SF_RD_DATA_FAST;
718
719 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
720 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
721 return ret;
722
723 for ( ; nwords; nwords--, data++) {
724 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
725 if (nwords == 1)
726 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
727 if (ret)
728 return ret;
729 if (byte_oriented)
730 *data = htonl(*data);
731 }
732 return 0;
733}
734
735/**
736 * t4_write_flash - write up to a page of data to the serial flash
737 * @adapter: the adapter
738 * @addr: the start address to write
739 * @n: length of data to write in bytes
740 * @data: the data to write
741 *
742 * Writes up to a page of data (256 bytes) to the serial flash starting
743 * at the given address. All the data must be written to the same page.
744 */
745static int t4_write_flash(struct adapter *adapter, unsigned int addr,
746 unsigned int n, const u8 *data)
747{
748 int ret;
749 u32 buf[64];
750 unsigned int i, c, left, val, offset = addr & 0xff;
751
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;
754
755 val = swab32(addr) | SF_PROG_PAGE;
756
757 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
758 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
759 goto unlock;
760
761 for (left = n; left; left -= c) {
762 c = min(left, 4U);
763 for (val = 0, i = 0; i < c; ++i)
764 val = (val << 8) + *data++;
765
766 ret = sf1_write(adapter, c, c != left, 1, val);
767 if (ret)
768 goto unlock;
769 }
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
772 goto unlock;
773
774 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
775
776 /* Read the page to verify the write succeeded */
777 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
778 if (ret)
779 return ret;
780
781 if (memcmp(data - n, (u8 *)buf + offset, n)) {
782 dev_err(adapter->pdev_dev,
783 "failed to correctly write the flash page at %#x\n",
784 addr);
785 return -EIO;
786 }
787 return 0;
788
789unlock:
790 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
791 return ret;
792}
793
794/**
795 * get_fw_version - read the firmware version
796 * @adapter: the adapter
797 * @vers: where to place the version
798 *
799 * Reads the FW version from flash.
800 */
801static int get_fw_version(struct adapter *adapter, u32 *vers)
802{
	return t4_read_flash(adapter, adapter->params.sf_fw_start +
			     offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
}
806
807/**
808 * get_tp_version - read the TP microcode version
809 * @adapter: the adapter
810 * @vers: where to place the version
811 *
812 * Reads the TP microcode version from flash.
813 */
814static int get_tp_version(struct adapter *adapter, u32 *vers)
815{
	return t4_read_flash(adapter, adapter->params.sf_fw_start +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
819}
820
821/**
822 * t4_check_fw_version - check if the FW is compatible with this driver
823 * @adapter: the adapter
824 *
825 * Checks if an adapter's FW is compatible with the driver. Returns 0
 * if there's an exact match, a negative error if the version could not be
827 * read or there's a major version mismatch, and a positive value if the
828 * expected major version is found but there's a minor version mismatch.
829 */
830int t4_check_fw_version(struct adapter *adapter)
831{
832 u32 api_vers[2];
833 int ret, major, minor, micro;
834
835 ret = get_fw_version(adapter, &adapter->params.fw_vers);
836 if (!ret)
837 ret = get_tp_version(adapter, &adapter->params.tp_vers);
838 if (!ret)
		ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
				    offsetof(struct fw_hdr, intfver_nic),
				    2, api_vers, 1);
	if (ret)
843 return ret;
844
845 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
846 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
847 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
848 memcpy(adapter->params.api_vers, api_vers,
849 sizeof(adapter->params.api_vers));
850
851 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
852 dev_err(adapter->pdev_dev,
853 "card FW has major version %u, driver wants %u\n",
854 major, FW_VERSION_MAJOR);
855 return -EINVAL;
856 }
857
858 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
859 return 0; /* perfect match */
860
861 /* Minor/micro version mismatch. Report it but often it's OK. */
862 return 1;
863}
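/*
 * Usage sketch (illustrative only; the example_* helper is not used by the
 * driver): callers are expected to treat the three return classes of
 * t4_check_fw_version() differently - fail on a negative error, proceed
 * silently on 0, and proceed with a warning on a positive value.
 */
static inline int example_check_fw(struct adapter *adapter)
{
	int ret = t4_check_fw_version(adapter);

	if (ret < 0)		/* unreadable version or major mismatch */
		return ret;
	if (ret > 0)		/* minor/micro mismatch: usable, but warn */
		dev_warn(adapter->pdev_dev,
			 "FW minor version mismatch, continuing anyway\n");
	return 0;
}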
864
865/**
866 * t4_flash_erase_sectors - erase a range of flash sectors
867 * @adapter: the adapter
868 * @start: the first sector to erase
869 * @end: the last sector to erase
870 *
871 * Erases the sectors in the given inclusive range.
872 */
873static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
874{
875 int ret = 0;
876
877 while (start <= end) {
878 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
879 (ret = sf1_write(adapter, 4, 0, 1,
880 SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			dev_err(adapter->pdev_dev,
883 "erase of flash sector %d failed, error %d\n",
884 start, ret);
885 break;
886 }
887 start++;
888 }
889 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
890 return ret;
891}
892
893/**
 * t4_flash_cfg_addr - return the address of the flash configuration file
895 * @adapter: the adapter
896 *
897 * Return the address within the flash where the Firmware Configuration
898 * File is stored.
899 */
900unsigned int t4_flash_cfg_addr(struct adapter *adapter)
901{
902 if (adapter->params.sf_size == 0x100000)
903 return FLASH_FPGA_CFG_START;
904 else
905 return FLASH_CFG_START;
906}
907
908/**
909 * t4_load_cfg - download config file
910 * @adap: the adapter
911 * @cfg_data: the cfg text file to write
912 * @size: text file size
913 *
914 * Write the supplied config text file to the card's serial flash.
915 */
916int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
917{
918 int ret, i, n;
919 unsigned int addr;
920 unsigned int flash_cfg_start_sec;
921 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
922
923 addr = t4_flash_cfg_addr(adap);
924 flash_cfg_start_sec = addr / SF_SEC_SIZE;
925
926 if (size > FLASH_CFG_MAX_SIZE) {
927 dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
928 FLASH_CFG_MAX_SIZE);
929 return -EFBIG;
930 }
931
932 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
933 sf_sec_size);
934 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
935 flash_cfg_start_sec + i - 1);
936 /*
937 * If size == 0 then we're simply erasing the FLASH sectors associated
938 * with the on-adapter Firmware Configuration File.
939 */
940 if (ret || size == 0)
941 goto out;
942
943 /* this will write to the flash up to SF_PAGE_SIZE at a time */
944 for (i = 0; i < size; i += SF_PAGE_SIZE) {
945 if ((size - i) < SF_PAGE_SIZE)
946 n = size - i;
947 else
948 n = SF_PAGE_SIZE;
949 ret = t4_write_flash(adap, addr, n, cfg_data);
950 if (ret)
951 goto out;
952
953 addr += SF_PAGE_SIZE;
954 cfg_data += SF_PAGE_SIZE;
955 }
956
957out:
958 if (ret)
959 dev_err(adap->pdev_dev, "config file %s failed %d\n",
960 (size == 0 ? "clear" : "download"), ret);
961 return ret;
962}
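/*
 * Usage sketch (illustrative only; the example_* helper is not used by the
 * driver): passing a zero length to t4_load_cfg() only erases the FLASH
 * sectors that hold the on-adapter Firmware Configuration File, so a stored
 * config can be cleared without writing a new one.  The data pointer is
 * never dereferenced in that case.
 */
static inline int example_clear_flash_cfg(struct adapter *adap)
{
	return t4_load_cfg(adap, NULL, 0);
}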
963
964/**
 * t4_load_fw - download firmware
966 * @adap: the adapter
967 * @fw_data: the firmware image to write
968 * @size: image size
969 *
970 * Write the supplied firmware image to the card's serial flash.
971 */
972int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
973{
974 u32 csum;
975 int ret, addr;
976 unsigned int i;
977 u8 first_page[SF_PAGE_SIZE];
978 const u32 *p = (const u32 *)fw_data;
979 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	if (!size) {
985 dev_err(adap->pdev_dev, "FW image has no data\n");
986 return -EINVAL;
987 }
988 if (size & 511) {
989 dev_err(adap->pdev_dev,
990 "FW image size not multiple of 512 bytes\n");
991 return -EINVAL;
992 }
993 if (ntohs(hdr->len512) * 512 != size) {
994 dev_err(adap->pdev_dev,
995 "FW image size differs from size in FW header\n");
996 return -EINVAL;
997 }
998 if (size > FW_MAX_SIZE) {
999 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
1000 FW_MAX_SIZE);
1001 return -EFBIG;
1002 }
1003
1004 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1005 csum += ntohl(p[i]);
1006
1007 if (csum != 0xffffffff) {
1008 dev_err(adap->pdev_dev,
1009 "corrupted firmware image, checksum %#x\n", csum);
1010 return -EINVAL;
1011 }
1012
	i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
1016 goto out;
1017
1018 /*
1019 * We write the correct version at the end so the driver can see a bad
1020 * version if the FW write fails. Start by writing a copy of the
1021 * first page with a bad version.
1022 */
1023 memcpy(first_page, fw_data, SF_PAGE_SIZE);
1024 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1031 addr += SF_PAGE_SIZE;
1032 fw_data += SF_PAGE_SIZE;
1033 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
1034 if (ret)
1035 goto out;
1036 }
1037
1038 ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
1041out:
1042 if (ret)
1043 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
1044 ret);
1045 return ret;
1046}
1047
1048#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1049 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
1050
1051/**
1052 * t4_link_start - apply link configuration to MAC/PHY
1053 * @phy: the PHY to setup
1054 * @mac: the MAC to setup
1055 * @lc: the requested link configuration
1056 *
1057 * Set up a port's MAC and PHY according to a desired link configuration.
1058 * - If the PHY can auto-negotiate first decide what to advertise, then
1059 * enable/disable auto-negotiation as desired, and reset.
1060 * - If the PHY does not auto-negotiate just reset it.
1061 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1062 * otherwise do it later based on the outcome of auto-negotiation.
1063 */
1064int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1065 struct link_config *lc)
1066{
1067 struct fw_port_cmd c;
1068 unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
1069
1070 lc->link_ok = 0;
1071 if (lc->requested_fc & PAUSE_RX)
1072 fc |= FW_PORT_CAP_FC_RX;
1073 if (lc->requested_fc & PAUSE_TX)
1074 fc |= FW_PORT_CAP_FC_TX;
1075
1076 memset(&c, 0, sizeof(c));
1077 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
1078 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
1079 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1080 FW_LEN16(c));
1081
1082 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1083 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1084 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1085 } else if (lc->autoneg == AUTONEG_DISABLE) {
1086 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1087 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1088 } else
1089 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1090
1091 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1092}
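/*
 * Usage sketch (illustrative only; the example_* helper is not used by the
 * driver): a caller drives t4_link_start() through the link_config it keeps
 * per port, e.g. to request both pause directions and then reapply the
 * current configuration.
 */
static inline int example_set_pause(struct adapter *adap, unsigned int mbox,
				    unsigned int port, struct link_config *lc)
{
	lc->requested_fc |= PAUSE_RX | PAUSE_TX;
	return t4_link_start(adap, mbox, port, lc);
}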
1093
1094/**
1095 * t4_restart_aneg - restart autonegotiation
1096 * @adap: the adapter
1097 * @mbox: mbox to use for the FW command
1098 * @port: the port id
1099 *
1100 * Restarts autonegotiation for the selected port.
1101 */
1102int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1103{
1104 struct fw_port_cmd c;
1105
1106 memset(&c, 0, sizeof(c));
1107 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
1108 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
1109 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1110 FW_LEN16(c));
1111 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1112 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1113}
1114
typedef void (*int_handler_t)(struct adapter *adap);

struct intr_info {
1118 unsigned int mask; /* bits to check in interrupt status */
1119 const char *msg; /* message to print or NULL */
1120 short stat_idx; /* stat counter to increment or -1 */
1121 unsigned short fatal; /* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};
1124
1125/**
1126 * t4_handle_intr_status - table driven interrupt handler
1127 * @adapter: the adapter that generated the interrupt
1128 * @reg: the interrupt status register to process
1129 * @acts: table of interrupt actions
1130 *
1131 * A table driven interrupt handler that applies a set of masks to an
1132 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally emitting a warning or alert message. The table is terminated
1135 * by an entry specifying mask 0. Returns the number of fatal interrupt
1136 * conditions.
1137 */
1138static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1139 const struct intr_info *acts)
1140{
1141 int fatal = 0;
1142 unsigned int mask = 0;
1143 unsigned int status = t4_read_reg(adapter, reg);
1144
1145 for ( ; acts->mask; ++acts) {
1146 if (!(status & acts->mask))
1147 continue;
1148 if (acts->fatal) {
1149 fatal++;
1150 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1151 status & acts->mask);
1152 } else if (acts->msg && printk_ratelimit())
1153 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1154 status & acts->mask);
		if (acts->int_handler)
			acts->int_handler(adapter);
		mask |= acts->mask;
1158 }
1159 status &= mask;
1160 if (status) /* clear processed interrupts */
1161 t4_write_reg(adapter, reg, status);
1162 return fatal;
1163}
1164
1165/*
1166 * Interrupt handler for the PCIE module.
1167 */
1168static void pcie_intr_handler(struct adapter *adapter)
1169{
	static const struct intr_info sysbus_intr_info[] = {
		{ RNPP, "RXNP array parity error", -1, 1 },
1172 { RPCP, "RXPC array parity error", -1, 1 },
1173 { RCIP, "RXCIF array parity error", -1, 1 },
1174 { RCCP, "Rx completions control array parity error", -1, 1 },
1175 { RFTP, "RXFT array parity error", -1, 1 },
1176 { 0 }
1177 };
	static const struct intr_info pcie_port_intr_info[] = {
		{ TPCP, "TXPC array parity error", -1, 1 },
1180 { TNPP, "TXNP array parity error", -1, 1 },
1181 { TFTP, "TXFT array parity error", -1, 1 },
1182 { TCAP, "TXCA array parity error", -1, 1 },
1183 { TCIP, "TXCIF array parity error", -1, 1 },
1184 { RCAP, "RXCA array parity error", -1, 1 },
1185 { OTDD, "outbound request TLP discarded", -1, 1 },
1186 { RDPE, "Rx data parity error", -1, 1 },
1187 { TDUE, "Tx uncorrectable data error", -1, 1 },
1188 { 0 }
1189 };
	static const struct intr_info pcie_intr_info[] = {
		{ MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1192 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1193 { MSIDATAPERR, "MSI data parity error", -1, 1 },
1194 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1195 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1196 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1197 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1198 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1199 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1200 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1201 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1202 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1203 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1204 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1205 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1206 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1207 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1208 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1209 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1210 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1211 { FIDPERR, "PCI FID parity error", -1, 1 },
1212 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1213 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
1214 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1215 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1216 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
1217 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
1218 { PCIESINT, "PCI core secondary fault", -1, 1 },
1219 { PCIEPINT, "PCI core primary fault", -1, 1 },
1220 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
1221 { 0 }
1222 };
1223
1224 int fat;
1225
1226 fat = t4_handle_intr_status(adapter,
1227 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1228 sysbus_intr_info) +
1229 t4_handle_intr_status(adapter,
1230 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1231 pcie_port_intr_info) +
1232 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
1233 if (fat)
1234 t4_fatal_err(adapter);
1235}
1236
1237/*
1238 * TP interrupt handler.
1239 */
1240static void tp_intr_handler(struct adapter *adapter)
1241{
	static const struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
1244 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1245 { 0 }
1246 };
1247
1248 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1249 t4_fatal_err(adapter);
1250}
1251
1252/*
1253 * SGE interrupt handler.
1254 */
1255static void sge_intr_handler(struct adapter *adapter)
1256{
1257 u64 v;
1258
	static const struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE,
1261 "SGE received CPL exceeding IQE size", -1, 1 },
1262 { ERR_INVALID_CIDX_INC,
1263 "SGE GTS CIDX increment too large", -1, 0 },
1264 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
		{ DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
		{ ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1269 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1270 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1271 0 },
1272 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1273 0 },
1274 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1275 0 },
1276 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1277 0 },
1278 { ERR_ING_CTXT_PRIO,
1279 "SGE too many priority ingress contexts", -1, 0 },
1280 { ERR_EGR_CTXT_PRIO,
1281 "SGE too many priority egress contexts", -1, 0 },
1282 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1283 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1284 { 0 }
1285 };
1286
1287 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
	    ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
	if (v) {
		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
			  (unsigned long long)v);
		t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1293 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1294 }
1295
1296 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1297 v != 0)
1298 t4_fatal_err(adapter);
1299}
1300
1301/*
1302 * CIM interrupt handler.
1303 */
1304static void cim_intr_handler(struct adapter *adapter)
1305{
	static const struct intr_info cim_intr_info[] = {
		{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1308 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1309 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1310 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1311 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1312 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1313 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1314 { 0 }
1315 };
	static const struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1318 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1319 { ILLWRINT, "CIM illegal write", -1, 1 },
1320 { ILLRDINT, "CIM illegal read", -1, 1 },
1321 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1322 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1323 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1324 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1325 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1326 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1327 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1328 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1329 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1330 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1331 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1332 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1333 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1334 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1335 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1336 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1337 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1338 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1339 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1340 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1341 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1342 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1343 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1344 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1345 { 0 }
1346 };
1347
1348 int fat;
1349
1350 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1351 cim_intr_info) +
1352 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1353 cim_upintr_info);
1354 if (fat)
1355 t4_fatal_err(adapter);
1356}
1357
1358/*
1359 * ULP RX interrupt handler.
1360 */
1361static void ulprx_intr_handler(struct adapter *adapter)
1362{
	static const struct intr_info ulprx_intr_info[] = {
		{ 0x1800000, "ULPRX context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
1366 { 0 }
1367 };
1368
1369 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1370 t4_fatal_err(adapter);
1371}
1372
1373/*
1374 * ULP TX interrupt handler.
1375 */
1376static void ulptx_intr_handler(struct adapter *adapter)
1377{
	static const struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1380 0 },
1381 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1382 0 },
1383 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1384 0 },
1385 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1386 0 },
1387 { 0xfffffff, "ULPTX parity error", -1, 1 },
1388 { 0 }
1389 };
1390
1391 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1392 t4_fatal_err(adapter);
1393}
1394
1395/*
1396 * PM TX interrupt handler.
1397 */
1398static void pmtx_intr_handler(struct adapter *adapter)
1399{
	static const struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1402 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1403 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1404 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1405 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1406 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1407 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1408 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1409 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1410 { 0 }
1411 };
1412
1413 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1414 t4_fatal_err(adapter);
1415}
1416
1417/*
1418 * PM RX interrupt handler.
1419 */
1420static void pmrx_intr_handler(struct adapter *adapter)
1421{
	static const struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1424 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1425 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1426 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1427 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1428 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1429 { 0 }
1430 };
1431
1432 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1433 t4_fatal_err(adapter);
1434}
1435
1436/*
1437 * CPL switch interrupt handler.
1438 */
1439static void cplsw_intr_handler(struct adapter *adapter)
1440{
	static const struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1443 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1444 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1445 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1446 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1447 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1448 { 0 }
1449 };
1450
1451 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1452 t4_fatal_err(adapter);
1453}
1454
1455/*
1456 * LE interrupt handler.
1457 */
1458static void le_intr_handler(struct adapter *adap)
1459{
	static const struct intr_info le_intr_info[] = {
		{ LIPMISS, "LE LIP miss", -1, 0 },
1462 { LIP0, "LE 0 LIP error", -1, 0 },
1463 { PARITYERR, "LE parity error", -1, 1 },
1464 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1465 { REQQPARERR, "LE request queue parity error", -1, 1 },
1466 { 0 }
1467 };
1468
1469 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1470 t4_fatal_err(adap);
1471}
1472
1473/*
1474 * MPS interrupt handler.
1475 */
1476static void mps_intr_handler(struct adapter *adapter)
1477{
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
1480 { 0 }
1481 };
	static const struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1484 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1485 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1486 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1487 { BUBBLE, "MPS Tx underflow", -1, 1 },
1488 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1489 { FRMERR, "MPS Tx framing error", -1, 1 },
1490 { 0 }
1491 };
	static const struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM, "MPS TRC filter parity error", -1, 1 },
1494 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1495 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1496 { 0 }
1497 };
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1500 { 0 }
1501 };
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1504 { 0 }
1505 };
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1508 { 0 }
1509 };
Joe Perches005b5712010-12-14 21:36:53 +00001510 static const struct intr_info mps_cls_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001511 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1512 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1513 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1514 { 0 }
1515 };
1516
1517 int fat;
1518
1519 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1520 mps_rx_intr_info) +
1521 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1522 mps_tx_intr_info) +
1523 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1524 mps_trc_intr_info) +
1525 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1526 mps_stat_sram_intr_info) +
1527 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1528 mps_stat_tx_intr_info) +
1529 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1530 mps_stat_rx_intr_info) +
1531 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1532 mps_cls_intr_info);
1533
1534 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1535 RXINT | TXINT | STATINT);
1536 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1537 if (fat)
1538 t4_fatal_err(adapter);
1539}
1540
1541#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1542
1543/*
1544 * EDC/MC interrupt handler.
1545 */
1546static void mem_intr_handler(struct adapter *adapter, int idx)
1547{
1548 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
1549
1550 unsigned int addr, cnt_addr, v;
1551
1552 if (idx <= MEM_EDC1) {
1553 addr = EDC_REG(EDC_INT_CAUSE, idx);
1554 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1555 } else {
1556 addr = MC_INT_CAUSE;
1557 cnt_addr = MC_ECC_STATUS;
1558 }
1559
1560 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1561 if (v & PERR_INT_CAUSE)
1562 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1563 name[idx]);
1564 if (v & ECC_CE_INT_CAUSE) {
1565 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1566
1567 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1568 if (printk_ratelimit())
1569 dev_warn(adapter->pdev_dev,
1570 "%u %s correctable ECC data error%s\n",
1571 cnt, name[idx], cnt > 1 ? "s" : "");
1572 }
1573 if (v & ECC_UE_INT_CAUSE)
1574 dev_alert(adapter->pdev_dev,
1575 "%s uncorrectable ECC data error\n", name[idx]);
1576
1577 t4_write_reg(adapter, addr, v);
1578 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1579 t4_fatal_err(adapter);
1580}
1581
1582/*
1583 * MA interrupt handler.
1584 */
1585static void ma_intr_handler(struct adapter *adap)
1586{
1587 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1588
1589 if (status & MEM_PERR_INT_CAUSE)
1590 dev_alert(adap->pdev_dev,
1591 "MA parity error, parity status %#x\n",
1592 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1593 if (status & MEM_WRAP_INT_CAUSE) {
1594 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1595 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1596 "client %u to address %#x\n",
1597 MEM_WRAP_CLIENT_NUM_GET(v),
1598 MEM_WRAP_ADDRESS_GET(v) << 4);
1599 }
1600 t4_write_reg(adap, MA_INT_CAUSE, status);
1601 t4_fatal_err(adap);
1602}
1603
1604/*
1605 * SMB interrupt handler.
1606 */
1607static void smb_intr_handler(struct adapter *adap)
1608{
Joe Perches005b5712010-12-14 21:36:53 +00001609 static const struct intr_info smb_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001610 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1611 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1612 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1613 { 0 }
1614 };
1615
1616 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1617 t4_fatal_err(adap);
1618}
1619
1620/*
1621 * NC-SI interrupt handler.
1622 */
1623static void ncsi_intr_handler(struct adapter *adap)
1624{
Joe Perches005b5712010-12-14 21:36:53 +00001625 static const struct intr_info ncsi_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001626 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1627 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1628 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1629 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1630 { 0 }
1631 };
1632
1633 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1634 t4_fatal_err(adap);
1635}
1636
1637/*
1638 * XGMAC interrupt handler.
1639 */
1640static void xgmac_intr_handler(struct adapter *adap, int port)
1641{
1642 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
1643
1644 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1645 if (!v)
1646 return;
1647
1648 if (v & TXFIFO_PRTY_ERR)
1649 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1650 port);
1651 if (v & RXFIFO_PRTY_ERR)
1652 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1653 port);
1654 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1655 t4_fatal_err(adap);
1656}
1657
1658/*
1659 * PL interrupt handler.
1660 */
1661static void pl_intr_handler(struct adapter *adap)
1662{
Joe Perches005b5712010-12-14 21:36:53 +00001663 static const struct intr_info pl_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001664 { FATALPERR, "T4 fatal parity error", -1, 1 },
1665 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1666 { 0 }
1667 };
1668
1669 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1670 t4_fatal_err(adap);
1671}
1672
Dimitris Michailidis63bccee2010-08-02 13:19:16 +00001673#define PF_INTR_MASK (PFSW)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001674#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1675 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1676 CPL_SWITCH | SGE | ULP_TX)
1677
1678/**
1679 * t4_slow_intr_handler - control path interrupt handler
1680 * @adapter: the adapter
1681 *
1682 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1683 * The designation 'slow' is because it involves register reads, while
1684 * data interrupts typically don't involve any MMIOs.
1685 */
1686int t4_slow_intr_handler(struct adapter *adapter)
1687{
1688 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1689
1690 if (!(cause & GLBL_INTR_MASK))
1691 return 0;
1692 if (cause & CIM)
1693 cim_intr_handler(adapter);
1694 if (cause & MPS)
1695 mps_intr_handler(adapter);
1696 if (cause & NCSI)
1697 ncsi_intr_handler(adapter);
1698 if (cause & PL)
1699 pl_intr_handler(adapter);
1700 if (cause & SMB)
1701 smb_intr_handler(adapter);
1702 if (cause & XGMAC0)
1703 xgmac_intr_handler(adapter, 0);
1704 if (cause & XGMAC1)
1705 xgmac_intr_handler(adapter, 1);
1706 if (cause & XGMAC_KR0)
1707 xgmac_intr_handler(adapter, 2);
1708 if (cause & XGMAC_KR1)
1709 xgmac_intr_handler(adapter, 3);
1710 if (cause & PCIE)
1711 pcie_intr_handler(adapter);
1712 if (cause & MC)
1713 mem_intr_handler(adapter, MEM_MC);
1714 if (cause & EDC0)
1715 mem_intr_handler(adapter, MEM_EDC0);
1716 if (cause & EDC1)
1717 mem_intr_handler(adapter, MEM_EDC1);
1718 if (cause & LE)
1719 le_intr_handler(adapter);
1720 if (cause & TP)
1721 tp_intr_handler(adapter);
1722 if (cause & MA)
1723 ma_intr_handler(adapter);
1724 if (cause & PM_TX)
1725 pmtx_intr_handler(adapter);
1726 if (cause & PM_RX)
1727 pmrx_intr_handler(adapter);
1728 if (cause & ULP_RX)
1729 ulprx_intr_handler(adapter);
1730 if (cause & CPL_SWITCH)
1731 cplsw_intr_handler(adapter);
1732 if (cause & SGE)
1733 sge_intr_handler(adapter);
1734 if (cause & ULP_TX)
1735 ulptx_intr_handler(adapter);
1736
1737 /* Clear the interrupts just processed for which we are the master. */
1738 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1739 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1740 return 1;
1741}
1742
1743/**
1744 * t4_intr_enable - enable interrupts
1745 * @adapter: the adapter whose interrupts should be enabled
1746 *
1747 * Enable PF-specific interrupts for the calling function and the top-level
1748 * interrupt concentrator for global interrupts. Interrupts are already
1749 * enabled at each module; here we just enable the roots of the interrupt
1750 * hierarchies.
1751 *
1752 * Note: this function should be called only when the driver manages
1753 * non-PF-specific interrupts from the various HW modules. Only one PCI
1754 * function at a time should be doing this.
1755 */
1756void t4_intr_enable(struct adapter *adapter)
1757{
1758 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1759
1760 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1761 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1762 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1763 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1764 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1765 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1766 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
Vipul Pandya840f3002012-09-05 02:01:55 +00001767 DBFIFO_HP_INT | DBFIFO_LP_INT |
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001768 EGRESS_SIZE_ERR);
1769 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1770 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1771}
1772
1773/**
1774 * t4_intr_disable - disable interrupts
1775 * @adapter: the adapter whose interrupts should be disabled
1776 *
1777 * Disable interrupts. We only disable the top-level interrupt
1778 * concentrators. The caller must be a PCI function managing global
1779 * interrupts.
1780 */
1781void t4_intr_disable(struct adapter *adapter)
1782{
1783 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1784
1785 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1786 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1787}
1788
1789/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001790 * hash_mac_addr - return the hash value of a MAC address
1791 * @addr: the 48-bit Ethernet MAC address
1792 *
1793 * Hashes a MAC address according to the hash function used by HW inexact
1794 * (hash) address matching.
1795 */
1796static int hash_mac_addr(const u8 *addr)
1797{
1798 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1799 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1800 a ^= b;
1801 a ^= (a >> 12);
1802 a ^= (a >> 6);
1803 return a & 0x3f;
1804}
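
/*
 * Illustrative sketch (not part of the upstream driver): building the
 * 64-bit inexact-match hash vector for a small set of addresses.  Each
 * address selects one of 64 bits via hash_mac_addr(); the resulting
 * vector can then be programmed with t4_set_addr_hash() further below.
 * The helper name is hypothetical.
 */
static inline u64 example_hash_vector(const u8 (*addrs)[6], unsigned int n)
{
	u64 vec = 0;

	while (n--)
		vec |= 1ULL << hash_mac_addr(addrs[n]);
	return vec;
}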
1805
1806/**
1807 * t4_config_rss_range - configure a portion of the RSS mapping table
1808 * @adapter: the adapter
1809 * @mbox: mbox to use for the FW command
1810 * @viid: virtual interface whose RSS subtable is to be written
1811 * @start: start entry in the table to write
1812 * @n: how many table entries to write
1813 * @rspq: values for the response queue lookup table
1814 * @nrspq: number of values in @rspq
1815 *
1816 * Programs the selected part of the VI's RSS mapping table with the
1817 * provided values. If @nrspq < @n the supplied values are used repeatedly
1818 * until the full table range is populated.
1819 *
1820 * The caller must ensure the values in @rspq are in the range allowed for
1821 * @viid.
1822 */
1823int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1824 int start, int n, const u16 *rspq, unsigned int nrspq)
1825{
1826 int ret;
1827 const u16 *rsp = rspq;
1828 const u16 *rsp_end = rspq + nrspq;
1829 struct fw_rss_ind_tbl_cmd cmd;
1830
1831 memset(&cmd, 0, sizeof(cmd));
1832 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
1833 FW_CMD_REQUEST | FW_CMD_WRITE |
1834 FW_RSS_IND_TBL_CMD_VIID(viid));
1835 cmd.retval_len16 = htonl(FW_LEN16(cmd));
1836
1837 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
1838 while (n > 0) {
1839 int nq = min(n, 32);
1840 __be32 *qp = &cmd.iq0_to_iq2;
1841
1842 cmd.niqid = htons(nq);
1843 cmd.startidx = htons(start);
1844
1845 start += nq;
1846 n -= nq;
1847
1848 while (nq > 0) {
1849 unsigned int v;
1850
1851 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
1852 if (++rsp >= rsp_end)
1853 rsp = rspq;
1854 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
1855 if (++rsp >= rsp_end)
1856 rsp = rspq;
1857 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
1858 if (++rsp >= rsp_end)
1859 rsp = rspq;
1860
1861 *qp++ = htonl(v);
1862 nq -= 3;
1863 }
1864
1865 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
1866 if (ret)
1867 return ret;
1868 }
1869 return 0;
1870}
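
/*
 * Illustrative sketch (not part of the upstream driver): populating a
 * VI's whole RSS slice of rss_size entries (as returned by t4_alloc_vi())
 * with the absolute ingress queue IDs in rspq[], repeating them as needed.
 * The wrapper name and its parameters are hypothetical.
 */
static inline int example_write_rss(struct adapter *adap, int mbox,
				    unsigned int viid, unsigned int rss_size,
				    const u16 *rspq, unsigned int nrspq)
{
	/* start at entry 0 of the VI's slice; rspq[] is reused until full */
	return t4_config_rss_range(adap, mbox, viid, 0, rss_size, rspq, nrspq);
}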
1871
1872/**
1873 * t4_config_glbl_rss - configure the global RSS mode
1874 * @adapter: the adapter
1875 * @mbox: mbox to use for the FW command
1876 * @mode: global RSS mode
1877 * @flags: mode-specific flags
1878 *
1879 * Sets the global RSS mode.
1880 */
1881int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1882 unsigned int flags)
1883{
1884 struct fw_rss_glb_config_cmd c;
1885
1886 memset(&c, 0, sizeof(c));
1887 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
1888 FW_CMD_REQUEST | FW_CMD_WRITE);
1889 c.retval_len16 = htonl(FW_LEN16(c));
1890 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
1891 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1892 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
1893 c.u.basicvirtual.mode_pkd =
1894 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1895 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
1896 } else
1897 return -EINVAL;
1898 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1899}
1900
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001901/**
1902 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
1903 * @adap: the adapter
1904 * @v4: holds the TCP/IP counter values
1905 * @v6: holds the TCP/IPv6 counter values
1906 *
1907 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
1908 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
1909 */
1910void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1911 struct tp_tcp_stats *v6)
1912{
1913 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
1914
1915#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
1916#define STAT(x) val[STAT_IDX(x)]
1917#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
1918
1919 if (v4) {
1920 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1921 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
1922 v4->tcpOutRsts = STAT(OUT_RST);
1923 v4->tcpInSegs = STAT64(IN_SEG);
1924 v4->tcpOutSegs = STAT64(OUT_SEG);
1925 v4->tcpRetransSegs = STAT64(RXT_SEG);
1926 }
1927 if (v6) {
1928 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1929 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
1930 v6->tcpOutRsts = STAT(OUT_RST);
1931 v6->tcpInSegs = STAT64(IN_SEG);
1932 v6->tcpOutSegs = STAT64(OUT_SEG);
1933 v6->tcpRetransSegs = STAT64(RXT_SEG);
1934 }
1935#undef STAT64
1936#undef STAT
1937#undef STAT_IDX
1938}
1939
1940/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001941 * t4_read_mtu_tbl - returns the values in the HW path MTU table
1942 * @adap: the adapter
1943 * @mtus: where to store the MTU values
1944 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
1945 *
1946 * Reads the HW path MTU table.
1947 */
1948void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
1949{
1950 u32 v;
1951 int i;
1952
1953 for (i = 0; i < NMTUS; ++i) {
1954 t4_write_reg(adap, TP_MTU_TABLE,
1955 MTUINDEX(0xff) | MTUVALUE(i));
1956 v = t4_read_reg(adap, TP_MTU_TABLE);
1957 mtus[i] = MTUVALUE_GET(v);
1958 if (mtu_log)
1959 mtu_log[i] = MTUWIDTH_GET(v);
1960 }
1961}
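
/*
 * Illustrative sketch (not part of the upstream driver): dumping the HW
 * path MTU table, e.g. for debugging.  The helper name is hypothetical.
 */
static inline void example_dump_mtus(struct adapter *adap)
{
	u16 mtus[NMTUS];
	int i;

	t4_read_mtu_tbl(adap, mtus, NULL);	/* MTU widths not needed here */
	for (i = 0; i < NMTUS; i++)
		dev_info(adap->pdev_dev, "path MTU[%d] = %u\n", i, mtus[i]);
}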
1962
1963/**
Vipul Pandya636f9d32012-09-26 02:39:39 +00001964 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
1965 * @adap: the adapter
1966 * @addr: the indirect TP register address
1967 * @mask: specifies the field within the register to modify
1968 * @val: new value for the field
1969 *
1970 * Sets a field of an indirect TP register to the given value.
1971 */
1972void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
1973 unsigned int mask, unsigned int val)
1974{
1975 t4_write_reg(adap, TP_PIO_ADDR, addr);
1976 val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
1977 t4_write_reg(adap, TP_PIO_DATA, val);
1978}
1979
1980/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001981 * init_cong_ctrl - initialize congestion control parameters
1982 * @a: the alpha values for congestion control
1983 * @b: the beta values for congestion control
1984 *
1985 * Initialize the congestion control parameters.
1986 */
1987static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
1988{
1989 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
1990 a[9] = 2;
1991 a[10] = 3;
1992 a[11] = 4;
1993 a[12] = 5;
1994 a[13] = 6;
1995 a[14] = 7;
1996 a[15] = 8;
1997 a[16] = 9;
1998 a[17] = 10;
1999 a[18] = 14;
2000 a[19] = 17;
2001 a[20] = 21;
2002 a[21] = 25;
2003 a[22] = 30;
2004 a[23] = 35;
2005 a[24] = 45;
2006 a[25] = 60;
2007 a[26] = 80;
2008 a[27] = 100;
2009 a[28] = 200;
2010 a[29] = 300;
2011 a[30] = 400;
2012 a[31] = 500;
2013
2014 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2015 b[9] = b[10] = 1;
2016 b[11] = b[12] = 2;
2017 b[13] = b[14] = b[15] = b[16] = 3;
2018 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2019 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2020 b[28] = b[29] = 6;
2021 b[30] = b[31] = 7;
2022}
2023
2024/* The minimum additive increment value for the congestion control table */
2025#define CC_MIN_INCR 2U
2026
2027/**
2028 * t4_load_mtus - write the MTU and congestion control HW tables
2029 * @adap: the adapter
2030 * @mtus: the values for the MTU table
2031 * @alpha: the values for the congestion control alpha parameter
2032 * @beta: the values for the congestion control beta parameter
2033 *
2034 * Write the HW MTU table with the supplied MTUs and the high-speed
2035 * congestion control table with the supplied alpha, beta, and MTUs.
2036 * We write the two tables together because the additive increments
2037 * depend on the MTUs.
2038 */
2039void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2040 const unsigned short *alpha, const unsigned short *beta)
2041{
2042 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2043 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2044 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2045 28672, 40960, 57344, 81920, 114688, 163840, 229376
2046 };
2047
2048 unsigned int i, w;
2049
2050 for (i = 0; i < NMTUS; ++i) {
2051 unsigned int mtu = mtus[i];
2052 unsigned int log2 = fls(mtu);
2053
2054 if (!(mtu & ((1 << log2) >> 2))) /* round */
2055 log2--;
2056 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
2057 MTUWIDTH(log2) | MTUVALUE(mtu));
2058
2059 for (w = 0; w < NCCTRL_WIN; ++w) {
2060 unsigned int inc;
2061
2062 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2063 CC_MIN_INCR);
2064
2065 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
2066 (w << 16) | (beta[w] << 13) | inc);
2067 }
2068 }
2069}
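
/*
 * Illustrative sketch (not part of the upstream driver): programming the
 * HW MTU and congestion control tables during probe.  @mtus is assumed to
 * point at NMTUS entries (the driver's default MTU table, for instance);
 * the caller name is hypothetical.
 */
static void __devinit example_setup_mtus(struct adapter *adap,
					 const unsigned short *mtus)
{
	unsigned short a[NCCTRL_WIN], b[NCCTRL_WIN];

	init_cong_ctrl(a, b);			/* default alpha/beta values */
	t4_load_mtus(adap, mtus, a, b);		/* write both HW tables together */
}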
2070
2071/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002072 * get_mps_bg_map - return the buffer groups associated with a port
2073 * @adap: the adapter
2074 * @idx: the port index
2075 *
2076 * Returns a bitmap indicating which MPS buffer groups are associated
2077 * with the given port. Bit i is set if buffer group i is used by the
2078 * port.
2079 */
2080static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2081{
2082 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2083
2084 if (n == 0)
2085 return idx == 0 ? 0xf : 0;
2086 if (n == 1)
2087 return idx < 2 ? (3 << (2 * idx)) : 0;
2088 return 1 << idx;
2089}
2090
2091/**
2092 * t4_get_port_stats - collect port statistics
2093 * @adap: the adapter
2094 * @idx: the port index
2095 * @p: the stats structure to fill
2096 *
2097 * Collect statistics related to the given port from HW.
2098 */
2099void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2100{
2101 u32 bgmap = get_mps_bg_map(adap, idx);
2102
2103#define GET_STAT(name) \
2104 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
2105#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2106
2107 p->tx_octets = GET_STAT(TX_PORT_BYTES);
2108 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
2109 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
2110 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
2111 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
2112 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
2113 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
2114 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
2115 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
2116 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
2117 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
2118 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2119 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
2120 p->tx_drop = GET_STAT(TX_PORT_DROP);
2121 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
2122 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
2123 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
2124 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
2125 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
2126 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
2127 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
2128 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
2129 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
2130
2131 p->rx_octets = GET_STAT(RX_PORT_BYTES);
2132 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
2133 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
2134 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
2135 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
2136 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
2137 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2138 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
2139 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
2140 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
2141 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
2142 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
2143 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
2144 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
2145 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
2146 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
2147 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2148 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
2149 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
2150 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
2151 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
2152 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
2153 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
2154 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
2155 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
2156 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
2157 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
2158
2159 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2160 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2161 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2162 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2163 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2164 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2165 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2166 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2167
2168#undef GET_STAT
2169#undef GET_STAT_COM
2170}
2171
2172/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002173 * t4_wol_magic_enable - enable/disable magic packet WoL
2174 * @adap: the adapter
2175 * @port: the physical port index
2176 * @addr: MAC address expected in magic packets, %NULL to disable
2177 *
2178 * Enables/disables magic packet wake-on-LAN for the selected port.
2179 */
2180void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2181 const u8 *addr)
2182{
2183 if (addr) {
2184 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
2185 (addr[2] << 24) | (addr[3] << 16) |
2186 (addr[4] << 8) | addr[5]);
2187 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
2188 (addr[0] << 8) | addr[1]);
2189 }
2190 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
2191 addr ? MAGICEN : 0);
2192}
2193
2194/**
2195 * t4_wol_pat_enable - enable/disable pattern-based WoL
2196 * @adap: the adapter
2197 * @port: the physical port index
2198 * @map: bitmap of which HW pattern filters to set
2199 * @mask0: byte mask for bytes 0-63 of a packet
2200 * @mask1: byte mask for bytes 64-127 of a packet
2201 * @crc: Ethernet CRC for selected bytes
2202 * @enable: enable/disable switch
2203 *
2204 * Sets the pattern filters indicated in @map to mask out the bytes
2205 * specified in @mask0/@mask1 in received packets and compare the CRC of
2206 * the resulting packet against @crc. If @enable is %true pattern-based
2207 * WoL is enabled, otherwise disabled.
2208 */
2209int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2210 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2211{
2212 int i;
2213
2214 if (!enable) {
2215 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
2216 PATEN, 0);
2217 return 0;
2218 }
2219 if (map > 0xff)
2220 return -EINVAL;
2221
2222#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
2223
2224 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2225 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2226 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2227
2228 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2229 if (!(map & 1))
2230 continue;
2231
2232 /* write byte masks */
2233 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2234 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2235 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2236 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2237 return -ETIMEDOUT;
2238
2239 /* write CRC */
2240 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2241 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2242 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2243 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2244 return -ETIMEDOUT;
2245 }
2246#undef EPIO_REG
2247
2248 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
2249 return 0;
2250}
2251
2252#define INIT_CMD(var, cmd, rd_wr) do { \
2253 (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
2254 FW_CMD_REQUEST | FW_CMD_##rd_wr); \
2255 (var).retval_len16 = htonl(FW_LEN16(var)); \
2256} while (0)
2257
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302258int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2259 u32 addr, u32 val)
2260{
2261 struct fw_ldst_cmd c;
2262
2263 memset(&c, 0, sizeof(c));
Vipul Pandya636f9d32012-09-26 02:39:39 +00002264 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2265 FW_CMD_WRITE |
2266 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302267 c.cycles_to_len16 = htonl(FW_LEN16(c));
2268 c.u.addrval.addr = htonl(addr);
2269 c.u.addrval.val = htonl(val);
2270
2271 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2272}
2273
Ben Hutchings49ce9c22012-07-10 10:56:00 +00002274/**
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302275 * t4_mem_win_read_len - read memory through PCIE memory window
2276 * @adap: the adapter
2277 * @addr: address of first byte requested aligned on 32b.
2278 * @data: len bytes to hold the data read
2279 * @len: amount of data to read from window. Must be <=
2280 * MEMWIN0_APERTURE after adjusting for 16B alignment
2281 * requirements of the memory window.
2282 *
2283 * Read len bytes of data from MC starting at @addr.
2284 */
2285int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
2286{
2287 int i;
2288 int off;
2289
2290 /*
2291 * Align on a 16B boundary.
2292 */
2293 off = addr & 15;
2294 if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
2295 return -EINVAL;
2296
Vipul Pandya840f3002012-09-05 02:01:55 +00002297 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15);
2298 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302299
2300 for (i = 0; i < len; i += 4)
2301 *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i));
2302
2303 return 0;
2304}
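
/*
 * Illustrative sketch (not part of the upstream driver): pulling one
 * 64-byte chunk out of MC through memory window 0.  The caller name is
 * hypothetical.
 */
static inline int example_read_mc_chunk(struct adapter *adap, u32 addr,
					__be32 buf[16])
{
	/*
	 * addr must be 32-bit aligned and 64 + (addr & 15) must fit within
	 * MEMWIN0_APERTURE; t4_mem_win_read_len() checks both for us.
	 */
	return t4_mem_win_read_len(adap, addr, buf, 64);
}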
2305
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002306/**
2307 * t4_mdio_rd - read a PHY register through MDIO
2308 * @adap: the adapter
2309 * @mbox: mailbox to use for the FW command
2310 * @phy_addr: the PHY address
2311 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2312 * @reg: the register to read
2313 * @valp: where to store the value
2314 *
2315 * Issues a FW command through the given mailbox to read a PHY register.
2316 */
2317int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2318 unsigned int mmd, unsigned int reg, u16 *valp)
2319{
2320 int ret;
2321 struct fw_ldst_cmd c;
2322
2323 memset(&c, 0, sizeof(c));
2324 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2325 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2326 c.cycles_to_len16 = htonl(FW_LEN16(c));
2327 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2328 FW_LDST_CMD_MMD(mmd));
2329 c.u.mdio.raddr = htons(reg);
2330
2331 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2332 if (ret == 0)
2333 *valp = ntohs(c.u.mdio.rval);
2334 return ret;
2335}
2336
2337/**
2338 * t4_mdio_wr - write a PHY register through MDIO
2339 * @adap: the adapter
2340 * @mbox: mailbox to use for the FW command
2341 * @phy_addr: the PHY address
2342 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2343 * @reg: the register to write
2344 * @val: value to write
2345 *
2346 * Issues a FW command through the given mailbox to write a PHY register.
2347 */
2348int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2349 unsigned int mmd, unsigned int reg, u16 val)
2350{
2351 struct fw_ldst_cmd c;
2352
2353 memset(&c, 0, sizeof(c));
2354 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2355 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2356 c.cycles_to_len16 = htonl(FW_LEN16(c));
2357 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2358 FW_LDST_CMD_MMD(mmd));
2359 c.u.mdio.raddr = htons(reg);
2360 c.u.mdio.rval = htons(val);
2361
2362 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2363}
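
/*
 * Illustrative sketch (not part of the upstream driver): a clause-45
 * read-modify-write of a PHY register using the two MDIO helpers above.
 * The wrapper name and its clr/set parameters are hypothetical.
 */
static inline int example_mdio_rmw(struct adapter *adap, unsigned int mbox,
				   unsigned int phy_addr, unsigned int mmd,
				   unsigned int reg, u16 clr, u16 set)
{
	u16 val;
	int ret = t4_mdio_rd(adap, mbox, phy_addr, mmd, reg, &val);

	if (ret)
		return ret;
	return t4_mdio_wr(adap, mbox, phy_addr, mmd, reg, (val & ~clr) | set);
}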
2364
2365/**
Vipul Pandya636f9d32012-09-26 02:39:39 +00002366 * t4_fw_hello - establish communication with FW
2367 * @adap: the adapter
2368 * @mbox: mailbox to use for the FW command
2369 * @evt_mbox: mailbox to receive async FW events
2370 * @master: specifies the caller's willingness to be the device master
2371 * @state: returns the current device state (if non-NULL)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002372 *
Vipul Pandya636f9d32012-09-26 02:39:39 +00002373 * Issues a command to establish communication with FW. Returns either
2374 * an error (negative integer) or the mailbox of the Master PF.
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002375 */
2376int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2377 enum dev_master master, enum dev_state *state)
2378{
2379 int ret;
2380 struct fw_hello_cmd c;
Vipul Pandya636f9d32012-09-26 02:39:39 +00002381 u32 v;
2382 unsigned int master_mbox;
2383 int retries = FW_CMD_HELLO_RETRIES;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002384
Vipul Pandya636f9d32012-09-26 02:39:39 +00002385retry:
2386 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002387 INIT_CMD(c, HELLO, WRITE);
2388 c.err_to_mbasyncnot = htonl(
2389 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2390 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
Vipul Pandya636f9d32012-09-26 02:39:39 +00002391 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
2392 FW_HELLO_CMD_MBMASTER_MASK) |
2393 FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
2394 FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
2395 FW_HELLO_CMD_CLEARINIT);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002396
Vipul Pandya636f9d32012-09-26 02:39:39 +00002397 /*
2398 * Issue the HELLO command to the firmware. If it's not successful
2399 * but indicates that we got a "busy" or "timeout" condition, retry
2400 * the HELLO until we exhaust our retry limit.
2401 */
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002402 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
Vipul Pandya636f9d32012-09-26 02:39:39 +00002403 if (ret < 0) {
2404 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2405 goto retry;
2406 return ret;
2407 }
2408
2409 v = ntohl(c.err_to_mbasyncnot);
2410 master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
2411 if (state) {
2412 if (v & FW_HELLO_CMD_ERR)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002413 *state = DEV_STATE_ERR;
Vipul Pandya636f9d32012-09-26 02:39:39 +00002414 else if (v & FW_HELLO_CMD_INIT)
2415 *state = DEV_STATE_INIT;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002416 else
2417 *state = DEV_STATE_UNINIT;
2418 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00002419
2420 /*
2421 * If we're not the Master PF then we need to wait around for the
2422 * Master PF Driver to finish setting up the adapter.
2423 *
2424 * Note that we also do this wait if we're a non-Master-capable PF and
2425 * there is no current Master PF; a Master PF may show up momentarily
2426 * and we wouldn't want to fail pointlessly. (This can happen when an
2427 * OS loads lots of different drivers rapidly at the same time). In
2428 * this case, the Master PF returned by the firmware will be
2429 * FW_PCIE_FW_MASTER_MASK so the test below will work ...
2430 */
2431 if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
2432 master_mbox != mbox) {
2433 int waiting = FW_CMD_HELLO_TIMEOUT;
2434
2435 /*
2436 * Wait for the firmware to either indicate an error or
2437 * initialized state. If we see either of these we bail out
2438 * and report the issue to the caller. If we exhaust the
2439 * "hello timeout" and we haven't exhausted our retries, try
2440 * again. Otherwise bail with a timeout error.
2441 */
2442 for (;;) {
2443 u32 pcie_fw;
2444
2445 msleep(50);
2446 waiting -= 50;
2447
2448 /*
2449 * If neither Error nor Initialized is indicated
2450 * by the firmware, keep waiting till we exhaust our
2451 * timeout ... and then retry if we haven't exhausted
2452 * our retries ...
2453 */
2454 pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
2455 if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
2456 if (waiting <= 0) {
2457 if (retries-- > 0)
2458 goto retry;
2459
2460 return -ETIMEDOUT;
2461 }
2462 continue;
2463 }
2464
2465 /*
2466 * We either have an Error or Initialized condition
2467 * report errors preferentially.
2468 */
2469 if (state) {
2470 if (pcie_fw & FW_PCIE_FW_ERR)
2471 *state = DEV_STATE_ERR;
2472 else if (pcie_fw & FW_PCIE_FW_INIT)
2473 *state = DEV_STATE_INIT;
2474 }
2475
2476 /*
2477 * If we arrived before a Master PF was selected and
2478 * there's not a valid Master PF, grab its identity
2479 * for our caller.
2480 */
2481 if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
2482 (pcie_fw & FW_PCIE_FW_MASTER_VLD))
2483 master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
2484 break;
2485 }
2486 }
2487
2488 return master_mbox;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002489}
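
/*
 * Illustrative sketch (not part of the upstream driver): a minimal attach
 * sequence showing how the HELLO return value and device state are
 * typically consumed.  The caller name is hypothetical, and MASTER_MAY is
 * assumed to be the "willing but not insisting" value of enum dev_master.
 */
static inline int example_fw_attach(struct adapter *adap, unsigned int mbox,
				    bool *we_are_master)
{
	enum dev_state state;
	int master = t4_fw_hello(adap, mbox, mbox, MASTER_MAY, &state);

	if (master < 0)
		return master;			/* HELLO failed */
	if (state == DEV_STATE_ERR)
		return -EIO;			/* firmware reports an error */
	*we_are_master = (master == mbox);	/* did we win mastership? */
	return 0;
}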
2490
2491/**
2492 * t4_fw_bye - end communication with FW
2493 * @adap: the adapter
2494 * @mbox: mailbox to use for the FW command
2495 *
2496 * Issues a command to terminate communication with FW.
2497 */
2498int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2499{
2500 struct fw_bye_cmd c;
2501
2502 INIT_CMD(c, BYE, WRITE);
2503 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2504}
2505
2506/**
2507 * t4_init_cmd - ask FW to initialize the device
2508 * @adap: the adapter
2509 * @mbox: mailbox to use for the FW command
2510 *
2511 * Issues a command to FW to partially initialize the device. This
2512 * performs initialization that generally doesn't depend on user input.
2513 */
2514int t4_early_init(struct adapter *adap, unsigned int mbox)
2515{
2516 struct fw_initialize_cmd c;
2517
2518 INIT_CMD(c, INITIALIZE, WRITE);
2519 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2520}
2521
2522/**
2523 * t4_fw_reset - issue a reset to FW
2524 * @adap: the adapter
2525 * @mbox: mailbox to use for the FW command
2526 * @reset: specifies the type of reset to perform
2527 *
2528 * Issues a reset command of the specified type to FW.
2529 */
2530int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2531{
2532 struct fw_reset_cmd c;
2533
2534 INIT_CMD(c, RESET, WRITE);
2535 c.val = htonl(reset);
2536 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2537}
2538
2539/**
Vipul Pandya636f9d32012-09-26 02:39:39 +00002540 * t4_fw_config_file - setup an adapter via a Configuration File
2541 * @adap: the adapter
2542 * @mbox: mailbox to use for the FW command
2543 * @mtype: the memory type where the Configuration File is located
2544 * @maddr: the memory address where the Configuration File is located
2545 * @finiver: return value for CF [fini] version
2546 * @finicsum: return value for CF [fini] checksum
2547 * @cfcsum: return value for CF computed checksum
2548 *
2549 * Issue a command to get the firmware to process the Configuration
2550 * File located at the specified mtype/maddress. If the Configuration
2551 * File is processed successfully and return value pointers are
2552 * provided, the Configuration File "[fini]" section version and
2553 * checksum values will be returned along with the computed checksum.
2554 * It's up to the caller to decide how it wants to respond to the
2555 * checksums not matching, but it is recommended that a prominent warning
2556 * be emitted in order to help people rapidly identify changed or
2557 * corrupted Configuration Files.
2558 *
2559 * Also note that it's possible to modify things like "niccaps",
2560 * "toecaps",etc. between processing the Configuration File and telling
2561 * the firmware to use the new configuration. Callers which want to
2562 * do this will need to "hand-roll" their own CAPS_CONFIGS commands for
2563 * Configuration Files if they want to do this.
2564 */
2565int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
2566 unsigned int mtype, unsigned int maddr,
2567 u32 *finiver, u32 *finicsum, u32 *cfcsum)
2568{
2569 struct fw_caps_config_cmd caps_cmd;
2570 int ret;
2571
2572 /*
2573 * Tell the firmware to process the indicated Configuration File.
2574 * If there are no errors and the caller has provided return value
2575 * pointers for the [fini] section version, checksum and computed
2576 * checksum, pass those back to the caller.
2577 */
2578 memset(&caps_cmd, 0, sizeof(caps_cmd));
2579 caps_cmd.op_to_write =
2580 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2581 FW_CMD_REQUEST |
2582 FW_CMD_READ);
2583 caps_cmd.retval_len16 =
2584 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
2585 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2586 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
2587 FW_LEN16(caps_cmd));
2588 ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd);
2589 if (ret < 0)
2590 return ret;
2591
2592 if (finiver)
2593 *finiver = ntohl(caps_cmd.finiver);
2594 if (finicsum)
2595 *finicsum = ntohl(caps_cmd.finicsum);
2596 if (cfcsum)
2597 *cfcsum = ntohl(caps_cmd.cfcsum);
2598
2599 /*
2600 * And now tell the firmware to use the configuration we just loaded.
2601 */
2602 caps_cmd.op_to_write =
2603 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2604 FW_CMD_REQUEST |
2605 FW_CMD_WRITE);
2606 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
2607 return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
2608}
2609
2610/**
2611 * t4_fixup_host_params - fix up host-dependent parameters
2612 * @adap: the adapter
2613 * @page_size: the host's Base Page Size
2614 * @cache_line_size: the host's Cache Line Size
2615 *
2616 * Various registers in T4 contain values which are dependent on the
2617 * host's Base Page and Cache Line Sizes. This function will fix all of
2618 * those registers with the appropriate values as passed in ...
2619 */
2620int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
2621 unsigned int cache_line_size)
2622{
2623 unsigned int page_shift = fls(page_size) - 1;
2624 unsigned int sge_hps = page_shift - 10;
2625 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
2626 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
2627 unsigned int fl_align_log = fls(fl_align) - 1;
2628
2629 t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
2630 HOSTPAGESIZEPF0(sge_hps) |
2631 HOSTPAGESIZEPF1(sge_hps) |
2632 HOSTPAGESIZEPF2(sge_hps) |
2633 HOSTPAGESIZEPF3(sge_hps) |
2634 HOSTPAGESIZEPF4(sge_hps) |
2635 HOSTPAGESIZEPF5(sge_hps) |
2636 HOSTPAGESIZEPF6(sge_hps) |
2637 HOSTPAGESIZEPF7(sge_hps));
2638
2639 t4_set_reg_field(adap, SGE_CONTROL,
2640 INGPADBOUNDARY(INGPADBOUNDARY_MASK) |
2641 EGRSTATUSPAGESIZE_MASK,
2642 INGPADBOUNDARY(fl_align_log - 5) |
2643 EGRSTATUSPAGESIZE(stat_len != 64));
2644
2645 /*
2646 * Adjust various SGE Free List Host Buffer Sizes.
2647 *
2648 * This is something of a crock since we're using fixed indices into
2649 * the array which are also known by the sge.c code and the T4
2650 * Firmware Configuration File. We need to come up with a much better
2651 * approach to managing this array. For now, the first four entries
2652 * are:
2653 *
2654 * 0: Host Page Size
2655 * 1: 64KB
2656 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
2657 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
2658 *
2659 * For the single-MTU buffers in unpacked mode we need to include
2660 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
2661 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
2662 * Padding boundary. All of these are accommodated in the Factory
2663 * Default Firmware Configuration File but we need to adjust it for
2664 * this host's cache line size.
2665 */
2666 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
2667 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
2668 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
2669 & ~(fl_align-1));
2670 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
2671 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
2672 & ~(fl_align-1));
2673
2674 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
2675
2676 return 0;
2677}
2678
2679/**
2680 * t4_fw_initialize - ask FW to initialize the device
2681 * @adap: the adapter
2682 * @mbox: mailbox to use for the FW command
2683 *
2684 * Issues a command to FW to partially initialize the device. This
2685 * performs initialization that generally doesn't depend on user input.
2686 */
2687int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
2688{
2689 struct fw_initialize_cmd c;
2690
2691 memset(&c, 0, sizeof(c));
2692 INIT_CMD(c, INITIALIZE, WRITE);
2693 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2694}
2695
2696/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002697 * t4_query_params - query FW or device parameters
2698 * @adap: the adapter
2699 * @mbox: mailbox to use for the FW command
2700 * @pf: the PF
2701 * @vf: the VF
2702 * @nparams: the number of parameters
2703 * @params: the parameter names
2704 * @val: the parameter values
2705 *
2706 * Reads the value of FW or device parameters. Up to 7 parameters can be
2707 * queried at once.
2708 */
2709int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2710 unsigned int vf, unsigned int nparams, const u32 *params,
2711 u32 *val)
2712{
2713 int i, ret;
2714 struct fw_params_cmd c;
2715 __be32 *p = &c.param[0].mnem;
2716
2717 if (nparams > 7)
2718 return -EINVAL;
2719
2720 memset(&c, 0, sizeof(c));
2721 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2722 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2723 FW_PARAMS_CMD_VFN(vf));
2724 c.retval_len16 = htonl(FW_LEN16(c));
2725 for (i = 0; i < nparams; i++, p += 2)
2726 *p = htonl(*params++);
2727
2728 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2729 if (ret == 0)
2730 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2731 *val++ = ntohl(*p);
2732 return ret;
2733}
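
/*
 * Illustrative sketch (not part of the upstream driver): reading a single,
 * already-encoded FW parameter.  The wrapper name is hypothetical; real
 * callers build 'param' with the FW_PARAMS_* macros from t4fw_api.h.
 */
static inline int example_query_one_param(struct adapter *adap,
					  unsigned int mbox, unsigned int pf,
					  unsigned int vf, u32 param, u32 *val)
{
	return t4_query_params(adap, mbox, pf, vf, 1, &param, val);
}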
2734
2735/**
2736 * t4_set_params - sets FW or device parameters
2737 * @adap: the adapter
2738 * @mbox: mailbox to use for the FW command
2739 * @pf: the PF
2740 * @vf: the VF
2741 * @nparams: the number of parameters
2742 * @params: the parameter names
2743 * @val: the parameter values
2744 *
2745 * Sets the value of FW or device parameters. Up to 7 parameters can be
2746 * specified at once.
2747 */
2748int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2749 unsigned int vf, unsigned int nparams, const u32 *params,
2750 const u32 *val)
2751{
2752 struct fw_params_cmd c;
2753 __be32 *p = &c.param[0].mnem;
2754
2755 if (nparams > 7)
2756 return -EINVAL;
2757
2758 memset(&c, 0, sizeof(c));
2759 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2760 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2761 FW_PARAMS_CMD_VFN(vf));
2762 c.retval_len16 = htonl(FW_LEN16(c));
2763 while (nparams--) {
2764 *p++ = htonl(*params++);
2765 *p++ = htonl(*val++);
2766 }
2767
2768 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2769}
2770
2771/**
2772 * t4_cfg_pfvf - configure PF/VF resource limits
2773 * @adap: the adapter
2774 * @mbox: mailbox to use for the FW command
2775 * @pf: the PF being configured
2776 * @vf: the VF being configured
2777 * @txq: the max number of egress queues
2778 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
2779 * @rxqi: the max number of interrupt-capable ingress queues
2780 * @rxq: the max number of interruptless ingress queues
2781 * @tc: the PCI traffic class
2782 * @vi: the max number of virtual interfaces
2783 * @cmask: the channel access rights mask for the PF/VF
2784 * @pmask: the port access rights mask for the PF/VF
2785 * @nexact: the maximum number of exact MPS filters
2786 * @rcaps: read capabilities
2787 * @wxcaps: write/execute capabilities
2788 *
2789 * Configures resource limits and capabilities for a physical or virtual
2790 * function.
2791 */
2792int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
2793 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
2794 unsigned int rxqi, unsigned int rxq, unsigned int tc,
2795 unsigned int vi, unsigned int cmask, unsigned int pmask,
2796 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
2797{
2798 struct fw_pfvf_cmd c;
2799
2800 memset(&c, 0, sizeof(c));
2801 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
2802 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
2803 FW_PFVF_CMD_VFN(vf));
2804 c.retval_len16 = htonl(FW_LEN16(c));
2805 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
2806 FW_PFVF_CMD_NIQ(rxq));
Casey Leedom81323b72010-06-25 12:10:32 +00002807 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002808 FW_PFVF_CMD_PMASK(pmask) |
2809 FW_PFVF_CMD_NEQ(txq));
2810 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
2811 FW_PFVF_CMD_NEXACTF(nexact));
2812 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
2813 FW_PFVF_CMD_WX_CAPS(wxcaps) |
2814 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
2815 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2816}
2817
2818/**
2819 * t4_alloc_vi - allocate a virtual interface
2820 * @adap: the adapter
2821 * @mbox: mailbox to use for the FW command
2822 * @port: physical port associated with the VI
2823 * @pf: the PF owning the VI
2824 * @vf: the VF owning the VI
2825 * @nmac: number of MAC addresses needed (1 to 5)
2826 * @mac: the MAC addresses of the VI
2827 * @rss_size: size of RSS table slice associated with this VI
2828 *
2829 * Allocates a virtual interface for the given physical port. If @mac is
2830 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
2831 * @mac should be large enough to hold @nmac Ethernet addresses; they are
2832 * stored consecutively so the space needed is @nmac * 6 bytes.
2833 * Returns a negative error number or the non-negative VI id.
2834 */
2835int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
2836 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
2837 unsigned int *rss_size)
2838{
2839 int ret;
2840 struct fw_vi_cmd c;
2841
2842 memset(&c, 0, sizeof(c));
2843 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2844 FW_CMD_WRITE | FW_CMD_EXEC |
2845 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
2846 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
2847 c.portid_pkd = FW_VI_CMD_PORTID(port);
2848 c.nmac = nmac - 1;
2849
2850 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2851 if (ret)
2852 return ret;
2853
2854 if (mac) {
2855 memcpy(mac, c.mac, sizeof(c.mac));
2856 switch (nmac) {
2857 case 5:
2858 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
2859 case 4:
2860 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
2861 case 3:
2862 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
2863 case 2:
2864 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
2865 }
2866 }
2867 if (rss_size)
2868 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002869 return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002870}
2871
2872/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002873 * t4_set_rxmode - set Rx properties of a virtual interface
2874 * @adap: the adapter
2875 * @mbox: mailbox to use for the FW command
2876 * @viid: the VI id
2877 * @mtu: the new MTU or -1
2878 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2879 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2880 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00002881 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002882 * @sleep_ok: if true we may sleep while awaiting command completion
2883 *
2884 * Sets Rx properties of a virtual interface.
2885 */
2886int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00002887 int mtu, int promisc, int all_multi, int bcast, int vlanex,
2888 bool sleep_ok)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002889{
2890 struct fw_vi_rxmode_cmd c;
2891
2892 /* convert to FW values */
2893 if (mtu < 0)
2894 mtu = FW_RXMODE_MTU_NO_CHG;
2895 if (promisc < 0)
2896 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
2897 if (all_multi < 0)
2898 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
2899 if (bcast < 0)
2900 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00002901 if (vlanex < 0)
2902 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002903
2904 memset(&c, 0, sizeof(c));
2905 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
2906 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
2907 c.retval_len16 = htonl(FW_LEN16(c));
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00002908 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
2909 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
2910 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
2911 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
2912 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002913 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2914}
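
/*
 * Illustrative sketch (not part of the upstream driver): turning
 * promiscuous and all-multicast reception on or off for a VI while leaving
 * the MTU, broadcast and VLAN-extraction settings untouched (-1 means "no
 * change").  The caller name is hypothetical.
 */
static inline int example_set_promisc(struct adapter *adap, unsigned int mbox,
				      unsigned int viid, bool on)
{
	return t4_set_rxmode(adap, mbox, viid, -1, on, on, -1, -1, true);
}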
2915
2916/**
2917 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
2918 * @adap: the adapter
2919 * @mbox: mailbox to use for the FW command
2920 * @viid: the VI id
2921 * @free: if true any existing filters for this VI id are first removed
2922 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
2923 * @addr: the MAC address(es)
2924 * @idx: where to store the index of each allocated filter
2925 * @hash: pointer to hash address filter bitmap
2926 * @sleep_ok: call is allowed to sleep
2927 *
2928 * Allocates an exact-match filter for each of the supplied addresses and
2929 * sets it to the corresponding address. If @idx is not %NULL it should
2930 * have at least @naddr entries, each of which will be set to the index of
2931 * the filter allocated for the corresponding MAC address. If a filter
2932 * could not be allocated for an address its index is set to 0xffff.
2933 * If @hash is not %NULL addresses that fail to allocate an exact filter
2934 * are hashed and update the hash filter bitmap pointed at by @hash.
2935 *
2936 * Returns a negative error number or the number of filters allocated.
2937 */
2938int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
2939 unsigned int viid, bool free, unsigned int naddr,
2940 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
2941{
2942 int i, ret;
2943 struct fw_vi_mac_cmd c;
2944 struct fw_vi_mac_exact *p;
2945
2946 if (naddr > 7)
2947 return -EINVAL;
2948
2949 memset(&c, 0, sizeof(c));
2950 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2951 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
2952 FW_VI_MAC_CMD_VIID(viid));
2953 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
2954 FW_CMD_LEN16((naddr + 2) / 2));
2955
2956 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2957 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2958 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
2959 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
2960 }
2961
2962 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
2963 if (ret)
2964 return ret;
2965
2966 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2967 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2968
2969 if (idx)
2970 idx[i] = index >= NEXACT_MAC ? 0xffff : index;
2971 if (index < NEXACT_MAC)
2972 ret++;
2973 else if (hash)
Dimitris Michailidisce9aeb52010-12-03 10:39:04 +00002974 *hash |= (1ULL << hash_mac_addr(addr[i]));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002975 }
2976 return ret;
2977}
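
/*
 * Illustrative sketch (not part of the upstream driver): installing two
 * secondary unicast addresses, falling back to the inexact-match hash for
 * any that don't get an exact filter.  A non-zero *hash afterwards must
 * still be programmed with t4_set_addr_hash().  Names are hypothetical.
 */
static inline int example_add_uc_addrs(struct adapter *adap, unsigned int mbox,
				       unsigned int viid, const u8 *a0,
				       const u8 *a1, u16 idx[2], u64 *hash)
{
	const u8 *addrs[2] = { a0, a1 };

	return t4_alloc_mac_filt(adap, mbox, viid, false, 2, addrs,
				 idx, hash, true);
}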
2978
2979/**
2980 * t4_change_mac - modifies the exact-match filter for a MAC address
2981 * @adap: the adapter
2982 * @mbox: mailbox to use for the FW command
2983 * @viid: the VI id
2984 * @idx: index of existing filter for old value of MAC address, or -1
2985 * @addr: the new MAC address value
2986 * @persist: whether a new MAC allocation should be persistent
2987 * @add_smt: if true also add the address to the HW SMT
2988 *
2989 * Modifies an exact-match filter and sets it to the new MAC address.
2990 * Note that in general it is not possible to modify the value of a given
2991 * filter so the generic way to modify an address filter is to free the one
2992 * being used by the old address value and allocate a new filter for the
2993 * new address value. @idx can be -1 if the address is a new addition.
2994 *
2995 * Returns a negative error number or the index of the filter with the new
2996 * MAC value.
2997 */
2998int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2999 int idx, const u8 *addr, bool persist, bool add_smt)
3000{
3001 int ret, mode;
3002 struct fw_vi_mac_cmd c;
3003 struct fw_vi_mac_exact *p = c.u.exact;
3004
3005 if (idx < 0) /* new allocation */
3006 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
3007 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
3008
3009 memset(&c, 0, sizeof(c));
3010 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3011 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
3012 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
3013 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3014 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
3015 FW_VI_MAC_CMD_IDX(idx));
3016 memcpy(p->macaddr, addr, sizeof(p->macaddr));
3017
3018 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3019 if (ret == 0) {
3020 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3021 if (ret >= NEXACT_MAC)
3022 ret = -ENOMEM;
3023 }
3024 return ret;
3025}
3026
3027/**
3028 * t4_set_addr_hash - program the MAC inexact-match hash filter
3029 * @adap: the adapter
3030 * @mbox: mailbox to use for the FW command
3031 * @viid: the VI id
3032 * @ucast: whether the hash filter should also match unicast addresses
3033 * @vec: the value to be written to the hash filter
3034 * @sleep_ok: call is allowed to sleep
3035 *
3036 * Sets the 64-bit inexact-match hash filter for a virtual interface.
3037 */
3038int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
3039 bool ucast, u64 vec, bool sleep_ok)
3040{
3041 struct fw_vi_mac_cmd c;
3042
3043 memset(&c, 0, sizeof(c));
3044 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3045 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
3046 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
3047 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
3048 FW_CMD_LEN16(1));
3049 c.u.hash.hashvec = cpu_to_be64(vec);
3050 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3051}

/**
 * t4_enable_vi - enable/disable a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 *
 * Enables/disables a virtual interface.
 */
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
                 bool rx_en, bool tx_en)
{
        struct fw_vi_enable_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
                             FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
        c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
                               FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
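
/*
 * Illustrative sketch only: interface open/stop paths usually toggle both
 * directions together.  The helper name is invented for this example.
 */
static inline int example_set_vi_traffic(struct adapter *adap,
                                         unsigned int mbox, unsigned int viid,
                                         bool up)
{
        /* enable or disable Rx and Tx on the VI as a pair */
        return t4_enable_vi(adap, mbox, viid, up, up);
}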

/**
 * t4_identify_port - identify a VI's port by blinking its LED
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @nblinks: how many times to blink LED at 2.5 Hz
 *
 * Identifies a VI's port by blinking its LED.
 */
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
                     unsigned int nblinks)
{
        struct fw_vi_enable_cmd c;

        memset(&c, 0, sizeof(c));       /* don't send stack data in reserved fields */
        c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
                             FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
        c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
        c.blinkdur = htons(nblinks);
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
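
/*
 * Illustrative sketch only: an ethtool-style "identify" that blinks the
 * port LED for roughly five seconds.  At the fixed 2.5 Hz rate that is
 * about 12 blinks; the helper name is invented for this example.
 */
static inline int example_blink_port_led(struct adapter *adap,
                                         unsigned int mbox, unsigned int viid)
{
        return t4_identify_port(adap, mbox, viid, 12);  /* ~5 s at 2.5 Hz */
}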

/**
 * t4_iq_free - free an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqtype: the ingress queue type
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Frees an ingress queue and its associated FLs, if any.
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
               unsigned int vf, unsigned int iqtype, unsigned int iqid,
               unsigned int fl0id, unsigned int fl1id)
{
        struct fw_iq_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
                            FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
                            FW_IQ_CMD_VFN(vf));
        c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
        c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
        c.iqid = htons(iqid);
        c.fl0id = htons(fl0id);
        c.fl1id = htons(fl1id);
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
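
/*
 * Illustrative sketch only: freeing an RX response queue that has a single
 * attached free list.  FW_IQ_TYPE_FL_INT_CAP is assumed to be the ingress
 * queue type used for such queues, and 0xffff marks the missing FL1 as
 * described above; the helper name is invented for this example.
 */
static inline int example_free_rspq_with_fl(struct adapter *adap,
                                            unsigned int mbox, unsigned int pf,
                                            unsigned int vf, unsigned int iqid,
                                            unsigned int fl0id)
{
        return t4_iq_free(adap, mbox, pf, vf, FW_IQ_TYPE_FL_INT_CAP,
                          iqid, fl0id, 0xffff);
}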

/**
 * t4_eth_eq_free - free an Ethernet egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                   unsigned int vf, unsigned int eqid)
{
        struct fw_eq_eth_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
                            FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
                            FW_EQ_ETH_CMD_VFN(vf));
        c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
        c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
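
/*
 * Illustrative sketch only: the @eqid passed here is the egress queue
 * context id handed back by the firmware when the TX queue was allocated,
 * and freeing is typically done only after the queue has been quiesced.
 * The helper name is invented for this example.
 */
static inline int example_free_eth_txq(struct adapter *adap, unsigned int mbox,
                                       unsigned int pf, unsigned int vf,
                                       unsigned int txq_cntxt_id)
{
        return t4_eth_eq_free(adap, mbox, pf, vf, txq_cntxt_id);
}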

/**
 * t4_ctrl_eq_free - free a control egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees a control egress queue.
 */
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid)
{
        struct fw_eq_ctrl_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
                            FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
                            FW_EQ_CTRL_CMD_VFN(vf));
        c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
        c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ofld_eq_free - free an offload egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid)
{
        struct fw_eq_ofld_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
                            FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
                            FW_EQ_OFLD_CMD_VFN(vf));
        c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
        c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
        u8 opcode = *(const u8 *)rpl;

        if (opcode == FW_PORT_CMD) {    /* link/module state change message */
                int speed = 0, fc = 0;
                const struct fw_port_cmd *p = (void *)rpl;
                int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
                int port = adap->chan_map[chan];
                struct port_info *pi = adap2pinfo(adap, port);
                struct link_config *lc = &pi->link_cfg;
                u32 stat = ntohl(p->u.info.lstatus_to_modtype);
                int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
                u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);

                if (stat & FW_PORT_CMD_RXPAUSE)
                        fc |= PAUSE_RX;
                if (stat & FW_PORT_CMD_TXPAUSE)
                        fc |= PAUSE_TX;
                if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
                        speed = SPEED_100;
                else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
                        speed = SPEED_1000;
                else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
                        speed = SPEED_10000;

                if (link_ok != lc->link_ok || speed != lc->speed ||
                    fc != lc->fc) {                     /* something changed */
                        lc->link_ok = link_ok;
                        lc->speed = speed;
                        lc->fc = fc;
                        t4_os_link_changed(adap, port, link_ok);
                }
                if (mod != pi->mod_type) {
                        pi->mod_type = mod;
                        t4_os_portmod_changed(adap, port);
                }
        }
        return 0;
}
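
/*
 * Illustrative note: response-queue handlers are expected to feed the
 * payload of asynchronous firmware messages to t4_handle_fw_rpl().  With
 * the CPL definitions from the driver's message headers (not included by
 * this file), the dispatch typically looks like:
 *
 *      const struct cpl_fw6_msg *msg = (const void *)rsp;
 *
 *      if (opcode == CPL_FW6_MSG && msg->type == FW6_TYPE_CMD_RPL)
 *              t4_handle_fw_rpl(q->adap, msg->data);
 *
 * The structure and constant names above are assumptions of this sketch,
 * not definitions made here.
 */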

static void __devinit get_pci_mode(struct adapter *adapter,
                                   struct pci_params *p)
{
        u16 val;
        u32 pcie_cap = pci_pcie_cap(adapter->pdev);

        if (pcie_cap) {
                pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
                                     &val);
                p->speed = val & PCI_EXP_LNKSTA_CLS;
                p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
        }
}
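
/*
 * Worked example (informational): with the standard PCIe Link Status
 * encoding, a Current Link Speed of 1 means 2.5 GT/s (Gen1) and 2 means
 * 5 GT/s (Gen2), while the Negotiated Link Width is the lane count, so a
 * T4 trained at Gen2 x8 ends up with p->speed == 2 and p->width == 8.
 * The helper below restates that and is not part of the driver.
 */
static inline unsigned int example_pcie_raw_rate_mbps(const struct pci_params *p)
{
        /* per-lane signalling rate times lane count, ignoring the 8b/10b
         * encoding overhead of Gen1/Gen2 links
         */
        return (p->speed == 2 ? 5000 : 2500) * p->width;
}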

/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: link capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void __devinit init_link_config(struct link_config *lc,
                                       unsigned int caps)
{
        lc->supported = caps;
        lc->requested_speed = 0;
        lc->speed = 0;
        lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
        if (lc->supported & FW_PORT_CAP_ANEG) {
                lc->advertising = lc->supported & ADVERT_MASK;
                lc->autoneg = AUTONEG_ENABLE;
                lc->requested_fc |= PAUSE_AUTONEG;
        } else {
                lc->advertising = 0;
                lc->autoneg = AUTONEG_DISABLE;
        }
}

int t4_wait_dev_ready(struct adapter *adap)
{
        if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
                return 0;
        msleep(500);
        return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
}

static int __devinit get_flash_params(struct adapter *adap)
{
        int ret;
        u32 info;

        ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
        if (!ret)
                ret = sf1_read(adap, 3, 0, 1, &info);
        t4_write_reg(adap, SF_OP, 0);                   /* unlock SF */
        if (ret)
                return ret;

        if ((info & 0xff) != 0x20)              /* not a Numonix flash */
                return -EINVAL;
        info >>= 16;                            /* log2 of size */
        if (info >= 0x14 && info < 0x18)
                adap->params.sf_nsec = 1 << (info - 16);
        else if (info == 0x18)
                adap->params.sf_nsec = 64;
        else
                return -EINVAL;
        adap->params.sf_size = 1 << info;
        adap->params.sf_fw_start =
                t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
        return 0;
}
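
/*
 * Worked example (informational): a size byte of 0x17 in bits 23:16 of the
 * flash ID word means a 2^0x17-byte (8 MB) part split into
 * 1 << (0x17 - 16) = 128 sectors, i.e. 64 KB per sector; the special-cased
 * 0x18 part is 16 MB in 64 sectors of 256 KB.  The helper below merely
 * restates that relationship and is not part of the driver.
 */
static inline unsigned int example_sf_sector_size(const struct adapter *adap)
{
        /* uniform sector size implied by the parameters captured above */
        return adap->params.sf_size / adap->params.sf_nsec;
}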

/**
 * t4_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
 */
int __devinit t4_prep_adapter(struct adapter *adapter)
{
        int ret;

        ret = t4_wait_dev_ready(adapter);
        if (ret < 0)
                return ret;

        get_pci_mode(adapter, &adapter->params.pci);
        adapter->params.rev = t4_read_reg(adapter, PL_REV);

        ret = get_flash_params(adapter);
        if (ret < 0) {
                dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
                return ret;
        }

        init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

        /*
         * Default port for debugging in case we can't reach FW.
         */
        adapter->params.nports = 1;
        adapter->params.portvec = 1;
        adapter->params.vpd.cclk = 50000;
        return 0;
}

int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
        u8 addr[6];
        int ret, i, j = 0;
        struct fw_port_cmd c;
        struct fw_rss_vi_config_cmd rvc;

        memset(&c, 0, sizeof(c));
        memset(&rvc, 0, sizeof(rvc));

        for_each_port(adap, i) {
                unsigned int rss_size;
                struct port_info *p = adap2pinfo(adap, i);

                while ((adap->params.portvec & (1 << j)) == 0)
                        j++;

                c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
                                       FW_CMD_REQUEST | FW_CMD_READ |
                                       FW_PORT_CMD_PORTID(j));
                c.action_to_len16 = htonl(
                        FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
                        FW_LEN16(c));
                ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
                if (ret)
                        return ret;

                ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
                if (ret < 0)
                        return ret;

                p->viid = ret;
                p->tx_chan = j;
                p->lport = j;
                p->rss_size = rss_size;
                memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
                memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
                adap->port[i]->dev_id = j;

                ret = ntohl(c.u.info.lstatus_to_modtype);
                p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
                        FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
                p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
                p->mod_type = FW_PORT_MOD_TYPE_NA;

                rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
                                       FW_CMD_REQUEST | FW_CMD_READ |
                                       FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
                rvc.retval_len16 = htonl(FW_LEN16(rvc));
                ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
                if (ret)
                        return ret;
                p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);

                init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
                j++;
        }
        return 0;
}
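
/*
 * Illustrative sketch only: the probe-time ordering implied by this file is
 * t4_prep_adapter() first (basic SW state, flash and PCI parameters), then
 * the firmware handshake and configuration handled elsewhere in the driver,
 * and only then t4_port_init() to create the per-port VIs from the
 * firmware's port information.  The helper name and the minimal error
 * handling are inventions of this example.
 */
static inline int example_hw_bringup(struct adapter *adap, int mbox,
                                     int pf, int vf)
{
        int ret = t4_prep_adapter(adap);

        if (ret)
                return ret;

        /* ... firmware "hello"/initialization steps would happen here ... */

        return t4_port_init(adap, mbox, pf, vf);
}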