1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/init.h>
36#include <linux/delay.h>
37#include "cxgb4.h"
38#include "t4_regs.h"
39#include "t4fw_api.h"
40
41/**
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
50 *
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
55 */
56static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp)
58{
59 while (1) {
60 u32 val = t4_read_reg(adapter, reg);
61
62 if (!!(val & mask) == polarity) {
63 if (valp)
64 *valp = val;
65 return 0;
66 }
67 if (--attempts == 0)
68 return -EAGAIN;
69 if (delay)
70 udelay(delay);
71 }
72}
73
74static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
75 int polarity, int attempts, int delay)
76{
77 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
78 delay, NULL);
79}
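/*
 * Usage sketch (illustrative, not part of the original file): polling a
 * completion bit with the helpers above.  EXAMPLE_STATUS_REG and
 * EXAMPLE_DONE are hypothetical register/bit names.
 *
 *	u32 val;
 *
 *	if (t4_wait_op_done_val(adap, EXAMPLE_STATUS_REG, EXAMPLE_DONE,
 *				1, 10, 5, &val))
 *		return -EAGAIN;
 *
 * This polls up to 10 times, 5 us apart, until the bit reads back as 1.
 */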
80
81/**
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
87 *
88 * Sets a register field specified by the supplied mask to the
89 * given value.
90 */
91void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
92 u32 val)
93{
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
95
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush */
98}
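/*
 * Usage sketch (illustrative): updating one field of a register with
 * t4_set_reg_field().  EXAMPLE_CTRL_REG and the 0xf0 field are hypothetical.
 *
 *	t4_set_reg_field(adap, EXAMPLE_CTRL_REG, 0xf0, 0x30);
 *
 * Only the bits covered by the mask change; the read-back inside the helper
 * flushes the write before the caller continues.
 */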
99
100/**
101 * t4_read_indirect - read indirectly addressed registers
102 * @adap: the adapter
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
108 *
109 * Reads registers that are accessed indirectly through an address/data
110 * register pair.
111 */
112static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals,
114 unsigned int nregs, unsigned int start_idx)
115{
116 while (nregs--) {
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
119 start_idx++;
120 }
121}
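/*
 * Usage sketch (illustrative): reading a block of indirectly addressed
 * registers.  The address/data register names are placeholders.
 *
 *	u32 vals[4];
 *
 *	t4_read_indirect(adap, EXAMPLE_ADDR_REG, EXAMPLE_DATA_REG,
 *			 vals, ARRAY_SIZE(vals), 0);
 *
 * Each iteration writes the index to the address register and then reads the
 * value from the data register.
 */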
122
123/*
124 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
125 */
126static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
127 u32 mbox_addr)
128{
129 for ( ; nflit; nflit--, mbox_addr += 8)
130 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
131}
132
133/*
134 * Handle a FW assertion reported in a mailbox.
135 */
136static void fw_asrt(struct adapter *adap, u32 mbox_addr)
137{
138 struct fw_debug_cmd asrt;
139
140 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
141 dev_alert(adap->pdev_dev,
142 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
143 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
144 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
145}
146
147static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
148{
149 dev_err(adap->pdev_dev,
150 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
151 (unsigned long long)t4_read_reg64(adap, data_reg),
152 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
153 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
154 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
155 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
156 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
157 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
158 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
159}
160
161/**
162 * t4_wr_mbox_meat - send a command to FW through the given mailbox
163 * @adap: the adapter
164 * @mbox: index of the mailbox to use
165 * @cmd: the command to write
166 * @size: command length in bytes
167 * @rpl: where to optionally store the reply
168 * @sleep_ok: if true we may sleep while awaiting command completion
169 *
170 * Sends the given command to FW through the selected mailbox and waits
171 * for the FW to execute the command. If @rpl is not %NULL it is used to
172 * store the FW's reply to the command. The command and its optional
173 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
174 * to respond. @sleep_ok determines whether we may sleep while awaiting
175 * the response. If sleeping is allowed we use progressive backoff;
176 * otherwise we spin.
177 *
178 * The return value is 0 on success or a negative errno on failure. A
179 * failure can happen either because we are not able to execute the
180 * command or FW executes it but signals an error. In the latter case
181 * the return value is the error code indicated by FW (negated).
182 */
183int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
184 void *rpl, bool sleep_ok)
185{
186 static const int delay[] = {
187 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
188 };
189
190 u32 v;
191 u64 res;
192 int i, ms, delay_idx;
193 const __be64 *p = cmd;
194 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
195 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
196
197 if ((size & 15) || size > MBOX_LEN)
198 return -EINVAL;
199
200 /*
201 * If the device is off-line, as in EEH, commands will time out.
202 * Fail them early so we don't waste time waiting.
203 */
204 if (adap->pdev->error_state != pci_channel_io_normal)
205 return -EIO;
206
207 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
208 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
209 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
210
211 if (v != MBOX_OWNER_DRV)
212 return v ? -EBUSY : -ETIMEDOUT;
213
214 for (i = 0; i < size; i += 8)
215 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
216
217 t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
218 t4_read_reg(adap, ctl_reg); /* flush write */
219
220 delay_idx = 0;
221 ms = delay[0];
222
223 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
224 if (sleep_ok) {
225 ms = delay[delay_idx]; /* last element may repeat */
226 if (delay_idx < ARRAY_SIZE(delay) - 1)
227 delay_idx++;
228 msleep(ms);
229 } else
230 mdelay(ms);
231
232 v = t4_read_reg(adap, ctl_reg);
233 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
234 if (!(v & MBMSGVALID)) {
235 t4_write_reg(adap, ctl_reg, 0);
236 continue;
237 }
238
239 res = t4_read_reg64(adap, data_reg);
240 if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
241 fw_asrt(adap, data_reg);
242 res = FW_CMD_RETVAL(EIO);
243 } else if (rpl)
244 get_mbox_rpl(adap, rpl, size / 8, data_reg);
245
246 if (FW_CMD_RETVAL_GET((int)res))
247 dump_mbox(adap, mbox, data_reg);
248 t4_write_reg(adap, ctl_reg, 0);
249 return -FW_CMD_RETVAL_GET((int)res);
250 }
251 }
252
253 dump_mbox(adap, mbox, data_reg);
254 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
255 *(const u8 *)cmd, mbox);
256 return -ETIMEDOUT;
257}
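/*
 * Usage sketch (illustrative): issuing a firmware command through the
 * mailbox.  "c" stands for any fully initialized FW command structure; the
 * same buffer may be passed as @rpl since command and reply have equal
 * length.
 *
 *	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, true);
 *
 * The result is 0 on success, -EBUSY/-ETIMEDOUT on mailbox problems, or the
 * negated FW error code if the firmware rejected the command.
 */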
258
259/**
260 * t4_mc_read - read from MC through backdoor accesses
261 * @adap: the adapter
262 * @addr: address of first byte requested
263 * @data: 64 bytes of data containing the requested address
264 * @ecc: where to store the corresponding 64-bit ECC word
265 *
266 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
267 * that covers the requested address @addr. If @ecc is not %NULL it
268 * is assigned the 64-bit ECC word for the read data.
269 */
270int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
271{
272 int i;
273
274 if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
275 return -EBUSY;
276 t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
277 t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
278 t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
279 t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
280 BIST_CMD_GAP(1));
281 i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
282 if (i)
283 return i;
284
285#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
286
287 for (i = 15; i >= 0; i--)
288 *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
289 if (ecc)
290 *ecc = t4_read_reg64(adap, MC_DATA(16));
291#undef MC_DATA
292 return 0;
293}
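/*
 * Usage sketch (illustrative): a backdoor read of one 64-byte line of MC
 * memory.  @addr may be any byte address; the helper rounds it down to a
 * 64-byte boundary internally.
 *
 *	__be32 line[16];
 *	u64 ecc;
 *
 *	ret = t4_mc_read(adap, addr, line, &ecc);
 *
 * Pass NULL as the last argument if the ECC word is not needed.
 */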
294
295/**
296 * t4_edc_read - read from EDC through backdoor accesses
297 * @adap: the adapter
298 * @idx: which EDC to access
299 * @addr: address of first byte requested
300 * @data: 64 bytes of data containing the requested address
301 * @ecc: where to store the corresponding 64-bit ECC word
302 *
303 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
304 * that covers the requested address @addr. If @ecc is not %NULL it
305 * is assigned the 64-bit ECC word for the read data.
306 */
307int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
308{
309 int i;
310
311 idx *= EDC_STRIDE;
312 if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
313 return -EBUSY;
314 t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
315 t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
316 t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
317 t4_write_reg(adap, EDC_BIST_CMD + idx,
318 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
319 i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
320 if (i)
321 return i;
322
323#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
324
325 for (i = 15; i >= 0; i--)
326 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
327 if (ecc)
328 *ecc = t4_read_reg64(adap, EDC_DATA(16));
329#undef EDC_DATA
330 return 0;
331}
332
333/*
334 * t4_mem_win_rw - read/write memory through PCIE memory window
335 * @adap: the adapter
336 * @addr: address of first byte requested
337 * @data: MEMWIN0_APERTURE bytes of data containing the requested address
338 * @dir: direction of transfer 1 => read, 0 => write
339 *
340 * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
341 * MEMWIN0_APERTURE-byte-aligned address that covers the requested
342 * address @addr.
343 */
344static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
345{
346 int i;
347
348 /*
349 * Setup offset into PCIE memory window. Address must be a
350 * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to
351 * ensure that changes propagate before we attempt to use the new
352 * values.)
353 */
354 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
355 addr & ~(MEMWIN0_APERTURE - 1));
356 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
357
358 /* Collecting data 4 bytes at a time up to MEMWIN0_APERTURE */
359 for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) {
360 if (dir)
361 *data++ = t4_read_reg(adap, (MEMWIN0_BASE + i));
362 else
363 t4_write_reg(adap, (MEMWIN0_BASE + i), *data++);
364 }
365
366 return 0;
367}
368
369/**
370 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
371 * @adap: the adapter
372 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
373 * @addr: address within indicated memory type
374 * @len: amount of memory to transfer
375 * @buf: host memory buffer
376 * @dir: direction of transfer 1 => read, 0 => write
377 *
378 * Reads/writes an [almost] arbitrary memory region in the firmware: the
379 * firmware memory address, length and host buffer must be aligned on
380 * 32-bit boudaries. The memory is transferred as a raw byte sequence
381 * from/to the firmware's memory. If this memory contains data
382 * structures which contain multi-byte integers, it's the callers
383 * responsibility to perform appropriate byte order conversions.
384 */
385static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
386 __be32 *buf, int dir)
387{
388 u32 pos, start, end, offset, memoffset;
389 int ret;
390
391 /*
392 * Argument sanity checks ...
393 */
394 if ((addr & 0x3) || (len & 0x3))
395 return -EINVAL;
396
397 /*
398 * Offset into the region of memory which is being accessed
399 * MEM_EDC0 = 0
400 * MEM_EDC1 = 1
401 * MEM_MC = 2
402 */
403 memoffset = (mtype * (5 * 1024 * 1024));
404
405 /* Determine the PCIE_MEM_ACCESS_OFFSET */
406 addr = addr + memoffset;
407
408 /*
409 * The underlying EDC/MC read routines read MEMWIN0_APERTURE bytes
410 * at a time so we need to round down the start and round up the end.
411 * We'll start copying out of the first line at (addr - start) a word
412 * at a time.
413 */
414 start = addr & ~(MEMWIN0_APERTURE-1);
415 end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
416 offset = (addr - start)/sizeof(__be32);
417
418 for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
419 __be32 data[MEMWIN0_APERTURE/sizeof(__be32)];
420
421 /*
422 * If we're writing, copy the data from the caller's memory
423 * buffer
424 */
425 if (!dir) {
426 /*
427 * If we're doing a partial write, then we need to do
428 * a read-modify-write ...
429 */
430 if (offset || len < MEMWIN0_APERTURE) {
431 ret = t4_mem_win_rw(adap, pos, data, 1);
432 if (ret)
433 return ret;
434 }
435 while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
436 len > 0) {
437 data[offset++] = *buf++;
438 len -= sizeof(__be32);
439 }
440 }
441
442 /*
443 * Transfer a block of memory and bail if there's an error.
444 */
445 ret = t4_mem_win_rw(adap, pos, data, dir);
446 if (ret)
447 return ret;
448
449 /*
450 * If we're reading, copy the data into the caller's memory
451 * buffer.
452 */
453 if (dir)
454 while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
455 len > 0) {
456 *buf++ = data[offset++];
457 len -= sizeof(__be32);
458 }
459 }
460
461 return 0;
462}
463
464int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
465 __be32 *buf)
466{
467 return t4_memory_rw(adap, mtype, addr, len, buf, 0);
468}
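/*
 * Usage sketch (illustrative): writing a 32-bit-aligned buffer into EDC0
 * through the memory window.  Both @addr and @len must be multiples of 4 or
 * -EINVAL is returned.
 *
 *	ret = t4_memory_write(adap, MEM_EDC0, addr, len, buf);
 *
 * Partial-aperture writes are handled by the read-modify-write logic in
 * t4_memory_rw() above.
 */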
469
470#define EEPROM_STAT_ADDR 0x7bfc
471#define VPD_BASE 0
472#define VPD_LEN 512
473
474/**
475 * t4_seeprom_wp - enable/disable EEPROM write protection
476 * @adapter: the adapter
477 * @enable: whether to enable or disable write protection
478 *
479 * Enables or disables write protection on the serial EEPROM.
480 */
481int t4_seeprom_wp(struct adapter *adapter, bool enable)
482{
483 unsigned int v = enable ? 0xc : 0;
484 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
485 return ret < 0 ? ret : 0;
486}
487
488/**
489 * get_vpd_params - read VPD parameters from VPD EEPROM
490 * @adapter: adapter to read
491 * @p: where to store the parameters
492 *
493 * Reads card parameters stored in VPD EEPROM.
494 */
495int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
496{
497 u32 cclk_param, cclk_val;
498 int i, ret;
499 int ec, sn;
500 u8 vpd[VPD_LEN], csum;
501 unsigned int vpdr_len, kw_offset, id_len;
502
503 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
504 if (ret < 0)
505 return ret;
506
507 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
508 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
509 return -EINVAL;
510 }
511
512 id_len = pci_vpd_lrdt_size(vpd);
513 if (id_len > ID_LEN)
514 id_len = ID_LEN;
515
516 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
517 if (i < 0) {
518 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
519 return -EINVAL;
520 }
521
522 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
523 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
524 if (vpdr_len + kw_offset > VPD_LEN) {
525 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
526 return -EINVAL;
527 }
528
529#define FIND_VPD_KW(var, name) do { \
530 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
531 if (var < 0) { \
532 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
533 return -EINVAL; \
534 } \
535 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
536} while (0)
537
538 FIND_VPD_KW(i, "RV");
539 for (csum = 0; i >= 0; i--)
540 csum += vpd[i];
541
542 if (csum) {
543 dev_err(adapter->pdev_dev,
544 "corrupted VPD EEPROM, actual csum %u\n", csum);
545 return -EINVAL;
546 }
547
548 FIND_VPD_KW(ec, "EC");
549 FIND_VPD_KW(sn, "SN");
550#undef FIND_VPD_KW
551
552 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
553 strim(p->id);
554 memcpy(p->ec, vpd + ec, EC_LEN);
555 strim(p->ec);
556 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
557 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
558 strim(p->sn);
559
560 /*
561 * Ask firmware for the Core Clock since it knows how to translate the
562 * Reference Clock ('V2') VPD field into a Core Clock value ...
563 */
564 cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
565 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
566 ret = t4_query_params(adapter, adapter->mbox, 0, 0,
567 1, &cclk_param, &cclk_val);
568 if (ret)
569 return ret;
570 p->cclk = cclk_val;
571
572 return 0;
573}
574
575/* serial flash and firmware constants */
576enum {
577 SF_ATTEMPTS = 10, /* max retries for SF operations */
578
579 /* flash command opcodes */
580 SF_PROG_PAGE = 2, /* program page */
581 SF_WR_DISABLE = 4, /* disable writes */
582 SF_RD_STATUS = 5, /* read status register */
583 SF_WR_ENABLE = 6, /* enable writes */
584 SF_RD_DATA_FAST = 0xb, /* read flash */
585 SF_RD_ID = 0x9f, /* read ID */
586 SF_ERASE_SECTOR = 0xd8, /* erase sector */
587
588 FW_MAX_SIZE = 512 * 1024,
589};
590
591/**
592 * sf1_read - read data from the serial flash
593 * @adapter: the adapter
594 * @byte_cnt: number of bytes to read
595 * @cont: whether another operation will be chained
596 * @lock: whether to lock SF for PL access only
597 * @valp: where to store the read data
598 *
599 * Reads up to 4 bytes of data from the serial flash. The location of
600 * the read needs to be specified prior to calling this by issuing the
601 * appropriate commands to the serial flash.
602 */
603static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
604 int lock, u32 *valp)
605{
606 int ret;
607
608 if (!byte_cnt || byte_cnt > 4)
609 return -EINVAL;
610 if (t4_read_reg(adapter, SF_OP) & BUSY)
611 return -EBUSY;
612 cont = cont ? SF_CONT : 0;
613 lock = lock ? SF_LOCK : 0;
614 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
615 ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
616 if (!ret)
617 *valp = t4_read_reg(adapter, SF_DATA);
618 return ret;
619}
620
621/**
622 * sf1_write - write data to the serial flash
623 * @adapter: the adapter
624 * @byte_cnt: number of bytes to write
625 * @cont: whether another operation will be chained
626 * @lock: whether to lock SF for PL access only
627 * @val: value to write
628 *
629 * Writes up to 4 bytes of data to the serial flash. The location of
630 * the write needs to be specified prior to calling this by issuing the
631 * appropriate commands to the serial flash.
632 */
633static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
634 int lock, u32 val)
635{
636 if (!byte_cnt || byte_cnt > 4)
637 return -EINVAL;
638 if (t4_read_reg(adapter, SF_OP) & BUSY)
639 return -EBUSY;
640 cont = cont ? SF_CONT : 0;
641 lock = lock ? SF_LOCK : 0;
642 t4_write_reg(adapter, SF_DATA, val);
643 t4_write_reg(adapter, SF_OP, lock |
644 cont | BYTECNT(byte_cnt - 1) | OP_WR);
645 return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
646}
647
648/**
649 * flash_wait_op - wait for a flash operation to complete
650 * @adapter: the adapter
651 * @attempts: max number of polls of the status register
652 * @delay: delay between polls in ms
653 *
654 * Wait for a flash operation to complete by polling the status register.
655 */
656static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
657{
658 int ret;
659 u32 status;
660
661 while (1) {
662 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
663 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
664 return ret;
665 if (!(status & 1))
666 return 0;
667 if (--attempts == 0)
668 return -EAGAIN;
669 if (delay)
670 msleep(delay);
671 }
672}
673
674/**
675 * t4_read_flash - read words from serial flash
676 * @adapter: the adapter
677 * @addr: the start address for the read
678 * @nwords: how many 32-bit words to read
679 * @data: where to store the read data
680 * @byte_oriented: whether to store data as bytes or as words
681 *
682 * Read the specified number of 32-bit words from the serial flash.
683 * If @byte_oriented is set the read data is stored as a byte array
684 * (i.e., big-endian), otherwise as 32-bit words in the platform's
685 * natural endianness.
686 */
687static int t4_read_flash(struct adapter *adapter, unsigned int addr,
688 unsigned int nwords, u32 *data, int byte_oriented)
689{
690 int ret;
691
692 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
693 return -EINVAL;
694
695 addr = swab32(addr) | SF_RD_DATA_FAST;
696
697 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
698 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
699 return ret;
700
701 for ( ; nwords; nwords--, data++) {
702 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
703 if (nwords == 1)
704 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
705 if (ret)
706 return ret;
707 if (byte_oriented)
708 *data = htonl(*data);
709 }
710 return 0;
711}
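/*
 * Usage sketch (illustrative): reading the start of the firmware image from
 * serial flash.  With @byte_oriented set the words are stored big-endian so
 * the buffer can be treated as a raw byte image of the flash contents.
 *
 *	u32 hdr_words[4];
 *
 *	ret = t4_read_flash(adap, adap->params.sf_fw_start,
 *			    ARRAY_SIZE(hdr_words), hdr_words, 1);
 */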
712
713/**
714 * t4_write_flash - write up to a page of data to the serial flash
715 * @adapter: the adapter
716 * @addr: the start address to write
717 * @n: length of data to write in bytes
718 * @data: the data to write
719 *
720 * Writes up to a page of data (256 bytes) to the serial flash starting
721 * at the given address. All the data must be written to the same page.
722 */
723static int t4_write_flash(struct adapter *adapter, unsigned int addr,
724 unsigned int n, const u8 *data)
725{
726 int ret;
727 u32 buf[64];
728 unsigned int i, c, left, val, offset = addr & 0xff;
729
730 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
731 return -EINVAL;
732
733 val = swab32(addr) | SF_PROG_PAGE;
734
735 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
736 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
737 goto unlock;
738
739 for (left = n; left; left -= c) {
740 c = min(left, 4U);
741 for (val = 0, i = 0; i < c; ++i)
742 val = (val << 8) + *data++;
743
744 ret = sf1_write(adapter, c, c != left, 1, val);
745 if (ret)
746 goto unlock;
747 }
748 ret = flash_wait_op(adapter, 8, 1);
749 if (ret)
750 goto unlock;
751
752 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
753
754 /* Read the page to verify the write succeeded */
755 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
756 if (ret)
757 return ret;
758
759 if (memcmp(data - n, (u8 *)buf + offset, n)) {
760 dev_err(adapter->pdev_dev,
761 "failed to correctly write the flash page at %#x\n",
762 addr);
763 return -EIO;
764 }
765 return 0;
766
767unlock:
768 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
769 return ret;
770}
771
772/**
773 * get_fw_version - read the firmware version
774 * @adapter: the adapter
775 * @vers: where to place the version
776 *
777 * Reads the FW version from flash.
778 */
779static int get_fw_version(struct adapter *adapter, u32 *vers)
780{
781 return t4_read_flash(adapter, adapter->params.sf_fw_start +
782 offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
783}
784
785/**
786 * get_tp_version - read the TP microcode version
787 * @adapter: the adapter
788 * @vers: where to place the version
789 *
790 * Reads the TP microcode version from flash.
791 */
792static int get_tp_version(struct adapter *adapter, u32 *vers)
793{
794 return t4_read_flash(adapter, adapter->params.sf_fw_start +
795 offsetof(struct fw_hdr, tp_microcode_ver),
796 1, vers, 0);
797}
798
799/**
800 * t4_check_fw_version - check if the FW is compatible with this driver
801 * @adapter: the adapter
802 *
803 * Checks if an adapter's FW is compatible with the driver. Returns 0
804 * if there's an exact match, a negative error if the version could not be
805 * read or there's a major version mismatch, and a positive value if the
806 * expected major version is found but there's a minor version mismatch.
807 */
808int t4_check_fw_version(struct adapter *adapter)
809{
810 u32 api_vers[2];
811 int ret, major, minor, micro;
812
813 ret = get_fw_version(adapter, &adapter->params.fw_vers);
814 if (!ret)
815 ret = get_tp_version(adapter, &adapter->params.tp_vers);
816 if (!ret)
817 ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
818 offsetof(struct fw_hdr, intfver_nic),
819 2, api_vers, 1);
820 if (ret)
821 return ret;
822
823 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
824 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
825 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
826 memcpy(adapter->params.api_vers, api_vers,
827 sizeof(adapter->params.api_vers));
828
829 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
830 dev_err(adapter->pdev_dev,
831 "card FW has major version %u, driver wants %u\n",
832 major, FW_VERSION_MAJOR);
833 return -EINVAL;
834 }
835
836 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
837 return 0; /* perfect match */
838
839 /* Minor/micro version mismatch. Report it but often it's OK. */
840 return 1;
841}
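/*
 * Usage sketch (illustrative): acting on t4_check_fw_version()'s tri-state
 * result during initialization.
 *
 *	ret = t4_check_fw_version(adap);
 *	if (ret < 0)
 *		goto err;
 *	if (ret > 0)
 *		dev_warn(adap->pdev_dev, "minor FW version mismatch\n");
 *
 * A negative value means the version was unreadable or the major version
 * differs; a positive value is only a minor/micro mismatch; 0 is an exact
 * match.
 */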
842
843/**
844 * t4_flash_erase_sectors - erase a range of flash sectors
845 * @adapter: the adapter
846 * @start: the first sector to erase
847 * @end: the last sector to erase
848 *
849 * Erases the sectors in the given inclusive range.
850 */
851static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
852{
853 int ret = 0;
854
855 while (start <= end) {
856 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
857 (ret = sf1_write(adapter, 4, 0, 1,
858 SF_ERASE_SECTOR | (start << 8))) != 0 ||
859 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
860 dev_err(adapter->pdev_dev,
861 "erase of flash sector %d failed, error %d\n",
862 start, ret);
863 break;
864 }
865 start++;
866 }
867 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
868 return ret;
869}
870
871/**
872 * t4_flash_cfg_addr - return the address of the flash configuration file
873 * @adapter: the adapter
874 *
875 * Return the address within the flash where the Firmware Configuration
876 * File is stored.
877 */
878unsigned int t4_flash_cfg_addr(struct adapter *adapter)
879{
880 if (adapter->params.sf_size == 0x100000)
881 return FLASH_FPGA_CFG_START;
882 else
883 return FLASH_CFG_START;
884}
885
886/**
887 * t4_load_cfg - download config file
888 * @adap: the adapter
889 * @cfg_data: the cfg text file to write
890 * @size: text file size
891 *
892 * Write the supplied config text file to the card's serial flash.
893 */
894int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
895{
896 int ret, i, n;
897 unsigned int addr;
898 unsigned int flash_cfg_start_sec;
899 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
900
901 addr = t4_flash_cfg_addr(adap);
902 flash_cfg_start_sec = addr / SF_SEC_SIZE;
903
904 if (size > FLASH_CFG_MAX_SIZE) {
905 dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
906 FLASH_CFG_MAX_SIZE);
907 return -EFBIG;
908 }
909
910 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
911 sf_sec_size);
912 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
913 flash_cfg_start_sec + i - 1);
914 /*
915 * If size == 0 then we're simply erasing the FLASH sectors associated
916 * with the on-adapter Firmware Configuration File.
917 */
918 if (ret || size == 0)
919 goto out;
920
921 /* this will write to the flash up to SF_PAGE_SIZE at a time */
922 for (i = 0; i < size; i += SF_PAGE_SIZE) {
923 if ((size - i) < SF_PAGE_SIZE)
924 n = size - i;
925 else
926 n = SF_PAGE_SIZE;
927 ret = t4_write_flash(adap, addr, n, cfg_data);
928 if (ret)
929 goto out;
930
931 addr += SF_PAGE_SIZE;
932 cfg_data += SF_PAGE_SIZE;
933 }
934
935out:
936 if (ret)
937 dev_err(adap->pdev_dev, "config file %s failed %d\n",
938 (size == 0 ? "clear" : "download"), ret);
939 return ret;
940}
941
942/**
943 * t4_load_fw - download firmware
944 * @adap: the adapter
945 * @fw_data: the firmware image to write
946 * @size: image size
947 *
948 * Write the supplied firmware image to the card's serial flash.
949 */
950int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
951{
952 u32 csum;
953 int ret, addr;
954 unsigned int i;
955 u8 first_page[SF_PAGE_SIZE];
956 const u32 *p = (const u32 *)fw_data;
957 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
958 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
959 unsigned int fw_img_start = adap->params.sf_fw_start;
960 unsigned int fw_start_sec = fw_img_start / sf_sec_size;
961
962 if (!size) {
963 dev_err(adap->pdev_dev, "FW image has no data\n");
964 return -EINVAL;
965 }
966 if (size & 511) {
967 dev_err(adap->pdev_dev,
968 "FW image size not multiple of 512 bytes\n");
969 return -EINVAL;
970 }
971 if (ntohs(hdr->len512) * 512 != size) {
972 dev_err(adap->pdev_dev,
973 "FW image size differs from size in FW header\n");
974 return -EINVAL;
975 }
976 if (size > FW_MAX_SIZE) {
977 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
978 FW_MAX_SIZE);
979 return -EFBIG;
980 }
981
982 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
983 csum += ntohl(p[i]);
984
985 if (csum != 0xffffffff) {
986 dev_err(adap->pdev_dev,
987 "corrupted firmware image, checksum %#x\n", csum);
988 return -EINVAL;
989 }
990
991 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
992 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
993 if (ret)
994 goto out;
995
996 /*
997 * We write the correct version at the end so the driver can see a bad
998 * version if the FW write fails. Start by writing a copy of the
999 * first page with a bad version.
1000 */
1001 memcpy(first_page, fw_data, SF_PAGE_SIZE);
1002 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
1003 ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
1004 if (ret)
1005 goto out;
1006
1007 addr = fw_img_start;
1008 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1009 addr += SF_PAGE_SIZE;
1010 fw_data += SF_PAGE_SIZE;
1011 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
1012 if (ret)
1013 goto out;
1014 }
1015
1016 ret = t4_write_flash(adap,
1017 fw_img_start + offsetof(struct fw_hdr, fw_ver),
1018 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
1019out:
1020 if (ret)
1021 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
1022 ret);
1023 return ret;
1024}
1025
1026#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1027 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
1028
1029/**
1030 * t4_link_start - apply link configuration to MAC/PHY
1031 * @phy: the PHY to setup
1032 * @mac: the MAC to setup
1033 * @lc: the requested link configuration
1034 *
1035 * Set up a port's MAC and PHY according to a desired link configuration.
1036 * - If the PHY can auto-negotiate first decide what to advertise, then
1037 * enable/disable auto-negotiation as desired, and reset.
1038 * - If the PHY does not auto-negotiate just reset it.
1039 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1040 * otherwise do it later based on the outcome of auto-negotiation.
1041 */
1042int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1043 struct link_config *lc)
1044{
1045 struct fw_port_cmd c;
1046 unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
1047
1048 lc->link_ok = 0;
1049 if (lc->requested_fc & PAUSE_RX)
1050 fc |= FW_PORT_CAP_FC_RX;
1051 if (lc->requested_fc & PAUSE_TX)
1052 fc |= FW_PORT_CAP_FC_TX;
1053
1054 memset(&c, 0, sizeof(c));
1055 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
1056 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
1057 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1058 FW_LEN16(c));
1059
1060 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1061 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1062 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1063 } else if (lc->autoneg == AUTONEG_DISABLE) {
1064 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1065 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1066 } else
1067 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1068
1069 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1070}
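/*
 * Usage sketch (illustrative): applying a port's requested link settings.
 * "pi" stands for the driver's per-port state that carries a struct
 * link_config; the field names here are placeholders, not part of this file.
 *
 *	ret = t4_link_start(adap, adap->mbox, pi->port_id, &pi->link_cfg);
 *
 * With autonegotiation enabled the advertised capabilities are sent to the
 * firmware; otherwise the requested fixed speed and pause settings are used.
 */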
1071
1072/**
1073 * t4_restart_aneg - restart autonegotiation
1074 * @adap: the adapter
1075 * @mbox: mbox to use for the FW command
1076 * @port: the port id
1077 *
1078 * Restarts autonegotiation for the selected port.
1079 */
1080int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1081{
1082 struct fw_port_cmd c;
1083
1084 memset(&c, 0, sizeof(c));
1085 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
1086 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
1087 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1088 FW_LEN16(c));
1089 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1090 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1091}
1092
1093typedef void (*int_handler_t)(struct adapter *adap);
1094
1095struct intr_info {
1096 unsigned int mask; /* bits to check in interrupt status */
1097 const char *msg; /* message to print or NULL */
1098 short stat_idx; /* stat counter to increment or -1 */
1099 unsigned short fatal; /* whether the condition reported is fatal */
1100 int_handler_t int_handler; /* platform-specific int handler */
1101};
1102
1103/**
1104 * t4_handle_intr_status - table driven interrupt handler
1105 * @adapter: the adapter that generated the interrupt
1106 * @reg: the interrupt status register to process
1107 * @acts: table of interrupt actions
1108 *
1109 * A table driven interrupt handler that applies a set of masks to an
1110 * interrupt status word and performs the corresponding actions if the
1111 * interrupts described by the mask have occurred. The actions include
1112 * optionally emitting a warning or alert message. The table is terminated
1113 * by an entry specifying mask 0. Returns the number of fatal interrupt
1114 * conditions.
1115 */
1116static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1117 const struct intr_info *acts)
1118{
1119 int fatal = 0;
1120 unsigned int mask = 0;
1121 unsigned int status = t4_read_reg(adapter, reg);
1122
1123 for ( ; acts->mask; ++acts) {
1124 if (!(status & acts->mask))
1125 continue;
1126 if (acts->fatal) {
1127 fatal++;
1128 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1129 status & acts->mask);
1130 } else if (acts->msg && printk_ratelimit())
1131 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1132 status & acts->mask);
1133 if (acts->int_handler)
1134 acts->int_handler(adapter);
1135 mask |= acts->mask;
1136 }
1137 status &= mask;
1138 if (status) /* clear processed interrupts */
1139 t4_write_reg(adapter, reg, status);
1140 return fatal;
1141}
1142
1143/*
1144 * Interrupt handler for the PCIE module.
1145 */
1146static void pcie_intr_handler(struct adapter *adapter)
1147{
1148 static const struct intr_info sysbus_intr_info[] = {
1149 { RNPP, "RXNP array parity error", -1, 1 },
1150 { RPCP, "RXPC array parity error", -1, 1 },
1151 { RCIP, "RXCIF array parity error", -1, 1 },
1152 { RCCP, "Rx completions control array parity error", -1, 1 },
1153 { RFTP, "RXFT array parity error", -1, 1 },
1154 { 0 }
1155 };
1156 static const struct intr_info pcie_port_intr_info[] = {
1157 { TPCP, "TXPC array parity error", -1, 1 },
1158 { TNPP, "TXNP array parity error", -1, 1 },
1159 { TFTP, "TXFT array parity error", -1, 1 },
1160 { TCAP, "TXCA array parity error", -1, 1 },
1161 { TCIP, "TXCIF array parity error", -1, 1 },
1162 { RCAP, "RXCA array parity error", -1, 1 },
1163 { OTDD, "outbound request TLP discarded", -1, 1 },
1164 { RDPE, "Rx data parity error", -1, 1 },
1165 { TDUE, "Tx uncorrectable data error", -1, 1 },
1166 { 0 }
1167 };
1168 static const struct intr_info pcie_intr_info[] = {
1169 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1170 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1171 { MSIDATAPERR, "MSI data parity error", -1, 1 },
1172 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1173 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1174 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1175 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1176 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1177 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1178 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1179 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1180 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1181 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1182 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1183 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1184 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1185 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1186 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1187 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1188 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1189 { FIDPERR, "PCI FID parity error", -1, 1 },
1190 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1191 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
1192 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1193 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1194 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
1195 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
1196 { PCIESINT, "PCI core secondary fault", -1, 1 },
1197 { PCIEPINT, "PCI core primary fault", -1, 1 },
1198 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
1199 { 0 }
1200 };
1201
1202 int fat;
1203
1204 fat = t4_handle_intr_status(adapter,
1205 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1206 sysbus_intr_info) +
1207 t4_handle_intr_status(adapter,
1208 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1209 pcie_port_intr_info) +
1210 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
1211 if (fat)
1212 t4_fatal_err(adapter);
1213}
1214
1215/*
1216 * TP interrupt handler.
1217 */
1218static void tp_intr_handler(struct adapter *adapter)
1219{
1220 static const struct intr_info tp_intr_info[] = {
1221 { 0x3fffffff, "TP parity error", -1, 1 },
1222 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1223 { 0 }
1224 };
1225
1226 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1227 t4_fatal_err(adapter);
1228}
1229
1230/*
1231 * SGE interrupt handler.
1232 */
1233static void sge_intr_handler(struct adapter *adapter)
1234{
1235 u64 v;
1236
1237 static const struct intr_info sge_intr_info[] = {
1238 { ERR_CPL_EXCEED_IQE_SIZE,
1239 "SGE received CPL exceeding IQE size", -1, 1 },
1240 { ERR_INVALID_CIDX_INC,
1241 "SGE GTS CIDX increment too large", -1, 0 },
1242 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1243 { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
1244 { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
1245 { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
1246 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1247 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1248 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1249 0 },
1250 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1251 0 },
1252 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1253 0 },
1254 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1255 0 },
1256 { ERR_ING_CTXT_PRIO,
1257 "SGE too many priority ingress contexts", -1, 0 },
1258 { ERR_EGR_CTXT_PRIO,
1259 "SGE too many priority egress contexts", -1, 0 },
1260 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1261 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1262 { 0 }
1263 };
1264
1265 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
1266 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
1267 if (v) {
1268 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
1269 (unsigned long long)v);
1270 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1271 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1272 }
1273
1274 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1275 v != 0)
1276 t4_fatal_err(adapter);
1277}
1278
1279/*
1280 * CIM interrupt handler.
1281 */
1282static void cim_intr_handler(struct adapter *adapter)
1283{
1284 static const struct intr_info cim_intr_info[] = {
1285 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1286 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1287 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1288 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1289 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1290 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1291 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1292 { 0 }
1293 };
1294 static const struct intr_info cim_upintr_info[] = {
1295 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1296 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1297 { ILLWRINT, "CIM illegal write", -1, 1 },
1298 { ILLRDINT, "CIM illegal read", -1, 1 },
1299 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1300 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1301 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1302 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1303 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1304 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1305 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1306 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1307 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1308 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1309 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1310 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1311 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1312 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1313 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1314 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1315 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1316 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1317 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1318 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1319 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1320 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1321 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1322 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1323 { 0 }
1324 };
1325
1326 int fat;
1327
1328 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1329 cim_intr_info) +
1330 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1331 cim_upintr_info);
1332 if (fat)
1333 t4_fatal_err(adapter);
1334}
1335
1336/*
1337 * ULP RX interrupt handler.
1338 */
1339static void ulprx_intr_handler(struct adapter *adapter)
1340{
1341 static const struct intr_info ulprx_intr_info[] = {
1342 { 0x1800000, "ULPRX context error", -1, 1 },
1343 { 0x7fffff, "ULPRX parity error", -1, 1 },
1344 { 0 }
1345 };
1346
1347 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1348 t4_fatal_err(adapter);
1349}
1350
1351/*
1352 * ULP TX interrupt handler.
1353 */
1354static void ulptx_intr_handler(struct adapter *adapter)
1355{
1356 static const struct intr_info ulptx_intr_info[] = {
1357 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1358 0 },
1359 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1360 0 },
1361 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1362 0 },
1363 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1364 0 },
1365 { 0xfffffff, "ULPTX parity error", -1, 1 },
1366 { 0 }
1367 };
1368
1369 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1370 t4_fatal_err(adapter);
1371}
1372
1373/*
1374 * PM TX interrupt handler.
1375 */
1376static void pmtx_intr_handler(struct adapter *adapter)
1377{
1378 static const struct intr_info pmtx_intr_info[] = {
1379 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1380 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1381 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1382 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1383 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1384 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1385 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1386 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1387 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1388 { 0 }
1389 };
1390
1391 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1392 t4_fatal_err(adapter);
1393}
1394
1395/*
1396 * PM RX interrupt handler.
1397 */
1398static void pmrx_intr_handler(struct adapter *adapter)
1399{
1400 static const struct intr_info pmrx_intr_info[] = {
1401 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1402 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1403 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1404 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1405 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1406 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1407 { 0 }
1408 };
1409
1410 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1411 t4_fatal_err(adapter);
1412}
1413
1414/*
1415 * CPL switch interrupt handler.
1416 */
1417static void cplsw_intr_handler(struct adapter *adapter)
1418{
1419 static const struct intr_info cplsw_intr_info[] = {
1420 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1421 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1422 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1423 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1424 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1425 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1426 { 0 }
1427 };
1428
1429 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1430 t4_fatal_err(adapter);
1431}
1432
1433/*
1434 * LE interrupt handler.
1435 */
1436static void le_intr_handler(struct adapter *adap)
1437{
1438 static const struct intr_info le_intr_info[] = {
1439 { LIPMISS, "LE LIP miss", -1, 0 },
1440 { LIP0, "LE 0 LIP error", -1, 0 },
1441 { PARITYERR, "LE parity error", -1, 1 },
1442 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1443 { REQQPARERR, "LE request queue parity error", -1, 1 },
1444 { 0 }
1445 };
1446
1447 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1448 t4_fatal_err(adap);
1449}
1450
1451/*
1452 * MPS interrupt handler.
1453 */
1454static void mps_intr_handler(struct adapter *adapter)
1455{
1456 static const struct intr_info mps_rx_intr_info[] = {
1457 { 0xffffff, "MPS Rx parity error", -1, 1 },
1458 { 0 }
1459 };
1460 static const struct intr_info mps_tx_intr_info[] = {
1461 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1462 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1463 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1464 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1465 { BUBBLE, "MPS Tx underflow", -1, 1 },
1466 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1467 { FRMERR, "MPS Tx framing error", -1, 1 },
1468 { 0 }
1469 };
1470 static const struct intr_info mps_trc_intr_info[] = {
1471 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1472 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1473 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1474 { 0 }
1475 };
1476 static const struct intr_info mps_stat_sram_intr_info[] = {
1477 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1478 { 0 }
1479 };
1480 static const struct intr_info mps_stat_tx_intr_info[] = {
1481 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1482 { 0 }
1483 };
1484 static const struct intr_info mps_stat_rx_intr_info[] = {
1485 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1486 { 0 }
1487 };
1488 static const struct intr_info mps_cls_intr_info[] = {
1489 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1490 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1491 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1492 { 0 }
1493 };
1494
1495 int fat;
1496
1497 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1498 mps_rx_intr_info) +
1499 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1500 mps_tx_intr_info) +
1501 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1502 mps_trc_intr_info) +
1503 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1504 mps_stat_sram_intr_info) +
1505 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1506 mps_stat_tx_intr_info) +
1507 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1508 mps_stat_rx_intr_info) +
1509 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1510 mps_cls_intr_info);
1511
1512 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1513 RXINT | TXINT | STATINT);
1514 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1515 if (fat)
1516 t4_fatal_err(adapter);
1517}
1518
1519#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1520
1521/*
1522 * EDC/MC interrupt handler.
1523 */
1524static void mem_intr_handler(struct adapter *adapter, int idx)
1525{
1526 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
1527
1528 unsigned int addr, cnt_addr, v;
1529
1530 if (idx <= MEM_EDC1) {
1531 addr = EDC_REG(EDC_INT_CAUSE, idx);
1532 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1533 } else {
1534 addr = MC_INT_CAUSE;
1535 cnt_addr = MC_ECC_STATUS;
1536 }
1537
1538 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1539 if (v & PERR_INT_CAUSE)
1540 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1541 name[idx]);
1542 if (v & ECC_CE_INT_CAUSE) {
1543 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1544
1545 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1546 if (printk_ratelimit())
1547 dev_warn(adapter->pdev_dev,
1548 "%u %s correctable ECC data error%s\n",
1549 cnt, name[idx], cnt > 1 ? "s" : "");
1550 }
1551 if (v & ECC_UE_INT_CAUSE)
1552 dev_alert(adapter->pdev_dev,
1553 "%s uncorrectable ECC data error\n", name[idx]);
1554
1555 t4_write_reg(adapter, addr, v);
1556 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1557 t4_fatal_err(adapter);
1558}
1559
1560/*
1561 * MA interrupt handler.
1562 */
1563static void ma_intr_handler(struct adapter *adap)
1564{
1565 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1566
1567 if (status & MEM_PERR_INT_CAUSE)
1568 dev_alert(adap->pdev_dev,
1569 "MA parity error, parity status %#x\n",
1570 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1571 if (status & MEM_WRAP_INT_CAUSE) {
1572 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1573 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1574 "client %u to address %#x\n",
1575 MEM_WRAP_CLIENT_NUM_GET(v),
1576 MEM_WRAP_ADDRESS_GET(v) << 4);
1577 }
1578 t4_write_reg(adap, MA_INT_CAUSE, status);
1579 t4_fatal_err(adap);
1580}
1581
1582/*
1583 * SMB interrupt handler.
1584 */
1585static void smb_intr_handler(struct adapter *adap)
1586{
Joe Perches005b5712010-12-14 21:36:53 +00001587 static const struct intr_info smb_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001588 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1589 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1590 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1591 { 0 }
1592 };
1593
1594 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1595 t4_fatal_err(adap);
1596}
1597
1598/*
1599 * NC-SI interrupt handler.
1600 */
1601static void ncsi_intr_handler(struct adapter *adap)
1602{
Joe Perches005b5712010-12-14 21:36:53 +00001603 static const struct intr_info ncsi_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001604 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1605 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1606 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1607 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1608 { 0 }
1609 };
1610
1611 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1612 t4_fatal_err(adap);
1613}
1614
1615/*
1616 * XGMAC interrupt handler.
1617 */
1618static void xgmac_intr_handler(struct adapter *adap, int port)
1619{
1620 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
1621
1622 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1623 if (!v)
1624 return;
1625
1626 if (v & TXFIFO_PRTY_ERR)
1627 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1628 port);
1629 if (v & RXFIFO_PRTY_ERR)
1630 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1631 port);
1632 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1633 t4_fatal_err(adap);
1634}
1635
1636/*
1637 * PL interrupt handler.
1638 */
1639static void pl_intr_handler(struct adapter *adap)
1640{
Joe Perches005b5712010-12-14 21:36:53 +00001641 static const struct intr_info pl_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001642 { FATALPERR, "T4 fatal parity error", -1, 1 },
1643 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1644 { 0 }
1645 };
1646
1647 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1648 t4_fatal_err(adap);
1649}
1650
Dimitris Michailidis63bccee2010-08-02 13:19:16 +00001651#define PF_INTR_MASK (PFSW)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001652#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1653 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1654 CPL_SWITCH | SGE | ULP_TX)
1655
1656/**
1657 * t4_slow_intr_handler - control path interrupt handler
1658 * @adapter: the adapter
1659 *
1660 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1661 * The designation 'slow' is because it involves register reads, while
1662 * data interrupts typically don't involve any MMIOs.
1663 */
1664int t4_slow_intr_handler(struct adapter *adapter)
1665{
1666 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1667
1668 if (!(cause & GLBL_INTR_MASK))
1669 return 0;
1670 if (cause & CIM)
1671 cim_intr_handler(adapter);
1672 if (cause & MPS)
1673 mps_intr_handler(adapter);
1674 if (cause & NCSI)
1675 ncsi_intr_handler(adapter);
1676 if (cause & PL)
1677 pl_intr_handler(adapter);
1678 if (cause & SMB)
1679 smb_intr_handler(adapter);
1680 if (cause & XGMAC0)
1681 xgmac_intr_handler(adapter, 0);
1682 if (cause & XGMAC1)
1683 xgmac_intr_handler(adapter, 1);
1684 if (cause & XGMAC_KR0)
1685 xgmac_intr_handler(adapter, 2);
1686 if (cause & XGMAC_KR1)
1687 xgmac_intr_handler(adapter, 3);
1688 if (cause & PCIE)
1689 pcie_intr_handler(adapter);
1690 if (cause & MC)
1691 mem_intr_handler(adapter, MEM_MC);
1692 if (cause & EDC0)
1693 mem_intr_handler(adapter, MEM_EDC0);
1694 if (cause & EDC1)
1695 mem_intr_handler(adapter, MEM_EDC1);
1696 if (cause & LE)
1697 le_intr_handler(adapter);
1698 if (cause & TP)
1699 tp_intr_handler(adapter);
1700 if (cause & MA)
1701 ma_intr_handler(adapter);
1702 if (cause & PM_TX)
1703 pmtx_intr_handler(adapter);
1704 if (cause & PM_RX)
1705 pmrx_intr_handler(adapter);
1706 if (cause & ULP_RX)
1707 ulprx_intr_handler(adapter);
1708 if (cause & CPL_SWITCH)
1709 cplsw_intr_handler(adapter);
1710 if (cause & SGE)
1711 sge_intr_handler(adapter);
1712 if (cause & ULP_TX)
1713 ulptx_intr_handler(adapter);
1714
1715 /* Clear the interrupts just processed for which we are the master. */
1716 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1717 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1718 return 1;
1719}
1720
1721/**
1722 * t4_intr_enable - enable interrupts
1723 * @adapter: the adapter whose interrupts should be enabled
1724 *
1725 * Enable PF-specific interrupts for the calling function and the top-level
1726 * interrupt concentrator for global interrupts. Interrupts are already
1727 * enabled at each module, here we just enable the roots of the interrupt
 1728 * enabled at each module; here we just enable the roots of the interrupt
1729 *
1730 * Note: this function should be called only when the driver manages
1731 * non PF-specific interrupts from the various HW modules. Only one PCI
1732 * function at a time should be doing this.
1733 */
1734void t4_intr_enable(struct adapter *adapter)
1735{
1736 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1737
1738 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1739 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1740 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1741 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1742 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1743 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1744 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
Vipul Pandya840f3002012-09-05 02:01:55 +00001745 DBFIFO_HP_INT | DBFIFO_LP_INT |
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001746 EGRESS_SIZE_ERR);
1747 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1748 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1749}
1750
1751/**
1752 * t4_intr_disable - disable interrupts
1753 * @adapter: the adapter whose interrupts should be disabled
1754 *
1755 * Disable interrupts. We only disable the top-level interrupt
1756 * concentrators. The caller must be a PCI function managing global
1757 * interrupts.
1758 */
1759void t4_intr_disable(struct adapter *adapter)
1760{
1761 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1762
1763 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1764 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1765}
1766
1767/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001768 * hash_mac_addr - return the hash value of a MAC address
1769 * @addr: the 48-bit Ethernet MAC address
1770 *
1771 * Hashes a MAC address according to the hash function used by HW inexact
1772 * (hash) address matching.
1773 */
1774static int hash_mac_addr(const u8 *addr)
1775{
1776 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1777 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1778 a ^= b;
1779 a ^= (a >> 12);
1780 a ^= (a >> 6);
1781 return a & 0x3f;
1782}
1783
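/*
 * Illustrative sketch, not part of the driver: how the 6-bit hash above is
 * typically consumed -- each address sets one bit in a 64-bit vector of the
 * kind later written with t4_set_addr_hash().
 */
static u64 example_hash_vector(const u8 **addrs, unsigned int naddrs)
{
	u64 vec = 0;
	unsigned int i;

	for (i = 0; i < naddrs; i++)
		vec |= 1ULL << hash_mac_addr(addrs[i]);	/* bit index 0..63 */
	return vec;
}
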
1784/**
1785 * t4_config_rss_range - configure a portion of the RSS mapping table
1786 * @adapter: the adapter
1787 * @mbox: mbox to use for the FW command
1788 * @viid: virtual interface whose RSS subtable is to be written
1789 * @start: start entry in the table to write
1790 * @n: how many table entries to write
1791 * @rspq: values for the response queue lookup table
1792 * @nrspq: number of values in @rspq
1793 *
1794 * Programs the selected part of the VI's RSS mapping table with the
1795 * provided values. If @nrspq < @n the supplied values are used repeatedly
1796 * until the full table range is populated.
1797 *
1798 * The caller must ensure the values in @rspq are in the range allowed for
1799 * @viid.
1800 */
1801int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1802 int start, int n, const u16 *rspq, unsigned int nrspq)
1803{
1804 int ret;
1805 const u16 *rsp = rspq;
1806 const u16 *rsp_end = rspq + nrspq;
1807 struct fw_rss_ind_tbl_cmd cmd;
1808
1809 memset(&cmd, 0, sizeof(cmd));
1810 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
1811 FW_CMD_REQUEST | FW_CMD_WRITE |
1812 FW_RSS_IND_TBL_CMD_VIID(viid));
1813 cmd.retval_len16 = htonl(FW_LEN16(cmd));
1814
1815 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
1816 while (n > 0) {
1817 int nq = min(n, 32);
1818 __be32 *qp = &cmd.iq0_to_iq2;
1819
1820 cmd.niqid = htons(nq);
1821 cmd.startidx = htons(start);
1822
1823 start += nq;
1824 n -= nq;
1825
1826 while (nq > 0) {
1827 unsigned int v;
1828
1829 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
1830 if (++rsp >= rsp_end)
1831 rsp = rspq;
1832 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
1833 if (++rsp >= rsp_end)
1834 rsp = rspq;
1835 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
1836 if (++rsp >= rsp_end)
1837 rsp = rspq;
1838
1839 *qp++ = htonl(v);
1840 nq -= 3;
1841 }
1842
1843 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
1844 if (ret)
1845 return ret;
1846 }
1847 return 0;
1848}
1849
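/*
 * Illustrative usage sketch, not part of the driver: populate a VI's whole
 * RSS slice by cycling through a small set of ingress queue IDs.  The queue
 * IDs and the mailbox are placeholders chosen by the caller; rss_size would
 * normally come from t4_alloc_vi().
 */
static int example_setup_rss(struct adapter *adap, unsigned int mbox,
			     unsigned int viid, unsigned int rss_size)
{
	static const u16 rspq[] = { 0, 1, 2, 3 };	/* hypothetical IQ IDs */

	/* entries [0, rss_size) are filled by repeating rspq[] */
	return t4_config_rss_range(adap, mbox, viid, 0, rss_size,
				   rspq, ARRAY_SIZE(rspq));
}
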
1850/**
1851 * t4_config_glbl_rss - configure the global RSS mode
1852 * @adapter: the adapter
1853 * @mbox: mbox to use for the FW command
1854 * @mode: global RSS mode
1855 * @flags: mode-specific flags
1856 *
1857 * Sets the global RSS mode.
1858 */
1859int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1860 unsigned int flags)
1861{
1862 struct fw_rss_glb_config_cmd c;
1863
1864 memset(&c, 0, sizeof(c));
1865 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
1866 FW_CMD_REQUEST | FW_CMD_WRITE);
1867 c.retval_len16 = htonl(FW_LEN16(c));
1868 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
1869 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1870 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
1871 c.u.basicvirtual.mode_pkd =
1872 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1873 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
1874 } else
1875 return -EINVAL;
1876 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1877}
1878
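/*
 * Illustrative usage sketch, not part of the driver: select the "basic
 * virtual" global RSS mode.  The mode-specific flags are left at 0 purely
 * as a placeholder; a real caller would pick the bits it needs from
 * t4fw_api.h.
 */
static int example_set_rss_mode(struct adapter *adap, unsigned int mbox)
{
	return t4_config_glbl_rss(adap, mbox,
				  FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, 0);
}
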
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001879/**
1880 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
1881 * @adap: the adapter
1882 * @v4: holds the TCP/IP counter values
1883 * @v6: holds the TCP/IPv6 counter values
1884 *
1885 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
1886 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
1887 */
1888void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1889 struct tp_tcp_stats *v6)
1890{
1891 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
1892
1893#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
1894#define STAT(x) val[STAT_IDX(x)]
1895#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
1896
1897 if (v4) {
1898 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1899 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
1900 v4->tcpOutRsts = STAT(OUT_RST);
1901 v4->tcpInSegs = STAT64(IN_SEG);
1902 v4->tcpOutSegs = STAT64(OUT_SEG);
1903 v4->tcpRetransSegs = STAT64(RXT_SEG);
1904 }
1905 if (v6) {
1906 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1907 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
1908 v6->tcpOutRsts = STAT(OUT_RST);
1909 v6->tcpInSegs = STAT64(IN_SEG);
1910 v6->tcpOutSegs = STAT64(OUT_SEG);
1911 v6->tcpRetransSegs = STAT64(RXT_SEG);
1912 }
1913#undef STAT64
1914#undef STAT
1915#undef STAT_IDX
1916}
1917
1918/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001919 * t4_read_mtu_tbl - returns the values in the HW path MTU table
1920 * @adap: the adapter
1921 * @mtus: where to store the MTU values
1922 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
1923 *
1924 * Reads the HW path MTU table.
1925 */
1926void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
1927{
1928 u32 v;
1929 int i;
1930
1931 for (i = 0; i < NMTUS; ++i) {
1932 t4_write_reg(adap, TP_MTU_TABLE,
1933 MTUINDEX(0xff) | MTUVALUE(i));
1934 v = t4_read_reg(adap, TP_MTU_TABLE);
1935 mtus[i] = MTUVALUE_GET(v);
1936 if (mtu_log)
1937 mtu_log[i] = MTUWIDTH_GET(v);
1938 }
1939}
1940
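/*
 * Illustrative sketch, not part of the driver: log the hardware path-MTU
 * table, e.g. while debugging congestion-control setup.
 */
static void example_dump_mtus(struct adapter *adap)
{
	u16 mtus[NMTUS];
	int i;

	t4_read_mtu_tbl(adap, mtus, NULL);
	for (i = 0; i < NMTUS; i++)
		dev_info(adap->pdev_dev, "path MTU[%d] = %u\n", i, mtus[i]);
}
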
1941/**
Vipul Pandya636f9d32012-09-26 02:39:39 +00001942 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
1943 * @adap: the adapter
1944 * @addr: the indirect TP register address
1945 * @mask: specifies the field within the register to modify
1946 * @val: new value for the field
1947 *
1948 * Sets a field of an indirect TP register to the given value.
1949 */
1950void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
1951 unsigned int mask, unsigned int val)
1952{
1953 t4_write_reg(adap, TP_PIO_ADDR, addr);
1954 val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
1955 t4_write_reg(adap, TP_PIO_DATA, val);
1956}
1957
1958/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001959 * init_cong_ctrl - initialize congestion control parameters
1960 * @a: the alpha values for congestion control
1961 * @b: the beta values for congestion control
1962 *
1963 * Initialize the congestion control parameters.
1964 */
1965static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
1966{
1967 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
1968 a[9] = 2;
1969 a[10] = 3;
1970 a[11] = 4;
1971 a[12] = 5;
1972 a[13] = 6;
1973 a[14] = 7;
1974 a[15] = 8;
1975 a[16] = 9;
1976 a[17] = 10;
1977 a[18] = 14;
1978 a[19] = 17;
1979 a[20] = 21;
1980 a[21] = 25;
1981 a[22] = 30;
1982 a[23] = 35;
1983 a[24] = 45;
1984 a[25] = 60;
1985 a[26] = 80;
1986 a[27] = 100;
1987 a[28] = 200;
1988 a[29] = 300;
1989 a[30] = 400;
1990 a[31] = 500;
1991
1992 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
1993 b[9] = b[10] = 1;
1994 b[11] = b[12] = 2;
1995 b[13] = b[14] = b[15] = b[16] = 3;
1996 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
1997 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
1998 b[28] = b[29] = 6;
1999 b[30] = b[31] = 7;
2000}
2001
2002/* The minimum additive increment value for the congestion control table */
2003#define CC_MIN_INCR 2U
2004
2005/**
2006 * t4_load_mtus - write the MTU and congestion control HW tables
2007 * @adap: the adapter
2008 * @mtus: the values for the MTU table
2009 * @alpha: the values for the congestion control alpha parameter
2010 * @beta: the values for the congestion control beta parameter
2011 *
2012 * Write the HW MTU table with the supplied MTUs and the high-speed
2013 * congestion control table with the supplied alpha, beta, and MTUs.
2014 * We write the two tables together because the additive increments
2015 * depend on the MTUs.
2016 */
2017void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2018 const unsigned short *alpha, const unsigned short *beta)
2019{
2020 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2021 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2022 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2023 28672, 40960, 57344, 81920, 114688, 163840, 229376
2024 };
2025
2026 unsigned int i, w;
2027
2028 for (i = 0; i < NMTUS; ++i) {
2029 unsigned int mtu = mtus[i];
2030 unsigned int log2 = fls(mtu);
2031
2032 if (!(mtu & ((1 << log2) >> 2))) /* round */
2033 log2--;
2034 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
2035 MTUWIDTH(log2) | MTUVALUE(mtu));
2036
2037 for (w = 0; w < NCCTRL_WIN; ++w) {
2038 unsigned int inc;
2039
2040 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2041 CC_MIN_INCR);
2042
2043 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
2044 (w << 16) | (beta[w] << 13) | inc);
2045 }
2046 }
2047}
2048
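/*
 * Worked example of the increment computation above (derived from the
 * default tables in init_cong_ctrl(), not from separate documentation):
 * for mtu = 1500 in congestion-control window 20, alpha[20] = 21 and
 * avg_pkts[20] = 5120, so inc = max((1500 - 40) * 21 / 5120, CC_MIN_INCR)
 * = max(5, 2) = 5.
 */
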
2049/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002050 * get_mps_bg_map - return the buffer groups associated with a port
2051 * @adap: the adapter
2052 * @idx: the port index
2053 *
2054 * Returns a bitmap indicating which MPS buffer groups are associated
2055 * with the given port. Bit i is set if buffer group i is used by the
2056 * port.
2057 */
2058static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2059{
2060 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2061
2062 if (n == 0)
2063 return idx == 0 ? 0xf : 0;
2064 if (n == 1)
2065 return idx < 2 ? (3 << (2 * idx)) : 0;
2066 return 1 << idx;
2067}
2068
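/*
 * Worked example of the mapping above (read off the code, not from separate
 * documentation): with NUMPORTS == 0 (one port) port 0 owns all four buffer
 * groups (0xf); with NUMPORTS == 1 (two ports) port 0 owns groups 0-1 (0x3)
 * and port 1 owns groups 2-3 (0xc); otherwise each port owns the single
 * group matching its index (1 << idx).
 */
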
2069/**
2070 * t4_get_port_stats - collect port statistics
2071 * @adap: the adapter
2072 * @idx: the port index
2073 * @p: the stats structure to fill
2074 *
2075 * Collect statistics related to the given port from HW.
2076 */
2077void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2078{
2079 u32 bgmap = get_mps_bg_map(adap, idx);
2080
2081#define GET_STAT(name) \
2082 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
2083#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2084
2085 p->tx_octets = GET_STAT(TX_PORT_BYTES);
2086 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
2087 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
2088 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
2089 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
2090 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
2091 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
2092 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
2093 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
2094 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
2095 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
2096 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2097 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
2098 p->tx_drop = GET_STAT(TX_PORT_DROP);
2099 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
2100 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
2101 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
2102 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
2103 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
2104 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
2105 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
2106 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
2107 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
2108
2109 p->rx_octets = GET_STAT(RX_PORT_BYTES);
2110 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
2111 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
2112 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
2113 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
2114 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
2115 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2116 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
2117 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
2118 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
2119 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
2120 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
2121 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
2122 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
2123 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
2124 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
2125 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2126 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
2127 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
2128 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
2129 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
2130 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
2131 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
2132 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
2133 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
2134 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
2135 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
2136
2137 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2138 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2139 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2140 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2141 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2142 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2143 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2144 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2145
2146#undef GET_STAT
2147#undef GET_STAT_COM
2148}
2149
2150/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002151 * t4_wol_magic_enable - enable/disable magic packet WoL
2152 * @adap: the adapter
2153 * @port: the physical port index
2154 * @addr: MAC address expected in magic packets, %NULL to disable
2155 *
2156 * Enables/disables magic packet wake-on-LAN for the selected port.
2157 */
2158void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2159 const u8 *addr)
2160{
2161 if (addr) {
2162 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
2163 (addr[2] << 24) | (addr[3] << 16) |
2164 (addr[4] << 8) | addr[5]);
2165 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
2166 (addr[0] << 8) | addr[1]);
2167 }
2168 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
2169 addr ? MAGICEN : 0);
2170}
2171
2172/**
2173 * t4_wol_pat_enable - enable/disable pattern-based WoL
2174 * @adap: the adapter
2175 * @port: the physical port index
2176 * @map: bitmap of which HW pattern filters to set
2177 * @mask0: byte mask for bytes 0-63 of a packet
2178 * @mask1: byte mask for bytes 64-127 of a packet
2179 * @crc: Ethernet CRC for selected bytes
2180 * @enable: enable/disable switch
2181 *
2182 * Sets the pattern filters indicated in @map to mask out the bytes
2183 * specified in @mask0/@mask1 in received packets and compare the CRC of
2184 * the resulting packet against @crc. If @enable is %true pattern-based
2185 * WoL is enabled, otherwise disabled.
2186 */
2187int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2188 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2189{
2190 int i;
2191
2192 if (!enable) {
2193 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
2194 PATEN, 0);
2195 return 0;
2196 }
2197 if (map > 0xff)
2198 return -EINVAL;
2199
2200#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
2201
2202 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2203 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2204 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2205
2206 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2207 if (!(map & 1))
2208 continue;
2209
2210 /* write byte masks */
2211 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2212 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2213 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2214 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2215 return -ETIMEDOUT;
2216
2217 /* write CRC */
2218 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2219 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2220 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2221 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2222 return -ETIMEDOUT;
2223 }
2224#undef EPIO_REG
2225
2226 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
2227 return 0;
2228}
2229
2230#define INIT_CMD(var, cmd, rd_wr) do { \
2231 (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
2232 FW_CMD_REQUEST | FW_CMD_##rd_wr); \
2233 (var).retval_len16 = htonl(FW_LEN16(var)); \
2234} while (0)
2235
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302236int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2237 u32 addr, u32 val)
2238{
2239 struct fw_ldst_cmd c;
2240
2241 memset(&c, 0, sizeof(c));
Vipul Pandya636f9d32012-09-26 02:39:39 +00002242 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2243 FW_CMD_WRITE |
2244 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302245 c.cycles_to_len16 = htonl(FW_LEN16(c));
2246 c.u.addrval.addr = htonl(addr);
2247 c.u.addrval.val = htonl(val);
2248
2249 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2250}
2251
Ben Hutchings49ce9c22012-07-10 10:56:00 +00002252/**
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302253 * t4_mem_win_read_len - read memory through PCIE memory window
2254 * @adap: the adapter
2255 * @addr: address of first byte requested aligned on 32b.
2256 * @data: len bytes to hold the data read
2257 * @len: amount of data to read from window. Must be <=
2258 * MEMWIN0_APERATURE after adjusting for 16B alignment
 2259 *	MEMWIN0_APERTURE after adjusting for 16B alignment
 2260 *	requirements of the memory window.
2261 * Read len bytes of data from MC starting at @addr.
2262 */
2263int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
2264{
2265 int i;
2266 int off;
2267
2268 /*
2269 * Align on a 16B boundary.
2270 */
2271 off = addr & 15;
2272 if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
2273 return -EINVAL;
2274
Vipul Pandya840f3002012-09-05 02:01:55 +00002275 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15);
2276 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302277
2278 for (i = 0; i < len; i += 4)
2279 *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i));
2280
2281 return 0;
2282}
2283
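/*
 * Illustrative usage sketch, not part of the driver: read 64 bytes of
 * adapter memory through PCIe memory window 0.  The address is a
 * placeholder supplied by the caller; it must be 32-bit aligned and the
 * length must fit in MEMWIN0_APERTURE after the 16-byte window alignment.
 */
static int example_read_adapter_mem(struct adapter *adap, u32 addr,
				    __be32 buf[16])
{
	return t4_mem_win_read_len(adap, addr, buf, 16 * sizeof(__be32));
}
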
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002284/**
2285 * t4_mdio_rd - read a PHY register through MDIO
2286 * @adap: the adapter
2287 * @mbox: mailbox to use for the FW command
2288 * @phy_addr: the PHY address
2289 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2290 * @reg: the register to read
2291 * @valp: where to store the value
2292 *
2293 * Issues a FW command through the given mailbox to read a PHY register.
2294 */
2295int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2296 unsigned int mmd, unsigned int reg, u16 *valp)
2297{
2298 int ret;
2299 struct fw_ldst_cmd c;
2300
2301 memset(&c, 0, sizeof(c));
2302 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2303 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2304 c.cycles_to_len16 = htonl(FW_LEN16(c));
2305 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2306 FW_LDST_CMD_MMD(mmd));
2307 c.u.mdio.raddr = htons(reg);
2308
2309 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2310 if (ret == 0)
2311 *valp = ntohs(c.u.mdio.rval);
2312 return ret;
2313}
2314
2315/**
2316 * t4_mdio_wr - write a PHY register through MDIO
2317 * @adap: the adapter
2318 * @mbox: mailbox to use for the FW command
2319 * @phy_addr: the PHY address
2320 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2321 * @reg: the register to write
 2322 * @val: value to write
2323 *
2324 * Issues a FW command through the given mailbox to write a PHY register.
2325 */
2326int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2327 unsigned int mmd, unsigned int reg, u16 val)
2328{
2329 struct fw_ldst_cmd c;
2330
2331 memset(&c, 0, sizeof(c));
2332 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2333 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2334 c.cycles_to_len16 = htonl(FW_LEN16(c));
2335 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2336 FW_LDST_CMD_MMD(mmd));
2337 c.u.mdio.raddr = htons(reg);
2338 c.u.mdio.rval = htons(val);
2339
2340 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2341}
2342
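/*
 * Illustrative sketch, not part of the driver: a read-modify-write of a PHY
 * register over MDIO built from the two helpers above.  The PHY address,
 * MMD and register number are placeholders that would come from the PHY's
 * datasheet.
 */
static int example_mdio_rmw(struct adapter *adap, unsigned int mbox,
			    unsigned int phy_addr, unsigned int mmd,
			    unsigned int reg, u16 clr, u16 set)
{
	u16 val;
	int ret = t4_mdio_rd(adap, mbox, phy_addr, mmd, reg, &val);

	if (ret)
		return ret;
	return t4_mdio_wr(adap, mbox, phy_addr, mmd, reg, (val & ~clr) | set);
}
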
2343/**
Vipul Pandya636f9d32012-09-26 02:39:39 +00002344 * t4_fw_hello - establish communication with FW
2345 * @adap: the adapter
2346 * @mbox: mailbox to use for the FW command
2347 * @evt_mbox: mailbox to receive async FW events
2348 * @master: specifies the caller's willingness to be the device master
2349 * @state: returns the current device state (if non-NULL)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002350 *
Vipul Pandya636f9d32012-09-26 02:39:39 +00002351 * Issues a command to establish communication with FW. Returns either
2352 * an error (negative integer) or the mailbox of the Master PF.
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002353 */
2354int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2355 enum dev_master master, enum dev_state *state)
2356{
2357 int ret;
2358 struct fw_hello_cmd c;
Vipul Pandya636f9d32012-09-26 02:39:39 +00002359 u32 v;
2360 unsigned int master_mbox;
2361 int retries = FW_CMD_HELLO_RETRIES;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002362
Vipul Pandya636f9d32012-09-26 02:39:39 +00002363retry:
2364 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002365 INIT_CMD(c, HELLO, WRITE);
2366 c.err_to_mbasyncnot = htonl(
2367 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2368 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
Vipul Pandya636f9d32012-09-26 02:39:39 +00002369 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
2370 FW_HELLO_CMD_MBMASTER_MASK) |
2371 FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
2372 FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
2373 FW_HELLO_CMD_CLEARINIT);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002374
Vipul Pandya636f9d32012-09-26 02:39:39 +00002375 /*
2376 * Issue the HELLO command to the firmware. If it's not successful
2377 * but indicates that we got a "busy" or "timeout" condition, retry
2378 * the HELLO until we exhaust our retry limit.
2379 */
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002380 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
Vipul Pandya636f9d32012-09-26 02:39:39 +00002381 if (ret < 0) {
2382 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2383 goto retry;
2384 return ret;
2385 }
2386
2387 v = ntohl(c.err_to_mbasyncnot);
2388 master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
2389 if (state) {
2390 if (v & FW_HELLO_CMD_ERR)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002391 *state = DEV_STATE_ERR;
Vipul Pandya636f9d32012-09-26 02:39:39 +00002392 else if (v & FW_HELLO_CMD_INIT)
2393 *state = DEV_STATE_INIT;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002394 else
2395 *state = DEV_STATE_UNINIT;
2396 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00002397
2398 /*
2399 * If we're not the Master PF then we need to wait around for the
2400 * Master PF Driver to finish setting up the adapter.
2401 *
2402 * Note that we also do this wait if we're a non-Master-capable PF and
2403 * there is no current Master PF; a Master PF may show up momentarily
2404 * and we wouldn't want to fail pointlessly. (This can happen when an
2405 * OS loads lots of different drivers rapidly at the same time). In
2406 * this case, the Master PF returned by the firmware will be
2407 * FW_PCIE_FW_MASTER_MASK so the test below will work ...
2408 */
2409 if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
2410 master_mbox != mbox) {
2411 int waiting = FW_CMD_HELLO_TIMEOUT;
2412
2413 /*
2414 * Wait for the firmware to either indicate an error or
2415 * initialized state. If we see either of these we bail out
2416 * and report the issue to the caller. If we exhaust the
2417 * "hello timeout" and we haven't exhausted our retries, try
2418 * again. Otherwise bail with a timeout error.
2419 */
2420 for (;;) {
2421 u32 pcie_fw;
2422
2423 msleep(50);
2424 waiting -= 50;
2425
2426 /*
 2427 * If neither Error nor Initialized is indicated
 2428 * by the firmware, keep waiting till we exhaust our
2429 * timeout ... and then retry if we haven't exhausted
2430 * our retries ...
2431 */
2432 pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
2433 if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
2434 if (waiting <= 0) {
2435 if (retries-- > 0)
2436 goto retry;
2437
2438 return -ETIMEDOUT;
2439 }
2440 continue;
2441 }
2442
 2443 * We either have an Error or Initialized condition;
2444 * We either have an Error or Initialized condition
2445 * report errors preferentially.
2446 */
2447 if (state) {
2448 if (pcie_fw & FW_PCIE_FW_ERR)
2449 *state = DEV_STATE_ERR;
2450 else if (pcie_fw & FW_PCIE_FW_INIT)
2451 *state = DEV_STATE_INIT;
2452 }
2453
2454 /*
2455 * If we arrived before a Master PF was selected and
 2456 * there's now a valid Master PF, grab its identity
2457 * for our caller.
2458 */
2459 if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
2460 (pcie_fw & FW_PCIE_FW_MASTER_VLD))
2461 master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
2462 break;
2463 }
2464 }
2465
2466 return master_mbox;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002467}
2468
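/*
 * Illustrative usage sketch, not part of the driver: contact the firmware,
 * offering (but not insisting) to become the Master PF, and report whether
 * this function ended up as master.  @mbox is normally the calling PF's own
 * mailbox; MASTER_MAY is assumed to be the third value of enum dev_master
 * declared in cxgb4.h alongside MASTER_CANT and MASTER_MUST.
 */
static int example_contact_fw(struct adapter *adap, unsigned int mbox,
			      enum dev_state *state)
{
	int master_mbox = t4_fw_hello(adap, mbox, mbox, MASTER_MAY, state);

	if (master_mbox < 0)
		return master_mbox;	/* mailbox error or timeout */
	/* non-zero means this PF was elected Master */
	return master_mbox == mbox;
}
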
2469/**
2470 * t4_fw_bye - end communication with FW
2471 * @adap: the adapter
2472 * @mbox: mailbox to use for the FW command
2473 *
2474 * Issues a command to terminate communication with FW.
2475 */
2476int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2477{
2478 struct fw_bye_cmd c;
2479
2480 INIT_CMD(c, BYE, WRITE);
2481 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2482}
2483
2484/**
 2485 * t4_early_init - ask FW to initialize the device
2486 * @adap: the adapter
2487 * @mbox: mailbox to use for the FW command
2488 *
2489 * Issues a command to FW to partially initialize the device. This
2490 * performs initialization that generally doesn't depend on user input.
2491 */
2492int t4_early_init(struct adapter *adap, unsigned int mbox)
2493{
2494 struct fw_initialize_cmd c;
2495
2496 INIT_CMD(c, INITIALIZE, WRITE);
2497 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2498}
2499
2500/**
2501 * t4_fw_reset - issue a reset to FW
2502 * @adap: the adapter
2503 * @mbox: mailbox to use for the FW command
2504 * @reset: specifies the type of reset to perform
2505 *
2506 * Issues a reset command of the specified type to FW.
2507 */
2508int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2509{
2510 struct fw_reset_cmd c;
2511
2512 INIT_CMD(c, RESET, WRITE);
2513 c.val = htonl(reset);
2514 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2515}
2516
2517/**
Vipul Pandya636f9d32012-09-26 02:39:39 +00002518 * t4_fw_config_file - setup an adapter via a Configuration File
2519 * @adap: the adapter
2520 * @mbox: mailbox to use for the FW command
2521 * @mtype: the memory type where the Configuration File is located
2522 * @maddr: the memory address where the Configuration File is located
2523 * @finiver: return value for CF [fini] version
2524 * @finicsum: return value for CF [fini] checksum
2525 * @cfcsum: return value for CF computed checksum
2526 *
2527 * Issue a command to get the firmware to process the Configuration
2528 * File located at the specified mtype/maddress. If the Configuration
2529 * File is processed successfully and return value pointers are
 2530 * provided, the Configuration File's [fini] section version and
2531 * checksum values will be returned along with the computed checksum.
2532 * It's up to the caller to decide how it wants to respond to the
 2533 * checksums not matching, but it is recommended that a prominent warning
2534 * be emitted in order to help people rapidly identify changed or
2535 * corrupted Configuration Files.
2536 *
2537 * Also note that it's possible to modify things like "niccaps",
 2538 * "toecaps", etc. between processing the Configuration File and telling
2539 * the firmware to use the new configuration. Callers which want to
2540 * do this will need to "hand-roll" their own CAPS_CONFIGS commands for
2541 * Configuration Files if they want to do this.
2542 */
2543int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
2544 unsigned int mtype, unsigned int maddr,
2545 u32 *finiver, u32 *finicsum, u32 *cfcsum)
2546{
2547 struct fw_caps_config_cmd caps_cmd;
2548 int ret;
2549
2550 /*
2551 * Tell the firmware to process the indicated Configuration File.
2552 * If there are no errors and the caller has provided return value
2553 * pointers for the [fini] section version, checksum and computed
2554 * checksum, pass those back to the caller.
2555 */
2556 memset(&caps_cmd, 0, sizeof(caps_cmd));
2557 caps_cmd.op_to_write =
2558 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2559 FW_CMD_REQUEST |
2560 FW_CMD_READ);
2561 caps_cmd.retval_len16 =
2562 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
2563 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2564 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
2565 FW_LEN16(caps_cmd));
2566 ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd);
2567 if (ret < 0)
2568 return ret;
2569
2570 if (finiver)
2571 *finiver = ntohl(caps_cmd.finiver);
2572 if (finicsum)
2573 *finicsum = ntohl(caps_cmd.finicsum);
2574 if (cfcsum)
2575 *cfcsum = ntohl(caps_cmd.cfcsum);
2576
2577 /*
2578 * And now tell the firmware to use the configuration we just loaded.
2579 */
2580 caps_cmd.op_to_write =
2581 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2582 FW_CMD_REQUEST |
2583 FW_CMD_WRITE);
2584 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
2585 return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
2586}
2587
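/*
 * Illustrative usage sketch, not part of the driver: have the firmware
 * process a Configuration File already staged at @mtype/@maddr and emit the
 * warning recommended above if the [fini] checksum does not match the
 * computed one.
 */
static int example_apply_config(struct adapter *adap, unsigned int mbox,
				unsigned int mtype, unsigned int maddr)
{
	u32 finiver, finicsum, cfcsum;
	int ret = t4_fw_config_file(adap, mbox, mtype, maddr,
				    &finiver, &finicsum, &cfcsum);

	if (ret < 0)
		return ret;
	if (finicsum != cfcsum)
		dev_warn(adap->pdev_dev,
			 "Configuration File checksum mismatch: [fini] %#x, computed %#x\n",
			 finicsum, cfcsum);
	return 0;
}
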
2588/**
2589 * t4_fixup_host_params - fix up host-dependent parameters
2590 * @adap: the adapter
2591 * @page_size: the host's Base Page Size
2592 * @cache_line_size: the host's Cache Line Size
2593 *
2594 * Various registers in T4 contain values which are dependent on the
2595 * host's Base Page and Cache Line Sizes. This function will fix all of
2596 * those registers with the appropriate values as passed in ...
2597 */
2598int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
2599 unsigned int cache_line_size)
2600{
2601 unsigned int page_shift = fls(page_size) - 1;
2602 unsigned int sge_hps = page_shift - 10;
2603 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
2604 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
2605 unsigned int fl_align_log = fls(fl_align) - 1;
2606
2607 t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
2608 HOSTPAGESIZEPF0(sge_hps) |
2609 HOSTPAGESIZEPF1(sge_hps) |
2610 HOSTPAGESIZEPF2(sge_hps) |
2611 HOSTPAGESIZEPF3(sge_hps) |
2612 HOSTPAGESIZEPF4(sge_hps) |
2613 HOSTPAGESIZEPF5(sge_hps) |
2614 HOSTPAGESIZEPF6(sge_hps) |
2615 HOSTPAGESIZEPF7(sge_hps));
2616
2617 t4_set_reg_field(adap, SGE_CONTROL,
2618 INGPADBOUNDARY(INGPADBOUNDARY_MASK) |
2619 EGRSTATUSPAGESIZE_MASK,
2620 INGPADBOUNDARY(fl_align_log - 5) |
2621 EGRSTATUSPAGESIZE(stat_len != 64));
2622
2623 /*
2624 * Adjust various SGE Free List Host Buffer Sizes.
2625 *
2626 * This is something of a crock since we're using fixed indices into
2627 * the array which are also known by the sge.c code and the T4
2628 * Firmware Configuration File. We need to come up with a much better
2629 * approach to managing this array. For now, the first four entries
2630 * are:
2631 *
2632 * 0: Host Page Size
2633 * 1: 64KB
2634 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
2635 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
2636 *
2637 * For the single-MTU buffers in unpacked mode we need to include
2638 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
2639 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
 2640 * Padding boundary. All of these are accommodated in the Factory
2641 * Default Firmware Configuration File but we need to adjust it for
2642 * this host's cache line size.
2643 */
2644 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
2645 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
2646 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
2647 & ~(fl_align-1));
2648 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
2649 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
2650 & ~(fl_align-1));
2651
2652 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
2653
2654 return 0;
2655}
2656
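/*
 * Worked example of the arithmetic above (a sketch based on the code, not
 * on separate documentation): for a 4KB host page size, page_shift = 12 and
 * sge_hps = 2; for a 64-byte cache line, stat_len and fl_align are both 64,
 * fl_align_log = 6, so the SGE ingress padding boundary field is programmed
 * with 6 - 5 = 1 and the egress status page stays at 64 bytes.  A typical
 * caller simply passes the host constants:
 *
 *	t4_fixup_host_params(adap, PAGE_SIZE, L1_CACHE_BYTES);
 */
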
2657/**
2658 * t4_fw_initialize - ask FW to initialize the device
2659 * @adap: the adapter
2660 * @mbox: mailbox to use for the FW command
2661 *
2662 * Issues a command to FW to partially initialize the device. This
2663 * performs initialization that generally doesn't depend on user input.
2664 */
2665int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
2666{
2667 struct fw_initialize_cmd c;
2668
2669 memset(&c, 0, sizeof(c));
2670 INIT_CMD(c, INITIALIZE, WRITE);
2671 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2672}
2673
2674/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002675 * t4_query_params - query FW or device parameters
2676 * @adap: the adapter
2677 * @mbox: mailbox to use for the FW command
2678 * @pf: the PF
2679 * @vf: the VF
2680 * @nparams: the number of parameters
2681 * @params: the parameter names
2682 * @val: the parameter values
2683 *
2684 * Reads the value of FW or device parameters. Up to 7 parameters can be
2685 * queried at once.
2686 */
2687int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2688 unsigned int vf, unsigned int nparams, const u32 *params,
2689 u32 *val)
2690{
2691 int i, ret;
2692 struct fw_params_cmd c;
2693 __be32 *p = &c.param[0].mnem;
2694
2695 if (nparams > 7)
2696 return -EINVAL;
2697
2698 memset(&c, 0, sizeof(c));
2699 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2700 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2701 FW_PARAMS_CMD_VFN(vf));
2702 c.retval_len16 = htonl(FW_LEN16(c));
2703 for (i = 0; i < nparams; i++, p += 2)
2704 *p = htonl(*params++);
2705
2706 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2707 if (ret == 0)
2708 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2709 *val++ = ntohl(*p);
2710 return ret;
2711}
2712
2713/**
2714 * t4_set_params - sets FW or device parameters
2715 * @adap: the adapter
2716 * @mbox: mailbox to use for the FW command
2717 * @pf: the PF
2718 * @vf: the VF
2719 * @nparams: the number of parameters
2720 * @params: the parameter names
2721 * @val: the parameter values
2722 *
2723 * Sets the value of FW or device parameters. Up to 7 parameters can be
2724 * specified at once.
2725 */
2726int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2727 unsigned int vf, unsigned int nparams, const u32 *params,
2728 const u32 *val)
2729{
2730 struct fw_params_cmd c;
2731 __be32 *p = &c.param[0].mnem;
2732
2733 if (nparams > 7)
2734 return -EINVAL;
2735
2736 memset(&c, 0, sizeof(c));
2737 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2738 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2739 FW_PARAMS_CMD_VFN(vf));
2740 c.retval_len16 = htonl(FW_LEN16(c));
2741 while (nparams--) {
2742 *p++ = htonl(*params++);
2743 *p++ = htonl(*val++);
2744 }
2745
2746 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2747}
2748
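/*
 * Illustrative usage sketch, not part of the driver: query one firmware
 * parameter.  The encoded mnemonic is built by the caller from the
 * FW_PARAMS_* fields in t4fw_api.h; it is passed in here rather than
 * hard-coded to avoid guessing at specific parameter names.
 */
static int example_query_one_param(struct adapter *adap, unsigned int mbox,
				   unsigned int pf, unsigned int vf,
				   u32 mnem, u32 *valp)
{
	/* up to 7 parameters could be queried in the same mailbox command */
	return t4_query_params(adap, mbox, pf, vf, 1, &mnem, valp);
}
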
2749/**
2750 * t4_cfg_pfvf - configure PF/VF resource limits
2751 * @adap: the adapter
2752 * @mbox: mailbox to use for the FW command
2753 * @pf: the PF being configured
2754 * @vf: the VF being configured
2755 * @txq: the max number of egress queues
2756 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
2757 * @rxqi: the max number of interrupt-capable ingress queues
2758 * @rxq: the max number of interruptless ingress queues
2759 * @tc: the PCI traffic class
2760 * @vi: the max number of virtual interfaces
2761 * @cmask: the channel access rights mask for the PF/VF
2762 * @pmask: the port access rights mask for the PF/VF
2763 * @nexact: the maximum number of exact MPS filters
2764 * @rcaps: read capabilities
2765 * @wxcaps: write/execute capabilities
2766 *
2767 * Configures resource limits and capabilities for a physical or virtual
2768 * function.
2769 */
2770int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
2771 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
2772 unsigned int rxqi, unsigned int rxq, unsigned int tc,
2773 unsigned int vi, unsigned int cmask, unsigned int pmask,
2774 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
2775{
2776 struct fw_pfvf_cmd c;
2777
2778 memset(&c, 0, sizeof(c));
2779 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
2780 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
2781 FW_PFVF_CMD_VFN(vf));
2782 c.retval_len16 = htonl(FW_LEN16(c));
2783 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
2784 FW_PFVF_CMD_NIQ(rxq));
Casey Leedom81323b72010-06-25 12:10:32 +00002785 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002786 FW_PFVF_CMD_PMASK(pmask) |
2787 FW_PFVF_CMD_NEQ(txq));
2788 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
2789 FW_PFVF_CMD_NEXACTF(nexact));
2790 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
2791 FW_PFVF_CMD_WX_CAPS(wxcaps) |
2792 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
2793 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2794}
2795
2796/**
2797 * t4_alloc_vi - allocate a virtual interface
2798 * @adap: the adapter
2799 * @mbox: mailbox to use for the FW command
2800 * @port: physical port associated with the VI
2801 * @pf: the PF owning the VI
2802 * @vf: the VF owning the VI
2803 * @nmac: number of MAC addresses needed (1 to 5)
2804 * @mac: the MAC addresses of the VI
2805 * @rss_size: size of RSS table slice associated with this VI
2806 *
2807 * Allocates a virtual interface for the given physical port. If @mac is
2808 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
2809 * @mac should be large enough to hold @nmac Ethernet addresses, they are
 2810 * @mac should be large enough to hold @nmac Ethernet addresses; they are
2811 * Returns a negative error number or the non-negative VI id.
2812 */
2813int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
2814 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
2815 unsigned int *rss_size)
2816{
2817 int ret;
2818 struct fw_vi_cmd c;
2819
2820 memset(&c, 0, sizeof(c));
2821 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2822 FW_CMD_WRITE | FW_CMD_EXEC |
2823 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
2824 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
2825 c.portid_pkd = FW_VI_CMD_PORTID(port);
2826 c.nmac = nmac - 1;
2827
2828 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2829 if (ret)
2830 return ret;
2831
2832 if (mac) {
2833 memcpy(mac, c.mac, sizeof(c.mac));
2834 switch (nmac) {
2835 case 5:
2836 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
2837 case 4:
2838 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
2839 case 3:
2840 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
2841 case 2:
2842 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
2843 }
2844 }
2845 if (rss_size)
2846 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00002847 return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002848}
2849
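/*
 * Illustrative usage sketch, not part of the driver: allocate a VI with a
 * single MAC address on a physical port and bring it up.  Assumes the
 * prototypes of t4_set_rxmode() and t4_enable_vi() (defined further below)
 * are visible via cxgb4.h, as they are for the rest of the driver.
 */
static int example_bring_up_vi(struct adapter *adap, unsigned int mbox,
			       unsigned int port, unsigned int pf,
			       unsigned int vf, u8 mac[6])
{
	unsigned int rss_size;
	int ret, viid;

	viid = t4_alloc_vi(adap, mbox, port, pf, vf, 1, mac, &rss_size);
	if (viid < 0)
		return viid;

	/* enable broadcast Rx and VLAN extraction, leave the rest unchanged */
	ret = t4_set_rxmode(adap, mbox, viid, -1, -1, -1, 1, 1, true);
	if (ret)
		return ret;
	return t4_enable_vi(adap, mbox, viid, true, true);
}
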
2850/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002851 * t4_set_rxmode - set Rx properties of a virtual interface
2852 * @adap: the adapter
2853 * @mbox: mailbox to use for the FW command
2854 * @viid: the VI id
2855 * @mtu: the new MTU or -1
2856 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2857 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2858 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00002859 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002860 * @sleep_ok: if true we may sleep while awaiting command completion
2861 *
2862 * Sets Rx properties of a virtual interface.
2863 */
2864int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00002865 int mtu, int promisc, int all_multi, int bcast, int vlanex,
2866 bool sleep_ok)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002867{
2868 struct fw_vi_rxmode_cmd c;
2869
2870 /* convert to FW values */
2871 if (mtu < 0)
2872 mtu = FW_RXMODE_MTU_NO_CHG;
2873 if (promisc < 0)
2874 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
2875 if (all_multi < 0)
2876 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
2877 if (bcast < 0)
2878 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00002879 if (vlanex < 0)
2880 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002881
2882 memset(&c, 0, sizeof(c));
2883 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
2884 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
2885 c.retval_len16 = htonl(FW_LEN16(c));
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00002886 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
2887 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
2888 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
2889 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
2890 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002891 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2892}
2893
2894/**
2895 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
2896 * @adap: the adapter
2897 * @mbox: mailbox to use for the FW command
2898 * @viid: the VI id
2899 * @free: if true any existing filters for this VI id are first removed
2900 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
2901 * @addr: the MAC address(es)
2902 * @idx: where to store the index of each allocated filter
2903 * @hash: pointer to hash address filter bitmap
2904 * @sleep_ok: call is allowed to sleep
2905 *
2906 * Allocates an exact-match filter for each of the supplied addresses and
2907 * sets it to the corresponding address. If @idx is not %NULL it should
2908 * have at least @naddr entries, each of which will be set to the index of
2909 * the filter allocated for the corresponding MAC address. If a filter
2910 * could not be allocated for an address its index is set to 0xffff.
2911 * If @hash is not %NULL addresses that fail to allocate an exact filter
 2912 * are hashed and used to update the hash filter bitmap pointed at by @hash.
2913 *
2914 * Returns a negative error number or the number of filters allocated.
2915 */
2916int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
2917 unsigned int viid, bool free, unsigned int naddr,
2918 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
2919{
2920 int i, ret;
2921 struct fw_vi_mac_cmd c;
2922 struct fw_vi_mac_exact *p;
2923
2924 if (naddr > 7)
2925 return -EINVAL;
2926
2927 memset(&c, 0, sizeof(c));
2928 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2929 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
2930 FW_VI_MAC_CMD_VIID(viid));
2931 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
2932 FW_CMD_LEN16((naddr + 2) / 2));
2933
2934 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2935 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2936 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
2937 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
2938 }
2939
2940 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
2941 if (ret)
2942 return ret;
2943
2944 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2945 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2946
2947 if (idx)
2948 idx[i] = index >= NEXACT_MAC ? 0xffff : index;
2949 if (index < NEXACT_MAC)
2950 ret++;
2951 else if (hash)
Dimitris Michailidisce9aeb52010-12-03 10:39:04 +00002952 *hash |= (1ULL << hash_mac_addr(addr[i]));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002953 }
2954 return ret;
2955}
2956
2957/**
2958 * t4_change_mac - modifies the exact-match filter for a MAC address
2959 * @adap: the adapter
2960 * @mbox: mailbox to use for the FW command
2961 * @viid: the VI id
2962 * @idx: index of existing filter for old value of MAC address, or -1
2963 * @addr: the new MAC address value
2964 * @persist: whether a new MAC allocation should be persistent
2965 * @add_smt: if true also add the address to the HW SMT
2966 *
2967 * Modifies an exact-match filter and sets it to the new MAC address.
2968 * Note that in general it is not possible to modify the value of a given
 2969 * filter, so the generic way to modify an address filter is to free the one
2970 * being used by the old address value and allocate a new filter for the
2971 * new address value. @idx can be -1 if the address is a new addition.
2972 *
2973 * Returns a negative error number or the index of the filter with the new
2974 * MAC value.
2975 */
2976int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2977 int idx, const u8 *addr, bool persist, bool add_smt)
2978{
2979 int ret, mode;
2980 struct fw_vi_mac_cmd c;
2981 struct fw_vi_mac_exact *p = c.u.exact;
2982
2983 if (idx < 0) /* new allocation */
2984 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
2985 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
2986
2987 memset(&c, 0, sizeof(c));
2988 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2989 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
2990 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
2991 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2992 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
2993 FW_VI_MAC_CMD_IDX(idx));
2994 memcpy(p->macaddr, addr, sizeof(p->macaddr));
2995
2996 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2997 if (ret == 0) {
2998 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2999 if (ret >= NEXACT_MAC)
3000 ret = -ENOMEM;
3001 }
3002 return ret;
3003}
3004
3005/**
3006 * t4_set_addr_hash - program the MAC inexact-match hash filter
3007 * @adap: the adapter
3008 * @mbox: mailbox to use for the FW command
3009 * @viid: the VI id
3010 * @ucast: whether the hash filter should also match unicast addresses
3011 * @vec: the value to be written to the hash filter
3012 * @sleep_ok: call is allowed to sleep
3013 *
3014 * Sets the 64-bit inexact-match hash filter for a virtual interface.
3015 */
3016int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
3017 bool ucast, u64 vec, bool sleep_ok)
3018{
3019 struct fw_vi_mac_cmd c;
3020
3021 memset(&c, 0, sizeof(c));
3022 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3023 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
3024 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
3025 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
3026 FW_CMD_LEN16(1));
3027 c.u.hash.hashvec = cpu_to_be64(vec);
3028 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3029}
3030
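/*
 * Illustrative usage sketch, not part of the driver: program up to 7
 * unicast addresses as exact-match filters and push any overflow into the
 * inexact hash, combining t4_alloc_mac_filt() and t4_set_addr_hash() as
 * described above.
 */
static int example_program_macs(struct adapter *adap, unsigned int mbox,
				unsigned int viid, const u8 **addrs,
				unsigned int naddrs)
{
	u64 hash = 0;
	int ret = t4_alloc_mac_filt(adap, mbox, viid, true, naddrs, addrs,
				    NULL, &hash, true);

	if (ret < 0)
		return ret;
	/* addresses that did not get an exact slot were ORed into @hash */
	return t4_set_addr_hash(adap, mbox, viid, true, hash, true);
}
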
3031/**
3032 * t4_enable_vi - enable/disable a virtual interface
3033 * @adap: the adapter
3034 * @mbox: mailbox to use for the FW command
3035 * @viid: the VI id
3036 * @rx_en: 1=enable Rx, 0=disable Rx
3037 * @tx_en: 1=enable Tx, 0=disable Tx
3038 *
3039 * Enables/disables a virtual interface.
3040 */
3041int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
3042 bool rx_en, bool tx_en)
3043{
3044 struct fw_vi_enable_cmd c;
3045
3046 memset(&c, 0, sizeof(c));
3047 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3048 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3049 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
3050 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
3051 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3052}
3053
3054/**
3055 * t4_identify_port - identify a VI's port by blinking its LED
3056 * @adap: the adapter
3057 * @mbox: mailbox to use for the FW command
3058 * @viid: the VI id
3059 * @nblinks: how many times to blink LED at 2.5 Hz
3060 *
3061 * Identifies a VI's port by blinking its LED.
3062 */
3063int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
3064 unsigned int nblinks)
3065{
3066 struct fw_vi_enable_cmd c;
3067
3068 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3069 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3070 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
3071 c.blinkdur = htons(nblinks);
3072 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3073}
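
/*
 * Illustrative sketch, not part of the driver: bringing a port's VI up with
 * t4_enable_vi() and then asking the user to locate it by blinking the LED
 * with t4_identify_port().  The helper name and error handling are
 * illustrative only.
 */
static int __maybe_unused example_bring_up_and_blink(struct adapter *adap,
						     unsigned int mbox,
						     unsigned int viid)
{
	int ret;

	/* Enable both Rx and Tx on the virtual interface. */
	ret = t4_enable_vi(adap, mbox, viid, true, true);
	if (ret)
		return ret;

	/* Blink the port LED ten times at 2.5 Hz so it can be found. */
	return t4_identify_port(adap, mbox, viid, 10);
}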
3074
3075/**
3076 * t4_iq_free - free an ingress queue and its FLs
3077 * @adap: the adapter
3078 * @mbox: mailbox to use for the FW command
3079 * @pf: the PF owning the queues
3080 * @vf: the VF owning the queues
3081 * @iqtype: the ingress queue type
3082 * @iqid: ingress queue id
3083 * @fl0id: FL0 queue id or 0xffff if no attached FL0
3084 * @fl1id: FL1 queue id or 0xffff if no attached FL1
3085 *
3086 * Frees an ingress queue and its associated FLs, if any.
3087 */
3088int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3089 unsigned int vf, unsigned int iqtype, unsigned int iqid,
3090 unsigned int fl0id, unsigned int fl1id)
3091{
3092 struct fw_iq_cmd c;
3093
3094 memset(&c, 0, sizeof(c));
3095 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
3096 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
3097 FW_IQ_CMD_VFN(vf));
3098 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
3099 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
3100 c.iqid = htons(iqid);
3101 c.fl0id = htons(fl0id);
3102 c.fl1id = htons(fl1id);
3103 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3104}
3105
3106/**
3107 * t4_eth_eq_free - free an Ethernet egress queue
3108 * @adap: the adapter
3109 * @mbox: mailbox to use for the FW command
3110 * @pf: the PF owning the queue
3111 * @vf: the VF owning the queue
3112 * @eqid: egress queue id
3113 *
3114 * Frees an Ethernet egress queue.
3115 */
3116int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3117 unsigned int vf, unsigned int eqid)
3118{
3119 struct fw_eq_eth_cmd c;
3120
3121 memset(&c, 0, sizeof(c));
3122 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
3123 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
3124 FW_EQ_ETH_CMD_VFN(vf));
3125 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
3126 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
3127 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3128}
3129
3130/**
3131 * t4_ctrl_eq_free - free a control egress queue
3132 * @adap: the adapter
3133 * @mbox: mailbox to use for the FW command
3134 * @pf: the PF owning the queue
3135 * @vf: the VF owning the queue
3136 * @eqid: egress queue id
3137 *
3138 * Frees a control egress queue.
3139 */
3140int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3141 unsigned int vf, unsigned int eqid)
3142{
3143 struct fw_eq_ctrl_cmd c;
3144
3145 memset(&c, 0, sizeof(c));
3146 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
3147 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
3148 FW_EQ_CTRL_CMD_VFN(vf));
3149 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
3150 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
3151 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3152}
3153
3154/**
3155 * t4_ofld_eq_free - free an offload egress queue
3156 * @adap: the adapter
3157 * @mbox: mailbox to use for the FW command
3158 * @pf: the PF owning the queue
3159 * @vf: the VF owning the queue
3160 * @eqid: egress queue id
3161 *
3162 * Frees an offload egress queue.
3163 */
3164int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3165 unsigned int vf, unsigned int eqid)
3166{
3167 struct fw_eq_ofld_cmd c;
3168
3169 memset(&c, 0, sizeof(c));
3170 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
3171 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
3172 FW_EQ_OFLD_CMD_VFN(vf));
3173 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
3174 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
3175 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3176}
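
/*
 * Illustrative sketch, not part of the driver: tearing down a simple
 * Ethernet queue set with the free routines above.  The queue-id parameters
 * are placeholders and the helper itself is hypothetical; FW_IQ_TYPE_FL_INT_CAP
 * is the ingress-queue type the driver's SGE code uses for response queues
 * with free lists.
 */
static int __maybe_unused example_free_eth_qset(struct adapter *adap,
						unsigned int mbox,
						unsigned int pf, unsigned int vf,
						unsigned int txq_id,
						unsigned int rspq_id,
						unsigned int fl_id)
{
	int ret;

	/* Egress side first: the Ethernet Tx queue. */
	ret = t4_eth_eq_free(adap, mbox, pf, vf, txq_id);
	if (ret)
		return ret;

	/* Then the response queue with its single free list (no FL1). */
	return t4_iq_free(adap, mbox, pf, vf, FW_IQ_TYPE_FL_INT_CAP,
			  rspq_id, fl_id, 0xffff);
}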
3177
3178/**
3179 * t4_handle_fw_rpl - process a FW reply message
3180 * @adap: the adapter
3181 * @rpl: start of the FW message
3182 *
3183 * Processes a FW message, such as link state change messages.
3184 */
3185int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
3186{
3187 u8 opcode = *(const u8 *)rpl;
3188
3189 if (opcode == FW_PORT_CMD) { /* link/module state change message */
3190 int speed = 0, fc = 0;
3191 const struct fw_port_cmd *p = (void *)rpl;
3192 int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
3193 int port = adap->chan_map[chan];
3194 struct port_info *pi = adap2pinfo(adap, port);
3195 struct link_config *lc = &pi->link_cfg;
3196 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
3197 int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
3198 u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
3199
3200 if (stat & FW_PORT_CMD_RXPAUSE)
3201 fc |= PAUSE_RX;
3202 if (stat & FW_PORT_CMD_TXPAUSE)
3203 fc |= PAUSE_TX;
3204 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
3205 speed = SPEED_100;
3206 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
3207 speed = SPEED_1000;
3208 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
3209 speed = SPEED_10000;
3210
3211 if (link_ok != lc->link_ok || speed != lc->speed ||
3212 fc != lc->fc) { /* something changed */
3213 lc->link_ok = link_ok;
3214 lc->speed = speed;
3215 lc->fc = fc;
3216 t4_os_link_changed(adap, port, link_ok);
3217 }
3218 if (mod != pi->mod_type) {
3219 pi->mod_type = mod;
3220 t4_os_portmod_changed(adap, port);
3221 }
3222 }
3223 return 0;
3224}
3225
3226static void __devinit get_pci_mode(struct adapter *adapter,
3227 struct pci_params *p)
3228{
3229 u16 val;
3230 u32 pcie_cap = pci_pcie_cap(adapter->pdev);
3231
3232 if (pcie_cap) {
3233 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3234 &val);
3235 p->speed = val & PCI_EXP_LNKSTA_CLS;
3236 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3237 }
3238}
3239
3240/**
3241 * init_link_config - initialize a link's SW state
3242 * @lc: structure holding the link state
3243 * @caps: link capabilities
3244 *
3245 * Initializes the SW state maintained for each link, including the link's
3246 * capabilities and default speed/flow-control/autonegotiation settings.
3247 */
3248static void __devinit init_link_config(struct link_config *lc,
3249 unsigned int caps)
3250{
3251 lc->supported = caps;
3252 lc->requested_speed = 0;
3253 lc->speed = 0;
3254 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3255 if (lc->supported & FW_PORT_CAP_ANEG) {
3256 lc->advertising = lc->supported & ADVERT_MASK;
3257 lc->autoneg = AUTONEG_ENABLE;
3258 lc->requested_fc |= PAUSE_AUTONEG;
3259 } else {
3260 lc->advertising = 0;
3261 lc->autoneg = AUTONEG_DISABLE;
3262 }
3263}
3264
3265int t4_wait_dev_ready(struct adapter *adap)
3266{
3267 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
3268 return 0;
3269 msleep(500);
3270 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3271}
3272
3273static int __devinit get_flash_params(struct adapter *adap)
3274{
3275 int ret;
3276 u32 info;
3277
3278 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
3279 if (!ret)
3280 ret = sf1_read(adap, 3, 0, 1, &info);
3281 t4_write_reg(adap, SF_OP, 0); /* unlock SF */
3282 if (ret)
3283 return ret;
3284
3285 if ((info & 0xff) != 0x20) /* not a Numonix flash */
3286 return -EINVAL;
3287 info >>= 16; /* log2 of size */
3288 if (info >= 0x14 && info < 0x18)
3289 adap->params.sf_nsec = 1 << (info - 16);
3290 else if (info == 0x18)
3291 adap->params.sf_nsec = 64;
3292 else
3293 return -EINVAL;
3294 adap->params.sf_size = 1 << info;
3295 adap->params.sf_fw_start =
3296 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
3297 return 0;
3298}
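
/*
 * Illustrative sketch, not part of the driver: the geometry implied by the
 * decode in get_flash_params() for a typical part.  A Numonix device
 * reporting log2(size) == 0x17 gives an 8 MB flash split into
 * 1 << (0x17 - 16) == 128 sectors, i.e. 64 KB per sector; the hypothetical
 * helper below just restates that arithmetic for the 0x14..0x17 range.
 */
static unsigned int __maybe_unused example_sector_bytes(unsigned int lg2_size)
{
	unsigned int size = 1 << lg2_size;		/* total flash bytes */
	unsigned int nsec = 1 << (lg2_size - 16);	/* sector count for 0x14 <= lg2_size < 0x18 */

	return size / nsec;				/* 64 KB uniform sectors */
}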
3299
3300/**
3301 * t4_prep_adapter - prepare SW and HW for operation
3302 * @adapter: the adapter
3304 *
3305 * Initialize adapter SW state for the various HW modules, set initial
3306 * values for some adapter tunables, take PHYs out of reset, and
3307 * initialize the MDIO interface.
3308 */
3309int __devinit t4_prep_adapter(struct adapter *adapter)
3310{
3311 int ret;
3312
3313 ret = t4_wait_dev_ready(adapter);
3314 if (ret < 0)
3315 return ret;
3316
3317 get_pci_mode(adapter, &adapter->params.pci);
3318 adapter->params.rev = t4_read_reg(adapter, PL_REV);
3319
3320 ret = get_flash_params(adapter);
3321 if (ret < 0) {
3322 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
3323 return ret;
3324 }
3325
3326 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3327
3328 /*
3329 * Default port for debugging in case we can't reach FW.
3330 */
3331 adapter->params.nports = 1;
3332 adapter->params.portvec = 1;
3333 adapter->params.vpd.cclk = 50000;
3334 return 0;
3335}
3336
3337int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3338{
3339 u8 addr[6];
3340 int ret, i, j = 0;
3341 struct fw_port_cmd c;
3342 struct fw_rss_vi_config_cmd rvc;
3343
3344 memset(&c, 0, sizeof(c));
3345 memset(&rvc, 0, sizeof(rvc));
3346
3347 for_each_port(adap, i) {
3348 unsigned int rss_size;
3349 struct port_info *p = adap2pinfo(adap, i);
3350
3351 while ((adap->params.portvec & (1 << j)) == 0)
3352 j++;
3353
3354 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
3355 FW_CMD_REQUEST | FW_CMD_READ |
3356 FW_PORT_CMD_PORTID(j));
3357 c.action_to_len16 = htonl(
3358 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
3359 FW_LEN16(c));
3360 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3361 if (ret)
3362 return ret;
3363
3364 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
3365 if (ret < 0)
3366 return ret;
3367
3368 p->viid = ret;
3369 p->tx_chan = j;
3370 p->lport = j;
3371 p->rss_size = rss_size;
3372 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
3373 memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
3374 adap->port[i]->dev_id = j;
3375
3376 ret = ntohl(c.u.info.lstatus_to_modtype);
3377 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
3378 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
3379 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
3380 p->mod_type = FW_PORT_MOD_TYPE_NA;
3381
3382 rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
3383 FW_CMD_REQUEST | FW_CMD_READ |
3384 FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
3385 rvc.retval_len16 = htonl(FW_LEN16(rvc));
3386 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
3387 if (ret)
3388 return ret;
3389 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
3390
3391 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
3392 j++;
3393 }
3394 return 0;
3395}
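
/*
 * Illustrative sketch, not part of the driver: the probe-time ordering the
 * two routines above assume.  t4_prep_adapter() (which itself waits for the
 * device via t4_wait_dev_ready()) must succeed before t4_port_init() is used
 * to discover the ports.  The mbox/pf/vf values of 0 are placeholders and
 * the helper name is hypothetical.
 */
static int __devinit __maybe_unused example_probe_init(struct adapter *adap)
{
	int ret;

	ret = t4_prep_adapter(adap);
	if (ret)
		return ret;

	/* Query port capabilities and allocate one VI per port via mailbox 0. */
	return t4_port_init(adap, 0, 0, 0);
}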