/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"

/**
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
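
/*
 * Usage sketch (illustrative, mirroring callers later in this file): poll a
 * BIST start/busy bit for up to 10 attempts with a 1 usec pause between
 * checks, waiting for the bit to read back as 0:
 *
 *	i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
 *	if (i)
 *		return i;
 *
 * @polarity selects whether completion means the masked bit reads as 1 or 0;
 * a -EAGAIN return means the hardware never signalled completion.
 */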

/**
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);	/* flush */
}
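
/*
 * Usage sketch (illustrative only; SOME_REG/FIELD_* are placeholder names,
 * not real T4 registers): update only the bits selected by the mask while
 * leaving the rest of the register intact, e.g.
 *
 *	t4_set_reg_field(adap, SOME_REG, FIELD_MASK, FIELD_VAL(3));
 *
 * performs the read-modify-write and then reads the register back so the
 * posted write is flushed before the caller proceeds.
 */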

/**
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals,
		      unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect addresses
 * @data_reg: register holding the value for the indirect registers
 * @vals: values to write
 * @nregs: how many indirect registers to write
 * @start_idx: address of first indirect register to write
 *
 * Writes a sequential block of registers that are accessed indirectly
 * through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}
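
/*
 * Usage sketch (illustrative; INDIRECT_ADDR_REG/INDIRECT_DATA_REG are
 * placeholders for any address/data register pair, not specific T4 names):
 * read four consecutive indirectly addressed registers starting at
 * first_idx into a local buffer:
 *
 *	u32 vals[4];
 *
 *	t4_read_indirect(adap, INDIRECT_ADDR_REG, INDIRECT_DATA_REG,
 *			 vals, ARRAY_SIZE(vals), first_idx);
 *
 * Each iteration latches the index into the address register and then reads
 * the paired data register, so a given pair must not be used concurrently.
 */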

/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 */
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
	u32 req = ENABLE | FUNCTION(adap->fn) | reg;

	if (is_t4(adap->params.chip))
		req |= F_LOCALCFG;

	t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req);
	*val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA);

	/* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
	 * Configuration Space read.  (None of the other fields matter when
	 * ENABLE is 0 so a simple register write is easier than a
	 * read-modify-write via t4_set_reg_field().)
	 */
	t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
}

/*
 * t4_report_fw_error - report firmware error
 * @adap: the adapter
 *
 * The adapter firmware can indicate error conditions to the host.
 * If the firmware has indicated an error, print out the reason for
 * the firmware error.
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char *const reason[] = {
		"Crash",                        /* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",    /* PCIE_FW_EVAL_PREP */
		"During Device Configuration",  /* PCIE_FW_EVAL_CONF */
		"During Device Initialization", /* PCIE_FW_EVAL_INIT */
		"Unexpected Event",             /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",              /* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",                     /* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
	if (pcie_fw & FW_PCIE_FW_ERR)
		dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
			reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
	dev_err(adap->pdev_dev,
		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		(unsigned long long)t4_read_reg64(adap, data_reg),
		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
}

/**
 * t4_wr_mbox_meat - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command.  If @rpl is not %NULL it is used to
 * store the FW's reply to the command.  The command and its optional
 * reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 * to respond.  @sleep_ok determines whether we may sleep while awaiting
 * the response.  If sleeping is allowed we use progressive backoff
 * otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);	/* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];	/* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			if (!(v & MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);

			if (FW_CMD_RETVAL_GET((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_GET((int)res);
		}
	}

	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	t4_report_fw_error(adap);
	return -ETIMEDOUT;
}
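
/*
 * Caller-side sketch (illustrative): most code reaches this routine through
 * thin wrappers such as t4_wr_mbox() (sleeping) and t4_wr_mbox_ns()
 * (spinning).  A typical round trip builds a firmware command, converts its
 * fields to big-endian, and reuses the same buffer for the reply:
 *
 *	struct fw_port_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
 *			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
 *	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
 *				  FW_LEN16(c));
 *	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 *
 * which is the same pattern used by t4_link_start() and t4_restart_aneg()
 * further down in this file.
 */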

/**
 * t4_mc_read - read from MC through backdoor accesses
 * @adap: the adapter
 * @addr: address of first byte requested
 * @idx: which MC to access
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
	u32 mc_bist_status_rdata, mc_bist_data_pattern;

	if (is_t4(adap->params.chip)) {
		mc_bist_cmd = MC_BIST_CMD;
		mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
		mc_bist_cmd_len = MC_BIST_CMD_LEN;
		mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
		mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len, 64);
	t4_write_reg(adap, mc_bist_data_pattern, 0xc);
	t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
		     BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 * t4_edc_read - read from EDC through backdoor accesses
 * @adap: the adapter
 * @idx: which EDC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;

	if (is_t4(adap->params.chip)) {
		edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
		edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
						idx);
	} else {
		edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern =
			EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
		edc_bist_status_rdata =
			EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
	}

	if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd,
		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 * @adap: the adapter
 * @win: PCI-E Memory Window to use
 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 * @addr: address within indicated memory type
 * @len: amount of memory to transfer
 * @buf: host memory buffer
 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 * Reads/writes an [almost] arbitrary memory region in the firmware: the
 * firmware memory address and host buffer must be aligned on 32-bit
 * boundaries; the length may be arbitrary.  The memory is transferred as
 * a raw byte sequence from/to the firmware's memory.  If this memory
 * contains data structures which contain multi-byte integers, it's the
 * caller's responsibility to perform appropriate byte order conversions.
 */
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
		 u32 len, __be32 *buf, int dir)
{
	u32 pos, offset, resid, memoffset;
	u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;

	/* Argument sanity checks ...
	 */
	if (addr & 0x3)
		return -EINVAL;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware.  So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/* Offset into the region of memory which is being accessed
	 * MEM_EDC0 = 0
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- T4
	 * MEM_MC0  = 2 -- For T5
	 * MEM_MC1  = 3 -- For T5
	 */
	edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR));
	if (mtype != MEM_MC1)
		memoffset = (mtype * (edc_size * 1024 * 1024));
	else {
		mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap,
						       MA_EXT_MEMORY_BAR));
		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
	}

	/* Determine the PCIE_MEM_ACCESS_OFFSET */
	addr = addr + memoffset;

	/* Each PCI-E Memory Window is programmed with a window size -- or
	 * "aperture" -- which controls the granularity of its mapping onto
	 * adapter memory.  We need to grab that aperture in order to know
	 * how to use the specified window.  The window is also programmed
	 * with the base address of the Memory Window in BAR0's address
	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
	 * the address is relative to BAR0.
	 */
	mem_reg = t4_read_reg(adap,
			      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN,
						  win));
	mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10);
	mem_base = GET_PCIEOFST(mem_reg) << 10;
	if (is_t4(adap->params.chip))
		mem_base -= adap->t4_bar0;
	win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.
	 */
	pos = addr & ~(mem_aperture-1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.  (Read it back to ensure that changes propagate before we
	 * attempt to use the new value.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win),
		     pos | win_pf);
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = (__force __be32) t4_read_reg(adap,
							      mem_base + offset);
		else
			t4_write_reg(adap, mem_base + offset,
				     (__force u32) *buf++);
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.  Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_write_reg(adap,
				     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
							 win), pos | win_pf);
			t4_read_reg(adap,
				    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
							win));
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount.  The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid) {
		union {
			__be32 word;
			char byte[4];
		} last;
		unsigned char *bp;
		int i;

		if (dir == T4_MEMORY_READ) {
			last.word = (__force __be32) t4_read_reg(adap,
							mem_base + offset);
			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
				bp[i] = last.byte[i];
		} else {
			last.word = *buf;
			for (i = resid; i < 4; i++)
				last.byte[i] = 0;
			t4_write_reg(adap, mem_base + offset,
				     (__force u32) last.word);
		}
	}

	return 0;
}
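
/*
 * Worked example (illustrative): with a 64KB aperture (mem_aperture =
 * 0x10000) and a transfer starting at adapter address 0x1fff8, the window is
 * initially positioned at pos = 0x10000 with offset = 0xfff8.  After two
 * 32-bit transfers offset reaches 0x10000 == mem_aperture, so the window
 * slides to pos = 0x20000, offset resets to 0, and the copy continues; the
 * caller never sees the window management.
 */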

#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_BASE		0x400
#define VPD_BASE_OLD		0
#define VPD_LEN			1024
#define CHELSIO_VPD_UNIQUE_ID	0x82

/**
 * t4_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: whether to enable or disable write protection
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
	unsigned int v = enable ? 0xc : 0;
	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
	return ret < 0 ? ret : 0;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	u32 cclk_param, cclk_val;
	int i, ret, addr;
	int ec, sn, pn;
	u8 *vpd, csum;
	unsigned int vpdr_len, kw_offset, id_len;

	vpd = vmalloc(VPD_LEN);
	if (!vpd)
		return -ENOMEM;

	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
	if (ret < 0)
		goto out;

	/* The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
	 * is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
	if (ret < 0)
		goto out;

	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
		ret = -EINVAL;
		goto out;
	}

	id_len = pci_vpd_lrdt_size(vpd);
	if (id_len > ID_LEN)
		id_len = ID_LEN;

	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
		ret = -EINVAL;
		goto out;
	}

	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
	if (vpdr_len + kw_offset > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		ret = -EINVAL;
		goto out;
	}

#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		ret = -EINVAL; \
		goto out; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		ret = -EINVAL;
		goto out;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
#undef FIND_VPD_KW

	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);
	i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strim(p->pn);

	/*
	 * Ask firmware for the Core Clock since it knows how to translate the
	 * Reference Clock ('V2') VPD field into a Core Clock value ...
	 */
	cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		      FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
	ret = t4_query_params(adapter, adapter->mbox, 0, 0,
			      1, &cclk_param, &cclk_val);

out:
	vfree(vpd);
	if (ret)
		return ret;
	p->cclk = cclk_val;

	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_DATA, val);
	t4_write_reg(adapter, SF_OP, lock |
		     cont | BYTECNT(byte_cnt - 1) | OP_WR);
	return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP, 0);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32) (htonl(*data));
	}
	return 0;
}

/**
 * t4_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.  All the data must be written to the same page.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 * t4_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 * t4_get_tp_version - read the TP microcode version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}

/* Is the given firmware API compatible with the one the driver was compiled
 * with?
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{

	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}

/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 */
static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
				int k, int c)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version supported with this driver";
		goto install;
	}

	return 0;

install:
	dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
		"installing firmware %u.%u.%u.%u on card.\n",
		FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
		FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason,
		FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
		FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));

	return 1;
}

int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state,
	       int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			     sizeof(*card_fw) / sizeof(uint32_t),
			     (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		dev_err(adap->pdev_dev,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver))) {
		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			dev_err(adap->pdev_dev,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
			"chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			state,
			FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d),
			FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d),
			FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
			FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c),
			FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
			FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}

/**
 * t4_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	if (end >= adapter->params.sf_nsec)
		return -EINVAL;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			dev_err(adapter->pdev_dev,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 * t4_flash_cfg_addr - return the address of the flash configuration file
 * @adapter: the adapter
 *
 * Return the address within the flash where the Firmware Configuration
 * File is stored.
 */
unsigned int t4_flash_cfg_addr(struct adapter *adapter)
{
	if (adapter->params.sf_size == 0x100000)
		return FLASH_FPGA_CFG_START;
	else
		return FLASH_CFG_START;
}

/**
 * t4_load_fw - download firmware
 * @adap: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	return ret;
}

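/*
 * Sizing sketch (illustrative numbers): with 64KB flash sectors
 * (sf_sec_size = 0x10000) a 500KB firmware image spans
 * DIV_ROUND_UP(512000, 65536) = 8 sectors, so the erase in t4_load_fw()
 * covers sectors fw_start_sec through fw_start_sec + 7 before the image is
 * written back page by page (SF_PAGE_SIZE bytes at a time).
 */
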
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_ANEG)

/**
 * t4_link_start - apply link configuration to MAC/PHY
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: the port id
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_restart_aneg - restart autonegotiation
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 *
 * Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

typedef void (*int_handler_t)(struct adapter *adap);

struct intr_info {
	unsigned int mask;         /* bits to check in interrupt status */
	const char *msg;           /* message to print or NULL */
	short stat_idx;            /* stat counter to increment or -1 */
	unsigned short fatal;      /* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};

/**
 * t4_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally emitting a warning or alert message.  The table is terminated
 * by an entry specifying mask 0.  Returns the number of fatal interrupt
 * conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				  status & acts->mask);
		} else if (acts->msg && printk_ratelimit())
			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		if (acts->int_handler)
			acts->int_handler(adapter);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)	/* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}
1335
1336/*
1337 * Interrupt handler for the PCIE module.
1338 */
1339static void pcie_intr_handler(struct adapter *adapter)
1340{
Joe Perches005b5712010-12-14 21:36:53 +00001341 static const struct intr_info sysbus_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001342 { RNPP, "RXNP array parity error", -1, 1 },
1343 { RPCP, "RXPC array parity error", -1, 1 },
1344 { RCIP, "RXCIF array parity error", -1, 1 },
1345 { RCCP, "Rx completions control array parity error", -1, 1 },
1346 { RFTP, "RXFT array parity error", -1, 1 },
1347 { 0 }
1348 };
Joe Perches005b5712010-12-14 21:36:53 +00001349 static const struct intr_info pcie_port_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001350 { TPCP, "TXPC array parity error", -1, 1 },
1351 { TNPP, "TXNP array parity error", -1, 1 },
1352 { TFTP, "TXFT array parity error", -1, 1 },
1353 { TCAP, "TXCA array parity error", -1, 1 },
1354 { TCIP, "TXCIF array parity error", -1, 1 },
1355 { RCAP, "RXCA array parity error", -1, 1 },
1356 { OTDD, "outbound request TLP discarded", -1, 1 },
1357 { RDPE, "Rx data parity error", -1, 1 },
1358 { TDUE, "Tx uncorrectable data error", -1, 1 },
1359 { 0 }
1360 };
Joe Perches005b5712010-12-14 21:36:53 +00001361 static const struct intr_info pcie_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001362 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1363 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1364 { MSIDATAPERR, "MSI data parity error", -1, 1 },
1365 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1366 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1367 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1368 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1369 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1370 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1371 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1372 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1373 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1374 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1375 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1376 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1377 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1378 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1379 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1380 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1381 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1382 { FIDPERR, "PCI FID parity error", -1, 1 },
1383 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1384 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
1385 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1386 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1387 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
1388 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
1389 { PCIESINT, "PCI core secondary fault", -1, 1 },
1390 { PCIEPINT, "PCI core primary fault", -1, 1 },
1391 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
1392 { 0 }
1393 };
1394
Santosh Rastapur0a57a532013-03-14 05:08:49 +00001395	static const struct intr_info t5_pcie_intr_info[] = {
1396 { MSTGRPPERR, "Master Response Read Queue parity error",
1397 -1, 1 },
1398 { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
1399 { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
1400 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1401 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1402 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1403 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1404 { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
1405 -1, 1 },
1406 { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
1407 -1, 1 },
1408 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1409 { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
1410 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1411 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1412 { DREQWRPERR, "PCI DMA channel write request parity error",
1413 -1, 1 },
1414 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1415 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
 1416		{ HREQWRPERR, "PCI HMA channel write request parity error", -1, 1 },
1417 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1418 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1419 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1420 { FIDPERR, "PCI FID parity error", -1, 1 },
 1421		{ VFIDPERR, "PCI VFID parity error", -1, 1 },
1422 { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
1423 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1424 { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
1425 -1, 1 },
1426 { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
1427 { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
1428 { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
1429 { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
1430 { READRSPERR, "Outbound read error", -1, 0 },
1431 { 0 }
1432 };
1433
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001434 int fat;
1435
Hariprasad Shenai9bb59b92014-09-01 19:54:57 +05301436 if (is_t4(adapter->params.chip))
1437 fat = t4_handle_intr_status(adapter,
1438 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1439 sysbus_intr_info) +
1440 t4_handle_intr_status(adapter,
1441 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1442 pcie_port_intr_info) +
1443 t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1444 pcie_intr_info);
1445 else
1446 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1447 t5_pcie_intr_info);
Santosh Rastapur0a57a532013-03-14 05:08:49 +00001448
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001449 if (fat)
1450 t4_fatal_err(adapter);
1451}
1452
1453/*
1454 * TP interrupt handler.
1455 */
1456static void tp_intr_handler(struct adapter *adapter)
1457{
Joe Perches005b5712010-12-14 21:36:53 +00001458 static const struct intr_info tp_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001459 { 0x3fffffff, "TP parity error", -1, 1 },
1460 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1461 { 0 }
1462 };
1463
1464 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1465 t4_fatal_err(adapter);
1466}
1467
1468/*
1469 * SGE interrupt handler.
1470 */
1471static void sge_intr_handler(struct adapter *adapter)
1472{
1473 u64 v;
1474
Joe Perches005b5712010-12-14 21:36:53 +00001475 static const struct intr_info sge_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001476 { ERR_CPL_EXCEED_IQE_SIZE,
1477 "SGE received CPL exceeding IQE size", -1, 1 },
1478 { ERR_INVALID_CIDX_INC,
1479 "SGE GTS CIDX increment too large", -1, 0 },
1480 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
Vipul Pandya840f3002012-09-05 02:01:55 +00001481 { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
1482 { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
1483 { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001484 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1485 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1486 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1487 0 },
1488 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1489 0 },
1490 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1491 0 },
1492 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1493 0 },
1494 { ERR_ING_CTXT_PRIO,
1495 "SGE too many priority ingress contexts", -1, 0 },
1496 { ERR_EGR_CTXT_PRIO,
1497 "SGE too many priority egress contexts", -1, 0 },
1498 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1499 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1500 { 0 }
1501 };
1502
1503 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
Vipul Pandya8caa1e82012-05-18 15:29:25 +05301504 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001505 if (v) {
1506 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
Vipul Pandya8caa1e82012-05-18 15:29:25 +05301507 (unsigned long long)v);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001508 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1509 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1510 }
1511
1512 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1513 v != 0)
1514 t4_fatal_err(adapter);
1515}
1516
1517/*
1518 * CIM interrupt handler.
1519 */
1520static void cim_intr_handler(struct adapter *adapter)
1521{
Joe Perches005b5712010-12-14 21:36:53 +00001522 static const struct intr_info cim_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001523 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1524 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1525 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1526 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1527 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1528 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1529 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1530 { 0 }
1531 };
Joe Perches005b5712010-12-14 21:36:53 +00001532 static const struct intr_info cim_upintr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001533 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1534 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1535 { ILLWRINT, "CIM illegal write", -1, 1 },
1536 { ILLRDINT, "CIM illegal read", -1, 1 },
1537 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1538 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1539 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1540 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1541 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1542 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1543 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1544 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1545 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1546 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1547 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1548 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
 1549		{ SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
 1550		{ SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
 1551		{ BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
 1552		{ BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
 1553		{ SGLRDPLINT, "CIM single read from PL space", -1, 1 },
 1554		{ SGLWRPLINT, "CIM single write to PL space", -1, 1 },
 1555		{ BLKRDPLINT, "CIM block read from PL space", -1, 1 },
 1556		{ BLKWRPLINT, "CIM block write to PL space", -1, 1 },
 1557		{ REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
 1558		{ RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
 1559		{ TIMEOUTINT, "CIM PIF timeout", -1, 1 },
 1560		{ TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
1561 { 0 }
1562 };
1563
1564 int fat;
1565
Hariprasad Shenai31d55c22014-09-01 19:54:58 +05301566 if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR)
1567 t4_report_fw_error(adapter);
1568
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001569 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1570 cim_intr_info) +
1571 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1572 cim_upintr_info);
1573 if (fat)
1574 t4_fatal_err(adapter);
1575}
1576
1577/*
1578 * ULP RX interrupt handler.
1579 */
1580static void ulprx_intr_handler(struct adapter *adapter)
1581{
Joe Perches005b5712010-12-14 21:36:53 +00001582 static const struct intr_info ulprx_intr_info[] = {
Dimitris Michailidis91e9a1e2010-06-18 10:05:33 +00001583 { 0x1800000, "ULPRX context error", -1, 1 },
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001584 { 0x7fffff, "ULPRX parity error", -1, 1 },
1585 { 0 }
1586 };
1587
1588 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1589 t4_fatal_err(adapter);
1590}
1591
1592/*
1593 * ULP TX interrupt handler.
1594 */
1595static void ulptx_intr_handler(struct adapter *adapter)
1596{
Joe Perches005b5712010-12-14 21:36:53 +00001597 static const struct intr_info ulptx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001598 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1599 0 },
1600 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1601 0 },
1602 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1603 0 },
1604 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1605 0 },
1606 { 0xfffffff, "ULPTX parity error", -1, 1 },
1607 { 0 }
1608 };
1609
1610 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1611 t4_fatal_err(adapter);
1612}
1613
1614/*
1615 * PM TX interrupt handler.
1616 */
1617static void pmtx_intr_handler(struct adapter *adapter)
1618{
Joe Perches005b5712010-12-14 21:36:53 +00001619 static const struct intr_info pmtx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001620 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1621 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1622 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1623 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1624 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1625 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1626 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1627 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1628 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1629 { 0 }
1630 };
1631
1632 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1633 t4_fatal_err(adapter);
1634}
1635
1636/*
1637 * PM RX interrupt handler.
1638 */
1639static void pmrx_intr_handler(struct adapter *adapter)
1640{
Joe Perches005b5712010-12-14 21:36:53 +00001641 static const struct intr_info pmrx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001642 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1643 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1644 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1645 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1646 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1647 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1648 { 0 }
1649 };
1650
1651 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1652 t4_fatal_err(adapter);
1653}
1654
1655/*
1656 * CPL switch interrupt handler.
1657 */
1658static void cplsw_intr_handler(struct adapter *adapter)
1659{
Joe Perches005b5712010-12-14 21:36:53 +00001660 static const struct intr_info cplsw_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001661 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1662 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1663 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1664 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1665 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1666 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1667 { 0 }
1668 };
1669
1670 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1671 t4_fatal_err(adapter);
1672}
1673
1674/*
1675 * LE interrupt handler.
1676 */
1677static void le_intr_handler(struct adapter *adap)
1678{
Joe Perches005b5712010-12-14 21:36:53 +00001679 static const struct intr_info le_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001680 { LIPMISS, "LE LIP miss", -1, 0 },
1681 { LIP0, "LE 0 LIP error", -1, 0 },
1682 { PARITYERR, "LE parity error", -1, 1 },
1683 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1684 { REQQPARERR, "LE request queue parity error", -1, 1 },
1685 { 0 }
1686 };
1687
1688 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1689 t4_fatal_err(adap);
1690}
1691
1692/*
1693 * MPS interrupt handler.
1694 */
1695static void mps_intr_handler(struct adapter *adapter)
1696{
Joe Perches005b5712010-12-14 21:36:53 +00001697 static const struct intr_info mps_rx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001698 { 0xffffff, "MPS Rx parity error", -1, 1 },
1699 { 0 }
1700 };
Joe Perches005b5712010-12-14 21:36:53 +00001701 static const struct intr_info mps_tx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001702 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1703 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1704 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1705 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1706 { BUBBLE, "MPS Tx underflow", -1, 1 },
1707 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1708 { FRMERR, "MPS Tx framing error", -1, 1 },
1709 { 0 }
1710 };
Joe Perches005b5712010-12-14 21:36:53 +00001711 static const struct intr_info mps_trc_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001712 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1713 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1714 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1715 { 0 }
1716 };
Joe Perches005b5712010-12-14 21:36:53 +00001717 static const struct intr_info mps_stat_sram_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001718 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1719 { 0 }
1720 };
Joe Perches005b5712010-12-14 21:36:53 +00001721 static const struct intr_info mps_stat_tx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001722 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1723 { 0 }
1724 };
Joe Perches005b5712010-12-14 21:36:53 +00001725 static const struct intr_info mps_stat_rx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001726 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1727 { 0 }
1728 };
Joe Perches005b5712010-12-14 21:36:53 +00001729 static const struct intr_info mps_cls_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001730 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1731 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1732 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1733 { 0 }
1734 };
1735
1736 int fat;
1737
1738 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1739 mps_rx_intr_info) +
1740 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1741 mps_tx_intr_info) +
1742 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1743 mps_trc_intr_info) +
1744 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1745 mps_stat_sram_intr_info) +
1746 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1747 mps_stat_tx_intr_info) +
1748 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1749 mps_stat_rx_intr_info) +
1750 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1751 mps_cls_intr_info);
1752
1753 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1754 RXINT | TXINT | STATINT);
1755 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1756 if (fat)
1757 t4_fatal_err(adapter);
1758}
1759
1760#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1761
1762/*
1763 * EDC/MC interrupt handler.
1764 */
1765static void mem_intr_handler(struct adapter *adapter, int idx)
1766{
Hariprasad Shenai822dd8a2014-07-21 20:55:12 +05301767 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001768
1769 unsigned int addr, cnt_addr, v;
1770
1771 if (idx <= MEM_EDC1) {
1772 addr = EDC_REG(EDC_INT_CAUSE, idx);
1773 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
Hariprasad Shenai822dd8a2014-07-21 20:55:12 +05301774 } else if (idx == MEM_MC) {
1775 if (is_t4(adapter->params.chip)) {
1776 addr = MC_INT_CAUSE;
1777 cnt_addr = MC_ECC_STATUS;
1778 } else {
1779 addr = MC_P_INT_CAUSE;
1780 cnt_addr = MC_P_ECC_STATUS;
1781 }
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001782 } else {
Hariprasad Shenai822dd8a2014-07-21 20:55:12 +05301783 addr = MC_REG(MC_P_INT_CAUSE, 1);
1784 cnt_addr = MC_REG(MC_P_ECC_STATUS, 1);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001785 }
1786
1787 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1788 if (v & PERR_INT_CAUSE)
1789 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1790 name[idx]);
1791 if (v & ECC_CE_INT_CAUSE) {
1792 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1793
1794 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1795 if (printk_ratelimit())
1796 dev_warn(adapter->pdev_dev,
1797 "%u %s correctable ECC data error%s\n",
1798 cnt, name[idx], cnt > 1 ? "s" : "");
1799 }
1800 if (v & ECC_UE_INT_CAUSE)
1801 dev_alert(adapter->pdev_dev,
1802 "%s uncorrectable ECC data error\n", name[idx]);
1803
1804 t4_write_reg(adapter, addr, v);
1805 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1806 t4_fatal_err(adapter);
1807}
1808
1809/*
1810 * MA interrupt handler.
1811 */
1812static void ma_intr_handler(struct adapter *adap)
1813{
1814 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1815
Hariprasad Shenai9bb59b92014-09-01 19:54:57 +05301816 if (status & MEM_PERR_INT_CAUSE) {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001817 dev_alert(adap->pdev_dev,
1818 "MA parity error, parity status %#x\n",
1819 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
Hariprasad Shenai9bb59b92014-09-01 19:54:57 +05301820 if (is_t5(adap->params.chip))
1821 dev_alert(adap->pdev_dev,
1822 "MA parity error, parity status %#x\n",
1823 t4_read_reg(adap,
1824 MA_PARITY_ERROR_STATUS2));
1825 }
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001826 if (status & MEM_WRAP_INT_CAUSE) {
1827 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1828 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1829 "client %u to address %#x\n",
1830 MEM_WRAP_CLIENT_NUM_GET(v),
1831 MEM_WRAP_ADDRESS_GET(v) << 4);
1832 }
1833 t4_write_reg(adap, MA_INT_CAUSE, status);
1834 t4_fatal_err(adap);
1835}
1836
1837/*
1838 * SMB interrupt handler.
1839 */
1840static void smb_intr_handler(struct adapter *adap)
1841{
Joe Perches005b5712010-12-14 21:36:53 +00001842 static const struct intr_info smb_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001843 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1844 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1845 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1846 { 0 }
1847 };
1848
1849 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1850 t4_fatal_err(adap);
1851}
1852
1853/*
1854 * NC-SI interrupt handler.
1855 */
1856static void ncsi_intr_handler(struct adapter *adap)
1857{
Joe Perches005b5712010-12-14 21:36:53 +00001858 static const struct intr_info ncsi_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001859 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1860 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1861 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1862 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1863 { 0 }
1864 };
1865
1866 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1867 t4_fatal_err(adap);
1868}
1869
1870/*
1871 * XGMAC interrupt handler.
1872 */
1873static void xgmac_intr_handler(struct adapter *adap, int port)
1874{
Santosh Rastapur0a57a532013-03-14 05:08:49 +00001875 u32 v, int_cause_reg;
1876
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05301877 if (is_t4(adap->params.chip))
Santosh Rastapur0a57a532013-03-14 05:08:49 +00001878 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
1879 else
1880 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
1881
1882 v = t4_read_reg(adap, int_cause_reg);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001883
1884 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1885 if (!v)
1886 return;
1887
1888 if (v & TXFIFO_PRTY_ERR)
1889 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1890 port);
1891 if (v & RXFIFO_PRTY_ERR)
1892 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1893 port);
 1894	t4_write_reg(adap, int_cause_reg, v);
1895 t4_fatal_err(adap);
1896}
1897
1898/*
1899 * PL interrupt handler.
1900 */
1901static void pl_intr_handler(struct adapter *adap)
1902{
Joe Perches005b5712010-12-14 21:36:53 +00001903 static const struct intr_info pl_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001904 { FATALPERR, "T4 fatal parity error", -1, 1 },
1905 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1906 { 0 }
1907 };
1908
1909 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1910 t4_fatal_err(adap);
1911}
1912
Dimitris Michailidis63bccee2010-08-02 13:19:16 +00001913#define PF_INTR_MASK (PFSW)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001914#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1915 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1916 CPL_SWITCH | SGE | ULP_TX)
1917
1918/**
1919 * t4_slow_intr_handler - control path interrupt handler
1920 * @adapter: the adapter
1921 *
1922 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1923 * The designation 'slow' is because it involves register reads, while
1924 * data interrupts typically don't involve any MMIOs.
1925 */
1926int t4_slow_intr_handler(struct adapter *adapter)
1927{
1928 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1929
1930 if (!(cause & GLBL_INTR_MASK))
1931 return 0;
1932 if (cause & CIM)
1933 cim_intr_handler(adapter);
1934 if (cause & MPS)
1935 mps_intr_handler(adapter);
1936 if (cause & NCSI)
1937 ncsi_intr_handler(adapter);
1938 if (cause & PL)
1939 pl_intr_handler(adapter);
1940 if (cause & SMB)
1941 smb_intr_handler(adapter);
1942 if (cause & XGMAC0)
1943 xgmac_intr_handler(adapter, 0);
1944 if (cause & XGMAC1)
1945 xgmac_intr_handler(adapter, 1);
1946 if (cause & XGMAC_KR0)
1947 xgmac_intr_handler(adapter, 2);
1948 if (cause & XGMAC_KR1)
1949 xgmac_intr_handler(adapter, 3);
1950 if (cause & PCIE)
1951 pcie_intr_handler(adapter);
1952 if (cause & MC)
1953 mem_intr_handler(adapter, MEM_MC);
Hariprasad Shenai822dd8a2014-07-21 20:55:12 +05301954 if (!is_t4(adapter->params.chip) && (cause & MC1))
1955 mem_intr_handler(adapter, MEM_MC1);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001956 if (cause & EDC0)
1957 mem_intr_handler(adapter, MEM_EDC0);
1958 if (cause & EDC1)
1959 mem_intr_handler(adapter, MEM_EDC1);
1960 if (cause & LE)
1961 le_intr_handler(adapter);
1962 if (cause & TP)
1963 tp_intr_handler(adapter);
1964 if (cause & MA)
1965 ma_intr_handler(adapter);
1966 if (cause & PM_TX)
1967 pmtx_intr_handler(adapter);
1968 if (cause & PM_RX)
1969 pmrx_intr_handler(adapter);
1970 if (cause & ULP_RX)
1971 ulprx_intr_handler(adapter);
1972 if (cause & CPL_SWITCH)
1973 cplsw_intr_handler(adapter);
1974 if (cause & SGE)
1975 sge_intr_handler(adapter);
1976 if (cause & ULP_TX)
1977 ulptx_intr_handler(adapter);
1978
1979 /* Clear the interrupts just processed for which we are the master. */
1980 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1981 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1982 return 1;
1983}
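
/*
 * Usage sketch (editorial, not from the original source): the PF that owns
 * the global interrupts would typically call this from its non-data
 * interrupt path, e.g.
 *
 *	if (t4_slow_intr_handler(adap))
 *		return IRQ_HANDLED;
 *
 * the non-zero return indicating that at least one global cause bit was
 * found in PL_INT_CAUSE and cleared.
 */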
1984
1985/**
1986 * t4_intr_enable - enable interrupts
1987 * @adapter: the adapter whose interrupts should be enabled
1988 *
1989 * Enable PF-specific interrupts for the calling function and the top-level
1990 * interrupt concentrator for global interrupts. Interrupts are already
 1991 * enabled at each module; here we just enable the roots of the interrupt
1992 * hierarchies.
1993 *
1994 * Note: this function should be called only when the driver manages
1995 * non PF-specific interrupts from the various HW modules. Only one PCI
1996 * function at a time should be doing this.
1997 */
1998void t4_intr_enable(struct adapter *adapter)
1999{
2000 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
2001
2002 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
2003 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
2004 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
2005 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
2006 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
2007 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
2008 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
Vipul Pandya840f3002012-09-05 02:01:55 +00002009 DBFIFO_HP_INT | DBFIFO_LP_INT |
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002010 EGRESS_SIZE_ERR);
2011 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
2012 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
2013}
2014
2015/**
2016 * t4_intr_disable - disable interrupts
2017 * @adapter: the adapter whose interrupts should be disabled
2018 *
2019 * Disable interrupts. We only disable the top-level interrupt
2020 * concentrators. The caller must be a PCI function managing global
2021 * interrupts.
2022 */
2023void t4_intr_disable(struct adapter *adapter)
2024{
2025 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
2026
2027 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
2028 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
2029}
2030
2031/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002032 * hash_mac_addr - return the hash value of a MAC address
2033 * @addr: the 48-bit Ethernet MAC address
2034 *
2035 * Hashes a MAC address according to the hash function used by HW inexact
2036 * (hash) address matching.
2037 */
2038static int hash_mac_addr(const u8 *addr)
2039{
2040 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2041 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2042 a ^= b;
2043 a ^= (a >> 12);
2044 a ^= (a >> 6);
2045 return a & 0x3f;
2046}
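
/*
 * Worked example (editorial note, not in the original source): for the MAC
 * address 00:07:43:12:34:56 the function above folds the two 24-bit halves
 * together and then mixes the result down to 6 bits:
 *
 *	a = 0x000743, b = 0x123456
 *	a ^= b         ->  0x123315
 *	a ^= a >> 12   ->  0x123236
 *	a ^= a >> 6    ->  0x127afe
 *	a & 0x3f       ->  0x3e
 *
 * so this address lands in bucket 62 of the 64-entry inexact-match hash.
 */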
2047
2048/**
2049 * t4_config_rss_range - configure a portion of the RSS mapping table
2050 * @adapter: the adapter
2051 * @mbox: mbox to use for the FW command
2052 * @viid: virtual interface whose RSS subtable is to be written
2053 * @start: start entry in the table to write
2054 * @n: how many table entries to write
2055 * @rspq: values for the response queue lookup table
2056 * @nrspq: number of values in @rspq
2057 *
2058 * Programs the selected part of the VI's RSS mapping table with the
2059 * provided values. If @nrspq < @n the supplied values are used repeatedly
2060 * until the full table range is populated.
2061 *
2062 * The caller must ensure the values in @rspq are in the range allowed for
2063 * @viid.
2064 */
2065int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2066 int start, int n, const u16 *rspq, unsigned int nrspq)
2067{
2068 int ret;
2069 const u16 *rsp = rspq;
2070 const u16 *rsp_end = rspq + nrspq;
2071 struct fw_rss_ind_tbl_cmd cmd;
2072
2073 memset(&cmd, 0, sizeof(cmd));
2074 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2075 FW_CMD_REQUEST | FW_CMD_WRITE |
2076 FW_RSS_IND_TBL_CMD_VIID(viid));
2077 cmd.retval_len16 = htonl(FW_LEN16(cmd));
2078
2079 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
2080 while (n > 0) {
2081 int nq = min(n, 32);
2082 __be32 *qp = &cmd.iq0_to_iq2;
2083
2084 cmd.niqid = htons(nq);
2085 cmd.startidx = htons(start);
2086
2087 start += nq;
2088 n -= nq;
2089
2090 while (nq > 0) {
2091 unsigned int v;
2092
2093 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
2094 if (++rsp >= rsp_end)
2095 rsp = rspq;
2096 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
2097 if (++rsp >= rsp_end)
2098 rsp = rspq;
2099 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
2100 if (++rsp >= rsp_end)
2101 rsp = rspq;
2102
2103 *qp++ = htonl(v);
2104 nq -= 3;
2105 }
2106
2107 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2108 if (ret)
2109 return ret;
2110 }
2111 return 0;
2112}
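
/*
 * Usage sketch (editorial): a caller that owns a VI could spread an array of
 * response-queue IDs over the VI's whole indirection table with something
 * like the call below; mbox, viid, rss_size, rspq and nrspq are placeholders
 * for values taken from the caller's own configuration:
 *
 *	err = t4_config_rss_range(adap, mbox, viid, 0, rss_size,
 *				  rspq, nrspq);
 *
 * Because the supplied IDs are reused cyclically when nrspq is smaller than
 * the range being written, a short rspq[] is enough to fill the whole table.
 */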
2113
2114/**
2115 * t4_config_glbl_rss - configure the global RSS mode
2116 * @adapter: the adapter
2117 * @mbox: mbox to use for the FW command
2118 * @mode: global RSS mode
2119 * @flags: mode-specific flags
2120 *
2121 * Sets the global RSS mode.
2122 */
2123int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2124 unsigned int flags)
2125{
2126 struct fw_rss_glb_config_cmd c;
2127
2128 memset(&c, 0, sizeof(c));
2129 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2130 FW_CMD_REQUEST | FW_CMD_WRITE);
2131 c.retval_len16 = htonl(FW_LEN16(c));
2132 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2133 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2134 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2135 c.u.basicvirtual.mode_pkd =
2136 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2137 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2138 } else
2139 return -EINVAL;
2140 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2141}
2142
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002143/**
2144 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
2145 * @adap: the adapter
2146 * @v4: holds the TCP/IP counter values
2147 * @v6: holds the TCP/IPv6 counter values
2148 *
2149 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2150 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2151 */
2152void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2153 struct tp_tcp_stats *v6)
2154{
2155 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
2156
2157#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
2158#define STAT(x) val[STAT_IDX(x)]
2159#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2160
2161 if (v4) {
2162 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2163 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
2164 v4->tcpOutRsts = STAT(OUT_RST);
2165 v4->tcpInSegs = STAT64(IN_SEG);
2166 v4->tcpOutSegs = STAT64(OUT_SEG);
2167 v4->tcpRetransSegs = STAT64(RXT_SEG);
2168 }
2169 if (v6) {
2170 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2171 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
2172 v6->tcpOutRsts = STAT(OUT_RST);
2173 v6->tcpInSegs = STAT64(IN_SEG);
2174 v6->tcpOutSegs = STAT64(OUT_SEG);
2175 v6->tcpRetransSegs = STAT64(RXT_SEG);
2176 }
2177#undef STAT64
2178#undef STAT
2179#undef STAT_IDX
2180}
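
/*
 * Usage sketch (editorial): either pointer may be NULL, so a caller that only
 * needs the IPv4 counters might do (tcp4 is a local here, not a driver name):
 *
 *	struct tp_tcp_stats tcp4;
 *
 *	t4_tp_get_tcp_stats(adap, &tcp4, NULL);
 *	pr_info("TCP retransmitted segments: %llu\n",
 *		(unsigned long long)tcp4.tcpRetransSegs);
 */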
2181
2182/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002183 * t4_read_mtu_tbl - returns the values in the HW path MTU table
2184 * @adap: the adapter
2185 * @mtus: where to store the MTU values
2186 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
2187 *
2188 * Reads the HW path MTU table.
2189 */
2190void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2191{
2192 u32 v;
2193 int i;
2194
2195 for (i = 0; i < NMTUS; ++i) {
2196 t4_write_reg(adap, TP_MTU_TABLE,
2197 MTUINDEX(0xff) | MTUVALUE(i));
2198 v = t4_read_reg(adap, TP_MTU_TABLE);
2199 mtus[i] = MTUVALUE_GET(v);
2200 if (mtu_log)
2201 mtu_log[i] = MTUWIDTH_GET(v);
2202 }
2203}
2204
2205/**
Vipul Pandya636f9d32012-09-26 02:39:39 +00002206 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
2207 * @adap: the adapter
2208 * @addr: the indirect TP register address
2209 * @mask: specifies the field within the register to modify
2210 * @val: new value for the field
2211 *
2212 * Sets a field of an indirect TP register to the given value.
2213 */
2214void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
2215 unsigned int mask, unsigned int val)
2216{
2217 t4_write_reg(adap, TP_PIO_ADDR, addr);
2218 val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
2219 t4_write_reg(adap, TP_PIO_DATA, val);
2220}
2221
2222/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002223 * init_cong_ctrl - initialize congestion control parameters
2224 * @a: the alpha values for congestion control
2225 * @b: the beta values for congestion control
2226 *
2227 * Initialize the congestion control parameters.
2228 */
Bill Pemberton91744942012-12-03 09:23:02 -05002229static void init_cong_ctrl(unsigned short *a, unsigned short *b)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002230{
2231 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2232 a[9] = 2;
2233 a[10] = 3;
2234 a[11] = 4;
2235 a[12] = 5;
2236 a[13] = 6;
2237 a[14] = 7;
2238 a[15] = 8;
2239 a[16] = 9;
2240 a[17] = 10;
2241 a[18] = 14;
2242 a[19] = 17;
2243 a[20] = 21;
2244 a[21] = 25;
2245 a[22] = 30;
2246 a[23] = 35;
2247 a[24] = 45;
2248 a[25] = 60;
2249 a[26] = 80;
2250 a[27] = 100;
2251 a[28] = 200;
2252 a[29] = 300;
2253 a[30] = 400;
2254 a[31] = 500;
2255
2256 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2257 b[9] = b[10] = 1;
2258 b[11] = b[12] = 2;
2259 b[13] = b[14] = b[15] = b[16] = 3;
2260 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2261 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2262 b[28] = b[29] = 6;
2263 b[30] = b[31] = 7;
2264}
2265
2266/* The minimum additive increment value for the congestion control table */
2267#define CC_MIN_INCR 2U
2268
2269/**
2270 * t4_load_mtus - write the MTU and congestion control HW tables
2271 * @adap: the adapter
2272 * @mtus: the values for the MTU table
2273 * @alpha: the values for the congestion control alpha parameter
2274 * @beta: the values for the congestion control beta parameter
2275 *
2276 * Write the HW MTU table with the supplied MTUs and the high-speed
2277 * congestion control table with the supplied alpha, beta, and MTUs.
2278 * We write the two tables together because the additive increments
2279 * depend on the MTUs.
2280 */
2281void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2282 const unsigned short *alpha, const unsigned short *beta)
2283{
2284 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2285 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2286 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2287 28672, 40960, 57344, 81920, 114688, 163840, 229376
2288 };
2289
2290 unsigned int i, w;
2291
2292 for (i = 0; i < NMTUS; ++i) {
2293 unsigned int mtu = mtus[i];
2294 unsigned int log2 = fls(mtu);
2295
2296 if (!(mtu & ((1 << log2) >> 2))) /* round */
2297 log2--;
2298 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
2299 MTUWIDTH(log2) | MTUVALUE(mtu));
2300
2301 for (w = 0; w < NCCTRL_WIN; ++w) {
2302 unsigned int inc;
2303
2304 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2305 CC_MIN_INCR);
2306
2307 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
2308 (w << 16) | (beta[w] << 13) | inc);
2309 }
2310 }
2311}
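
/*
 * Worked example (editorial): for an MTU of 1500 bytes in congestion-control
 * window 0 (avg_pkts[0] == 2) with alpha[0] == 1, the loop above programs
 *
 *	inc = max(((1500 - 40) * 1) / 2, CC_MIN_INCR) = 730
 *
 * whereas any combination that computes to less than 2 is clamped up to
 * CC_MIN_INCR, so every window always gets some additive increment.
 */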
2312
2313/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002314 * get_mps_bg_map - return the buffer groups associated with a port
2315 * @adap: the adapter
2316 * @idx: the port index
2317 *
2318 * Returns a bitmap indicating which MPS buffer groups are associated
2319 * with the given port. Bit i is set if buffer group i is used by the
2320 * port.
2321 */
2322static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2323{
2324 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2325
2326 if (n == 0)
2327 return idx == 0 ? 0xf : 0;
2328 if (n == 1)
2329 return idx < 2 ? (3 << (2 * idx)) : 0;
2330 return 1 << idx;
2331}
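
/*
 * Example (editorial): with the NUMPORTS field reading 0 a single port owns
 * all four buffer groups (0xf); with it reading 1, port 0 maps to groups
 * {0,1} (0x3) and port 1 to groups {2,3} (0xc); for larger values each port
 * maps 1:1 to the buffer group with its own index.
 */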
2332
2333/**
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05302334 * t4_get_port_type_description - return Port Type string description
2335 * @port_type: firmware Port Type enumeration
2336 */
2337const char *t4_get_port_type_description(enum fw_port_type port_type)
2338{
2339 static const char *const port_type_description[] = {
2340 "R XFI",
2341 "R XAUI",
2342 "T SGMII",
2343 "T XFI",
2344 "T XAUI",
2345 "KX4",
2346 "CX4",
2347 "KX",
2348 "KR",
2349 "R SFP+",
2350 "KR/KX",
2351 "KR/KX/KX4",
2352 "R QSFP_10G",
2353 "",
2354 "R QSFP",
2355 "R BP40_BA",
2356 };
2357
2358 if (port_type < ARRAY_SIZE(port_type_description))
2359 return port_type_description[port_type];
2360 return "UNKNOWN";
2361}
2362
2363/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002364 * t4_get_port_stats - collect port statistics
2365 * @adap: the adapter
2366 * @idx: the port index
2367 * @p: the stats structure to fill
2368 *
2369 * Collect statistics related to the given port from HW.
2370 */
2371void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2372{
2373 u32 bgmap = get_mps_bg_map(adap, idx);
2374
2375#define GET_STAT(name) \
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002376 t4_read_reg64(adap, \
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05302377 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002378 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002379#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2380
2381 p->tx_octets = GET_STAT(TX_PORT_BYTES);
2382 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
2383 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
2384 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
2385 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
2386 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
2387 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
2388 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
2389 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
2390 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
2391 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
2392 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2393 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
2394 p->tx_drop = GET_STAT(TX_PORT_DROP);
2395 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
2396 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
2397 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
2398 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
2399 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
2400 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
2401 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
2402 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
2403 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
2404
2405 p->rx_octets = GET_STAT(RX_PORT_BYTES);
2406 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
2407 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
2408 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
2409 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
2410 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
2411 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2412 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
2413 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
2414 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
2415 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
2416 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
2417 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
2418 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
2419 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
2420 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
2421 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2422 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
2423 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
2424 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
2425 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
2426 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
2427 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
2428 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
2429 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
2430 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
2431 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
2432
2433 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2434 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2435 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2436 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2437 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2438 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2439 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2440 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2441
2442#undef GET_STAT
2443#undef GET_STAT_COM
2444}
2445
2446/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002447 * t4_wol_magic_enable - enable/disable magic packet WoL
2448 * @adap: the adapter
2449 * @port: the physical port index
2450 * @addr: MAC address expected in magic packets, %NULL to disable
2451 *
2452 * Enables/disables magic packet wake-on-LAN for the selected port.
2453 */
2454void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2455 const u8 *addr)
2456{
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002457 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
2458
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05302459 if (is_t4(adap->params.chip)) {
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002460 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
2461 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
2462 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2463 } else {
2464 mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
2465 mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
2466 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
2467 }
2468
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002469 if (addr) {
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002470 t4_write_reg(adap, mag_id_reg_l,
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002471 (addr[2] << 24) | (addr[3] << 16) |
2472 (addr[4] << 8) | addr[5]);
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002473 t4_write_reg(adap, mag_id_reg_h,
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002474 (addr[0] << 8) | addr[1]);
2475 }
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002476 t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002477 addr ? MAGICEN : 0);
2478}
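
/*
 * Usage sketch (editorial): enabling magic-packet wake-up with the station
 * address of a net_device and disabling it again; netdev and port are
 * placeholders here, not names taken from this file:
 *
 *	t4_wol_magic_enable(adap, port, netdev->dev_addr);
 *	...
 *	t4_wol_magic_enable(adap, port, NULL);
 */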
2479
2480/**
2481 * t4_wol_pat_enable - enable/disable pattern-based WoL
2482 * @adap: the adapter
2483 * @port: the physical port index
2484 * @map: bitmap of which HW pattern filters to set
2485 * @mask0: byte mask for bytes 0-63 of a packet
2486 * @mask1: byte mask for bytes 64-127 of a packet
2487 * @crc: Ethernet CRC for selected bytes
2488 * @enable: enable/disable switch
2489 *
2490 * Sets the pattern filters indicated in @map to mask out the bytes
2491 * specified in @mask0/@mask1 in received packets and compare the CRC of
2492 * the resulting packet against @crc. If @enable is %true pattern-based
2493 * WoL is enabled, otherwise disabled.
2494 */
2495int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2496 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2497{
2498 int i;
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002499 u32 port_cfg_reg;
2500
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05302501 if (is_t4(adap->params.chip))
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002502 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2503 else
2504 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002505
2506 if (!enable) {
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002507 t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002508 return 0;
2509 }
2510 if (map > 0xff)
2511 return -EINVAL;
2512
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002513#define EPIO_REG(name) \
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05302514 (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002515 T5_PORT_REG(port, MAC_PORT_EPIO_##name))
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002516
2517 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2518 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2519 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2520
2521 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2522 if (!(map & 1))
2523 continue;
2524
2525 /* write byte masks */
2526 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2527 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2528 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
Naresh Kumar Innace91a922012-11-15 22:41:17 +05302529 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002530 return -ETIMEDOUT;
2531
2532 /* write CRC */
2533 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2534 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2535 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
Naresh Kumar Innace91a922012-11-15 22:41:17 +05302536 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002537 return -ETIMEDOUT;
2538 }
2539#undef EPIO_REG
2540
 2541	t4_set_reg_field(adap, port_cfg_reg, 0, PATEN);
2542 return 0;
2543}
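
/*
 * Usage sketch (editorial): programming pattern filter 0 so that the first
 * 16 bytes of a received frame are masked out before the CRC comparison,
 * with crc supplied by the caller, and turning pattern WoL off again:
 *
 *	err = t4_wol_pat_enable(adap, port, 1, 0xffffULL, 0, crc, true);
 *	...
 *	err = t4_wol_pat_enable(adap, port, 0, 0, 0, 0, false);
 */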
2544
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00002545/* t4_mk_filtdelwr - create a delete filter WR
2546 * @ftid: the filter ID
2547 * @wr: the filter work request to populate
2548 * @qid: ingress queue to receive the delete notification
2549 *
2550 * Creates a filter work request to delete the supplied filter. If @qid is
2551 * negative the delete notification is suppressed.
2552 */
2553void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
2554{
2555 memset(wr, 0, sizeof(*wr));
2556 wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
2557 wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
2558 wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
2559 V_FW_FILTER_WR_NOREPLY(qid < 0));
2560 wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
2561 if (qid >= 0)
2562 wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
2563}
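
/*
 * Usage sketch (editorial): building a work request that deletes filter 17
 * and reports the completion to ingress queue fwevtq_id (a placeholder name):
 *
 *	struct fw_filter_wr wr;
 *
 *	t4_mk_filtdelwr(17, &wr, fwevtq_id);
 *
 * Passing a negative qid instead suppresses the delete notification; the
 * populated request is then submitted to the firmware by the caller.
 */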
2564
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002565#define INIT_CMD(var, cmd, rd_wr) do { \
2566 (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
2567 FW_CMD_REQUEST | FW_CMD_##rd_wr); \
2568 (var).retval_len16 = htonl(FW_LEN16(var)); \
2569} while (0)
2570
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302571int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2572 u32 addr, u32 val)
2573{
2574 struct fw_ldst_cmd c;
2575
2576 memset(&c, 0, sizeof(c));
Vipul Pandya636f9d32012-09-26 02:39:39 +00002577 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2578 FW_CMD_WRITE |
2579 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302580 c.cycles_to_len16 = htonl(FW_LEN16(c));
2581 c.u.addrval.addr = htonl(addr);
2582 c.u.addrval.val = htonl(val);
2583
2584 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2585}
2586
Ben Hutchings49ce9c22012-07-10 10:56:00 +00002587/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002588 * t4_mdio_rd - read a PHY register through MDIO
2589 * @adap: the adapter
2590 * @mbox: mailbox to use for the FW command
2591 * @phy_addr: the PHY address
2592 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2593 * @reg: the register to read
2594 * @valp: where to store the value
2595 *
2596 * Issues a FW command through the given mailbox to read a PHY register.
2597 */
2598int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2599 unsigned int mmd, unsigned int reg, u16 *valp)
2600{
2601 int ret;
2602 struct fw_ldst_cmd c;
2603
2604 memset(&c, 0, sizeof(c));
2605 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2606 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2607 c.cycles_to_len16 = htonl(FW_LEN16(c));
2608 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2609 FW_LDST_CMD_MMD(mmd));
2610 c.u.mdio.raddr = htons(reg);
2611
2612 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2613 if (ret == 0)
2614 *valp = ntohs(c.u.mdio.rval);
2615 return ret;
2616}
2617
2618/**
2619 * t4_mdio_wr - write a PHY register through MDIO
2620 * @adap: the adapter
2621 * @mbox: mailbox to use for the FW command
2622 * @phy_addr: the PHY address
2623 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2624 * @reg: the register to write
 2625 * @val: value to write
2626 *
2627 * Issues a FW command through the given mailbox to write a PHY register.
2628 */
2629int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2630 unsigned int mmd, unsigned int reg, u16 val)
2631{
2632 struct fw_ldst_cmd c;
2633
2634 memset(&c, 0, sizeof(c));
2635 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2636 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2637 c.cycles_to_len16 = htonl(FW_LEN16(c));
2638 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2639 FW_LDST_CMD_MMD(mmd));
2640 c.u.mdio.raddr = htons(reg);
2641 c.u.mdio.rval = htons(val);
2642
2643 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2644}
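
/*
 * Usage sketch (editorial): reading the first PHY identifier register of a
 * clause-45 PHY at address 0 in the PMA/PMD MMD and writing a register back;
 * MDIO_MMD_PMAPMD and MDIO_DEVID1 come from <linux/mdio.h>, while mbox, reg
 * and val are placeholders:
 *
 *	u16 id;
 *
 *	ret = t4_mdio_rd(adap, mbox, 0, MDIO_MMD_PMAPMD, MDIO_DEVID1, &id);
 *	if (!ret)
 *		ret = t4_mdio_wr(adap, mbox, 0, MDIO_MMD_PMAPMD, reg, val);
 */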
2645
2646/**
Kumar Sanghvi68bce1922014-03-13 20:50:47 +05302647 * t4_sge_decode_idma_state - decode the idma state
2648 * @adap: the adapter
2649 * @state: the state idma is stuck in
2650 */
2651void t4_sge_decode_idma_state(struct adapter *adapter, int state)
2652{
2653 static const char * const t4_decode[] = {
2654 "IDMA_IDLE",
2655 "IDMA_PUSH_MORE_CPL_FIFO",
2656 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2657 "Not used",
2658 "IDMA_PHYSADDR_SEND_PCIEHDR",
2659 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2660 "IDMA_PHYSADDR_SEND_PAYLOAD",
2661 "IDMA_SEND_FIFO_TO_IMSG",
2662 "IDMA_FL_REQ_DATA_FL_PREP",
2663 "IDMA_FL_REQ_DATA_FL",
2664 "IDMA_FL_DROP",
2665 "IDMA_FL_H_REQ_HEADER_FL",
2666 "IDMA_FL_H_SEND_PCIEHDR",
2667 "IDMA_FL_H_PUSH_CPL_FIFO",
2668 "IDMA_FL_H_SEND_CPL",
2669 "IDMA_FL_H_SEND_IP_HDR_FIRST",
2670 "IDMA_FL_H_SEND_IP_HDR",
2671 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
2672 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
2673 "IDMA_FL_H_SEND_IP_HDR_PADDING",
2674 "IDMA_FL_D_SEND_PCIEHDR",
2675 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2676 "IDMA_FL_D_REQ_NEXT_DATA_FL",
2677 "IDMA_FL_SEND_PCIEHDR",
2678 "IDMA_FL_PUSH_CPL_FIFO",
2679 "IDMA_FL_SEND_CPL",
2680 "IDMA_FL_SEND_PAYLOAD_FIRST",
2681 "IDMA_FL_SEND_PAYLOAD",
2682 "IDMA_FL_REQ_NEXT_DATA_FL",
2683 "IDMA_FL_SEND_NEXT_PCIEHDR",
2684 "IDMA_FL_SEND_PADDING",
2685 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2686 "IDMA_FL_SEND_FIFO_TO_IMSG",
2687 "IDMA_FL_REQ_DATAFL_DONE",
2688 "IDMA_FL_REQ_HEADERFL_DONE",
2689 };
2690 static const char * const t5_decode[] = {
2691 "IDMA_IDLE",
2692 "IDMA_ALMOST_IDLE",
2693 "IDMA_PUSH_MORE_CPL_FIFO",
2694 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2695 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
2696 "IDMA_PHYSADDR_SEND_PCIEHDR",
2697 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2698 "IDMA_PHYSADDR_SEND_PAYLOAD",
2699 "IDMA_SEND_FIFO_TO_IMSG",
2700 "IDMA_FL_REQ_DATA_FL",
2701 "IDMA_FL_DROP",
2702 "IDMA_FL_DROP_SEND_INC",
2703 "IDMA_FL_H_REQ_HEADER_FL",
2704 "IDMA_FL_H_SEND_PCIEHDR",
2705 "IDMA_FL_H_PUSH_CPL_FIFO",
2706 "IDMA_FL_H_SEND_CPL",
2707 "IDMA_FL_H_SEND_IP_HDR_FIRST",
2708 "IDMA_FL_H_SEND_IP_HDR",
2709 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
2710 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
2711 "IDMA_FL_H_SEND_IP_HDR_PADDING",
2712 "IDMA_FL_D_SEND_PCIEHDR",
2713 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2714 "IDMA_FL_D_REQ_NEXT_DATA_FL",
2715 "IDMA_FL_SEND_PCIEHDR",
2716 "IDMA_FL_PUSH_CPL_FIFO",
2717 "IDMA_FL_SEND_CPL",
2718 "IDMA_FL_SEND_PAYLOAD_FIRST",
2719 "IDMA_FL_SEND_PAYLOAD",
2720 "IDMA_FL_REQ_NEXT_DATA_FL",
2721 "IDMA_FL_SEND_NEXT_PCIEHDR",
2722 "IDMA_FL_SEND_PADDING",
2723 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2724 };
2725 static const u32 sge_regs[] = {
2726 SGE_DEBUG_DATA_LOW_INDEX_2,
2727 SGE_DEBUG_DATA_LOW_INDEX_3,
2728 SGE_DEBUG_DATA_HIGH_INDEX_10,
2729 };
2730 const char **sge_idma_decode;
2731 int sge_idma_decode_nstates;
2732 int i;
2733
2734 if (is_t4(adapter->params.chip)) {
2735 sge_idma_decode = (const char **)t4_decode;
2736 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
2737 } else {
2738 sge_idma_decode = (const char **)t5_decode;
2739 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
2740 }
2741
2742 if (state < sge_idma_decode_nstates)
2743 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
2744 else
2745 CH_WARN(adapter, "idma state %d unknown\n", state);
2746
2747 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
2748 CH_WARN(adapter, "SGE register %#x value %#x\n",
2749 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
2750}
2751
2752/**
Vipul Pandya636f9d32012-09-26 02:39:39 +00002753 * t4_fw_hello - establish communication with FW
2754 * @adap: the adapter
2755 * @mbox: mailbox to use for the FW command
2756 * @evt_mbox: mailbox to receive async FW events
2757 * @master: specifies the caller's willingness to be the device master
2758 * @state: returns the current device state (if non-NULL)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002759 *
Vipul Pandya636f9d32012-09-26 02:39:39 +00002760 * Issues a command to establish communication with FW. Returns either
2761 * an error (negative integer) or the mailbox of the Master PF.
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002762 */
2763int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2764 enum dev_master master, enum dev_state *state)
2765{
2766 int ret;
2767 struct fw_hello_cmd c;
Vipul Pandya636f9d32012-09-26 02:39:39 +00002768 u32 v;
2769 unsigned int master_mbox;
2770 int retries = FW_CMD_HELLO_RETRIES;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002771
Vipul Pandya636f9d32012-09-26 02:39:39 +00002772retry:
2773 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002774 INIT_CMD(c, HELLO, WRITE);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05302775 c.err_to_clearinit = htonl(
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002776 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2777 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
Vipul Pandya636f9d32012-09-26 02:39:39 +00002778 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
2779 FW_HELLO_CMD_MBMASTER_MASK) |
2780 FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
2781 FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
2782 FW_HELLO_CMD_CLEARINIT);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002783
Vipul Pandya636f9d32012-09-26 02:39:39 +00002784 /*
2785 * Issue the HELLO command to the firmware. If it's not successful
2786 * but indicates that we got a "busy" or "timeout" condition, retry
Hariprasad Shenai31d55c22014-09-01 19:54:58 +05302787 * the HELLO until we exhaust our retry limit. If we do exceed our
2788 * retry limit, check to see if the firmware left us any error
2789 * information and report that if so.
Vipul Pandya636f9d32012-09-26 02:39:39 +00002790 */
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002791 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
Vipul Pandya636f9d32012-09-26 02:39:39 +00002792 if (ret < 0) {
2793 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2794 goto retry;
Hariprasad Shenai31d55c22014-09-01 19:54:58 +05302795 if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR)
2796 t4_report_fw_error(adap);
Vipul Pandya636f9d32012-09-26 02:39:39 +00002797 return ret;
2798 }
2799
Naresh Kumar Innace91a922012-11-15 22:41:17 +05302800 v = ntohl(c.err_to_clearinit);
Vipul Pandya636f9d32012-09-26 02:39:39 +00002801 master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
2802 if (state) {
2803 if (v & FW_HELLO_CMD_ERR)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002804 *state = DEV_STATE_ERR;
Vipul Pandya636f9d32012-09-26 02:39:39 +00002805 else if (v & FW_HELLO_CMD_INIT)
2806 *state = DEV_STATE_INIT;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002807 else
2808 *state = DEV_STATE_UNINIT;
2809 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00002810
2811 /*
2812 * If we're not the Master PF then we need to wait around for the
2813 * Master PF Driver to finish setting up the adapter.
2814 *
2815 * Note that we also do this wait if we're a non-Master-capable PF and
2816 * there is no current Master PF; a Master PF may show up momentarily
2817 * and we wouldn't want to fail pointlessly. (This can happen when an
2818 * OS loads lots of different drivers rapidly at the same time). In
2819 * this case, the Master PF returned by the firmware will be
2820 * FW_PCIE_FW_MASTER_MASK so the test below will work ...
2821 */
2822 if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
2823 master_mbox != mbox) {
2824 int waiting = FW_CMD_HELLO_TIMEOUT;
2825
2826 /*
2827 * Wait for the firmware to either indicate an error or
2828 * initialized state. If we see either of these we bail out
2829 * and report the issue to the caller. If we exhaust the
2830 * "hello timeout" and we haven't exhausted our retries, try
2831 * again. Otherwise bail with a timeout error.
2832 */
2833 for (;;) {
2834 u32 pcie_fw;
2835
2836 msleep(50);
2837 waiting -= 50;
2838
2839 /*
2840			 * If neither Error nor Initialized is indicated
2841			 * by the firmware, keep waiting till we exhaust our
2842 * timeout ... and then retry if we haven't exhausted
2843 * our retries ...
2844 */
2845 pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
2846 if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
2847 if (waiting <= 0) {
2848 if (retries-- > 0)
2849 goto retry;
2850
2851 return -ETIMEDOUT;
2852 }
2853 continue;
2854 }
2855
2856 /*
2857			 * We either have an Error or Initialized condition;
2858			 * report errors preferentially.
2859 */
2860 if (state) {
2861 if (pcie_fw & FW_PCIE_FW_ERR)
2862 *state = DEV_STATE_ERR;
2863 else if (pcie_fw & FW_PCIE_FW_INIT)
2864 *state = DEV_STATE_INIT;
2865 }
2866
2867 /*
2868 * If we arrived before a Master PF was selected and
2869			 * there's now a valid Master PF, grab its identity
2870 * for our caller.
2871 */
2872 if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
2873 (pcie_fw & FW_PCIE_FW_MASTER_VLD))
2874 master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
2875 break;
2876 }
2877 }
2878
2879 return master_mbox;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002880}
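/*
 * Illustrative caller sketch (hypothetical, not part of this file): a PF
 * driver negotiating mastership during probe.  It assumes adap->mbox holds
 * this function's mailbox and that MASTER_MAY (from enum dev_master) is an
 * acceptable policy for the caller.
 *
 *	enum dev_state state;
 *	int master = t4_fw_hello(adap, adap->mbox, adap->mbox,
 *				 MASTER_MAY, &state);
 *	if (master < 0)
 *		return master;		 could not reach the firmware
 *	if (state == DEV_STATE_ERR)
 *		return -EIO;		 firmware reported an error
 *	if (master == adap->mbox)
 *		... we are the Master PF: perform full adapter initialization
 *	else
 *		... slave PF: the Master PF has (or will have) set things up
 */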
2881
2882/**
2883 * t4_fw_bye - end communication with FW
2884 * @adap: the adapter
2885 * @mbox: mailbox to use for the FW command
2886 *
2887 * Issues a command to terminate communication with FW.
2888 */
2889int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2890{
2891 struct fw_bye_cmd c;
2892
Vipul Pandya0062b152012-11-06 03:37:09 +00002893 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002894 INIT_CMD(c, BYE, WRITE);
2895 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2896}
2897
2898/**
2899 * t4_early_init - ask FW to initialize the device
2900 * @adap: the adapter
2901 * @mbox: mailbox to use for the FW command
2902 *
2903 * Issues a command to FW to partially initialize the device. This
2904 * performs initialization that generally doesn't depend on user input.
2905 */
2906int t4_early_init(struct adapter *adap, unsigned int mbox)
2907{
2908 struct fw_initialize_cmd c;
2909
Vipul Pandya0062b152012-11-06 03:37:09 +00002910 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002911 INIT_CMD(c, INITIALIZE, WRITE);
2912 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2913}
2914
2915/**
2916 * t4_fw_reset - issue a reset to FW
2917 * @adap: the adapter
2918 * @mbox: mailbox to use for the FW command
2919 * @reset: specifies the type of reset to perform
2920 *
2921 * Issues a reset command of the specified type to FW.
2922 */
2923int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2924{
2925 struct fw_reset_cmd c;
2926
Vipul Pandya0062b152012-11-06 03:37:09 +00002927 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002928 INIT_CMD(c, RESET, WRITE);
2929 c.val = htonl(reset);
2930 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2931}
2932
2933/**
Vipul Pandya26f7cbc2012-09-26 02:39:42 +00002934 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
2935 * @adap: the adapter
2936 * @mbox: mailbox to use for the FW RESET command (if desired)
2937 * @force: force uP into RESET even if FW RESET command fails
2938 *
2939 * Issues a RESET command to firmware (if desired) with a HALT indication
2940 * and then puts the microprocessor into RESET state. The RESET command
2941 * will only be issued if a legitimate mailbox is provided (mbox <=
2942 * FW_PCIE_FW_MASTER_MASK).
2943 *
2944 * This is generally used in order for the host to safely manipulate the
2945 * adapter without fear of conflicting with whatever the firmware might
2946 * be doing. The only way out of this state is to RESTART the firmware
2947 * ...
2948 */
stephen hemmingerde5b8672013-12-18 14:16:47 -08002949static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
Vipul Pandya26f7cbc2012-09-26 02:39:42 +00002950{
2951 int ret = 0;
2952
2953 /*
2954 * If a legitimate mailbox is provided, issue a RESET command
2955 * with a HALT indication.
2956 */
2957 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2958 struct fw_reset_cmd c;
2959
2960 memset(&c, 0, sizeof(c));
2961 INIT_CMD(c, RESET, WRITE);
2962 c.val = htonl(PIORST | PIORSTMODE);
2963 c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
2964 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2965 }
2966
2967 /*
2968 * Normally we won't complete the operation if the firmware RESET
2969 * command fails but if our caller insists we'll go ahead and put the
2970 * uP into RESET. This can be useful if the firmware is hung or even
2971 * missing ... We'll have to take the risk of putting the uP into
2972 * RESET without the cooperation of firmware in that case.
2973 *
2974 * We also force the firmware's HALT flag to be on in case we bypassed
2975 * the firmware RESET command above or we're dealing with old firmware
2976 * which doesn't have the HALT capability. This will serve as a flag
2977 * for the incoming firmware to know that it's coming out of a HALT
2978 * rather than a RESET ... if it's new enough to understand that ...
2979 */
2980 if (ret == 0 || force) {
2981 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
2982 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
2983 FW_PCIE_FW_HALT);
2984 }
2985
2986 /*
2987 * And we always return the result of the firmware RESET command
2988 * even when we force the uP into RESET ...
2989 */
2990 return ret;
2991}
2992
2993/**
2994 * t4_fw_restart - restart the firmware by taking the uP out of RESET
2995 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
2996 * @reset: if we want to do a RESET to restart things
2997 *
2998 * Restart firmware previously halted by t4_fw_halt(). On successful
2999 * return the previous PF Master remains as the new PF Master and there
3000 * is no need to issue a new HELLO command, etc.
3001 *
3002 * We do this in two ways:
3003 *
3004 * 1. If we're dealing with newer firmware we'll simply want to take
3005 * the chip's microprocessor out of RESET. This will cause the
3006 * firmware to start up from its start vector. And then we'll loop
3007 * until the firmware indicates it's started again (PCIE_FW.HALT
3008 * reset to 0) or we timeout.
3009 *
3010 * 2. If we're dealing with older firmware then we'll need to RESET
3011 * the chip since older firmware won't recognize the PCIE_FW.HALT
3012 * flag and automatically RESET itself on startup.
3013 */
stephen hemmingerde5b8672013-12-18 14:16:47 -08003014static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
Vipul Pandya26f7cbc2012-09-26 02:39:42 +00003015{
3016 if (reset) {
3017 /*
3018 * Since we're directing the RESET instead of the firmware
3019 * doing it automatically, we need to clear the PCIE_FW.HALT
3020 * bit.
3021 */
3022 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
3023
3024 /*
3025 * If we've been given a valid mailbox, first try to get the
3026 * firmware to do the RESET. If that works, great and we can
3027 * return success. Otherwise, if we haven't been given a
3028 * valid mailbox or the RESET command failed, fall back to
3029 * hitting the chip with a hammer.
3030 */
3031 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
3032 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
3033 msleep(100);
3034 if (t4_fw_reset(adap, mbox,
3035 PIORST | PIORSTMODE) == 0)
3036 return 0;
3037 }
3038
3039 t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
3040 msleep(2000);
3041 } else {
3042 int ms;
3043
3044 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
3045 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
3046 if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
3047 return 0;
3048 msleep(100);
3049 ms += 100;
3050 }
3051 return -ETIMEDOUT;
3052 }
3053 return 0;
3054}
3055
3056/**
3057 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
3058 * @adap: the adapter
3059 * @mbox: mailbox to use for the FW RESET command (if desired)
3060 * @fw_data: the firmware image to write
3061 * @size: image size
3062 * @force: force upgrade even if firmware doesn't cooperate
3063 *
3064 * Perform all of the steps necessary for upgrading an adapter's
3065 * firmware image. Normally this requires the cooperation of the
3066 * existing firmware in order to halt all existing activities
3067 * but if an invalid mailbox token is passed in we skip that step
3068 * (though we'll still put the adapter microprocessor into RESET in
3069 * that case).
3070 *
3071 * On successful return the new firmware will have been loaded and
3072 * the adapter will have been fully RESET losing all previous setup
3073 * state. On unsuccessful return the adapter may be completely hosed ...
3074 * positive errno indicates that the adapter is ~probably~ intact, a
3075 * negative errno indicates that things are looking bad ...
3076 */
Hariprasad Shenai22c0b962014-10-15 01:54:14 +05303077int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
3078 const u8 *fw_data, unsigned int size, int force)
Vipul Pandya26f7cbc2012-09-26 02:39:42 +00003079{
3080 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
3081 int reset, ret;
3082
3083 ret = t4_fw_halt(adap, mbox, force);
3084 if (ret < 0 && !force)
3085 return ret;
3086
3087 ret = t4_load_fw(adap, fw_data, size);
3088 if (ret < 0)
3089 return ret;
3090
3091 /*
3092 * Older versions of the firmware don't understand the new
3093 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
3094 * restart. So for newly loaded older firmware we'll have to do the
3095 * RESET for it so it starts up on a clean slate. We can tell if
3096 * the newly loaded firmware will handle this right by checking
3097 * its header flags to see if it advertises the capability.
3098 */
3099 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
3100 return t4_fw_restart(adap, mbox, reset);
3101}
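/*
 * Illustrative upgrade sketch (hypothetical): fetching a new image with
 * request_firmware() and letting t4_fw_upgrade() drive the halt / flash /
 * restart sequence.  The firmware file name is a placeholder and error
 * handling is reduced to the bare minimum; adap->mbox and adap->pdev_dev
 * are assumed to hold the caller's mailbox and struct device.
 *
 *	const struct firmware *fw;
 *
 *	if (request_firmware(&fw, "cxgb4/t4fw.bin", adap->pdev_dev) == 0) {
 *		int ret = t4_fw_upgrade(adap, adap->mbox, fw->data,
 *					fw->size, 0);	 0 == don't force
 *		release_firmware(fw);
 *		if (ret < 0)
 *			dev_err(adap->pdev_dev,
 *				"fw upgrade failed: %d\n", ret);
 *	}
 */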
3102
Vipul Pandya636f9d32012-09-26 02:39:39 +00003103/**
3104 * t4_fixup_host_params - fix up host-dependent parameters
3105 * @adap: the adapter
3106 * @page_size: the host's Base Page Size
3107 * @cache_line_size: the host's Cache Line Size
3108 *
3109 * Various registers in T4 contain values which are dependent on the
3110 * host's Base Page and Cache Line Sizes. This function will fix all of
3111 * those registers with the appropriate values as passed in ...
3112 */
3113int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3114 unsigned int cache_line_size)
3115{
3116 unsigned int page_shift = fls(page_size) - 1;
3117 unsigned int sge_hps = page_shift - 10;
3118 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
3119 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
3120 unsigned int fl_align_log = fls(fl_align) - 1;
3121
3122 t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
3123 HOSTPAGESIZEPF0(sge_hps) |
3124 HOSTPAGESIZEPF1(sge_hps) |
3125 HOSTPAGESIZEPF2(sge_hps) |
3126 HOSTPAGESIZEPF3(sge_hps) |
3127 HOSTPAGESIZEPF4(sge_hps) |
3128 HOSTPAGESIZEPF5(sge_hps) |
3129 HOSTPAGESIZEPF6(sge_hps) |
3130 HOSTPAGESIZEPF7(sge_hps));
3131
3132 t4_set_reg_field(adap, SGE_CONTROL,
Vipul Pandya0dad9e92012-11-07 03:45:46 +00003133 INGPADBOUNDARY_MASK |
Vipul Pandya636f9d32012-09-26 02:39:39 +00003134 EGRSTATUSPAGESIZE_MASK,
3135 INGPADBOUNDARY(fl_align_log - 5) |
3136 EGRSTATUSPAGESIZE(stat_len != 64));
3137
3138 /*
3139 * Adjust various SGE Free List Host Buffer Sizes.
3140 *
3141 * This is something of a crock since we're using fixed indices into
3142 * the array which are also known by the sge.c code and the T4
3143 * Firmware Configuration File. We need to come up with a much better
3144 * approach to managing this array. For now, the first four entries
3145 * are:
3146 *
3147 * 0: Host Page Size
3148 * 1: 64KB
3149 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
3150 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
3151 *
3152 * For the single-MTU buffers in unpacked mode we need to include
3153 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
3154 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
3155	 * Padding boundary. All of these are accommodated in the Factory
3156 * Default Firmware Configuration File but we need to adjust it for
3157 * this host's cache line size.
3158 */
3159 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
3160 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
3161 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
3162 & ~(fl_align-1));
3163 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
3164 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
3165 & ~(fl_align-1));
3166
3167 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
3168
3169 return 0;
3170}
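/*
 * Worked example of the arithmetic above for one common host configuration
 * (assumed values): page_size = 4096 and cache_line_size = 64 give
 * page_shift = fls(4096) - 1 = 12, so sge_hps = 12 - 10 = 2;
 * stat_len = 64 (cache line not larger than 64 bytes), fl_align = 64 and
 * fl_align_log = 6, so INGPADBOUNDARY is programmed with 6 - 5 = 1 and
 * EGRSTATUSPAGESIZE with 0.  SGE_FL_BUFFER_SIZE2/3 are then rounded up to
 * the next multiple of 64 bytes.
 */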
3171
3172/**
3173 * t4_fw_initialize - ask FW to initialize the device
3174 * @adap: the adapter
3175 * @mbox: mailbox to use for the FW command
3176 *
3177 * Issues a command to FW to partially initialize the device. This
3178 * performs initialization that generally doesn't depend on user input.
3179 */
3180int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
3181{
3182 struct fw_initialize_cmd c;
3183
3184 memset(&c, 0, sizeof(c));
3185 INIT_CMD(c, INITIALIZE, WRITE);
3186 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3187}
3188
3189/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003190 * t4_query_params - query FW or device parameters
3191 * @adap: the adapter
3192 * @mbox: mailbox to use for the FW command
3193 * @pf: the PF
3194 * @vf: the VF
3195 * @nparams: the number of parameters
3196 * @params: the parameter names
3197 * @val: the parameter values
3198 *
3199 * Reads the value of FW or device parameters. Up to 7 parameters can be
3200 * queried at once.
3201 */
3202int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3203 unsigned int vf, unsigned int nparams, const u32 *params,
3204 u32 *val)
3205{
3206 int i, ret;
3207 struct fw_params_cmd c;
3208 __be32 *p = &c.param[0].mnem;
3209
3210 if (nparams > 7)
3211 return -EINVAL;
3212
3213 memset(&c, 0, sizeof(c));
3214 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
3215 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
3216 FW_PARAMS_CMD_VFN(vf));
3217 c.retval_len16 = htonl(FW_LEN16(c));
3218 for (i = 0; i < nparams; i++, p += 2)
3219 *p = htonl(*params++);
3220
3221 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3222 if (ret == 0)
3223 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3224 *val++ = ntohl(*p);
3225 return ret;
3226}
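/*
 * Illustrative query sketch (hypothetical): reading the device's port
 * vector.  It assumes the FW_PARAMS_MNEM/FW_PARAMS_PARAM_X parameter
 * encoding from t4fw_api.h and that adap->mbox / adap->fn hold the
 * caller's mailbox and PF number.
 *
 *	u32 param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *		    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
 *	u32 portvec;
 *	int ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
 *				  &param, &portvec);
 */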
3227
3228/**
Anish Bhatt688848b2014-06-19 21:37:13 -07003229 * t4_set_params_nosleep - sets FW or device parameters
3230 * @adap: the adapter
3231 * @mbox: mailbox to use for the FW command
3232 * @pf: the PF
3233 * @vf: the VF
3234 * @nparams: the number of parameters
3235 * @params: the parameter names
3236 * @val: the parameter values
3237 *
3238 * Does not ever sleep
3239 * Sets the value of FW or device parameters. Up to 7 parameters can be
3240 * specified at once.
3241 */
3242int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
3243 unsigned int pf, unsigned int vf,
3244 unsigned int nparams, const u32 *params,
3245 const u32 *val)
3246{
3247 struct fw_params_cmd c;
3248 __be32 *p = &c.param[0].mnem;
3249
3250 if (nparams > 7)
3251 return -EINVAL;
3252
3253 memset(&c, 0, sizeof(c));
3254 c.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
3255 FW_CMD_REQUEST | FW_CMD_WRITE |
3256 FW_PARAMS_CMD_PFN(pf) |
3257 FW_PARAMS_CMD_VFN(vf));
3258 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3259
3260 while (nparams--) {
3261 *p++ = cpu_to_be32(*params++);
3262 *p++ = cpu_to_be32(*val++);
3263 }
3264
3265 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3266}
3267
3268/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003269 * t4_set_params - sets FW or device parameters
3270 * @adap: the adapter
3271 * @mbox: mailbox to use for the FW command
3272 * @pf: the PF
3273 * @vf: the VF
3274 * @nparams: the number of parameters
3275 * @params: the parameter names
3276 * @val: the parameter values
3277 *
3278 * Sets the value of FW or device parameters. Up to 7 parameters can be
3279 * specified at once.
3280 */
3281int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3282 unsigned int vf, unsigned int nparams, const u32 *params,
3283 const u32 *val)
3284{
3285 struct fw_params_cmd c;
3286 __be32 *p = &c.param[0].mnem;
3287
3288 if (nparams > 7)
3289 return -EINVAL;
3290
3291 memset(&c, 0, sizeof(c));
3292 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
3293 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
3294 FW_PARAMS_CMD_VFN(vf));
3295 c.retval_len16 = htonl(FW_LEN16(c));
3296 while (nparams--) {
3297 *p++ = htonl(*params++);
3298 *p++ = htonl(*val++);
3299 }
3300
3301 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3302}
3303
3304/**
3305 * t4_cfg_pfvf - configure PF/VF resource limits
3306 * @adap: the adapter
3307 * @mbox: mailbox to use for the FW command
3308 * @pf: the PF being configured
3309 * @vf: the VF being configured
3310 * @txq: the max number of egress queues
3311 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
3312 * @rxqi: the max number of interrupt-capable ingress queues
3313 * @rxq: the max number of interruptless ingress queues
3314 * @tc: the PCI traffic class
3315 * @vi: the max number of virtual interfaces
3316 * @cmask: the channel access rights mask for the PF/VF
3317 * @pmask: the port access rights mask for the PF/VF
3318 * @nexact: the maximum number of exact MPS filters
3319 * @rcaps: read capabilities
3320 * @wxcaps: write/execute capabilities
3321 *
3322 * Configures resource limits and capabilities for a physical or virtual
3323 * function.
3324 */
3325int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
3326 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
3327 unsigned int rxqi, unsigned int rxq, unsigned int tc,
3328 unsigned int vi, unsigned int cmask, unsigned int pmask,
3329 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
3330{
3331 struct fw_pfvf_cmd c;
3332
3333 memset(&c, 0, sizeof(c));
3334 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
3335 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
3336 FW_PFVF_CMD_VFN(vf));
3337 c.retval_len16 = htonl(FW_LEN16(c));
3338 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
3339 FW_PFVF_CMD_NIQ(rxq));
Casey Leedom81323b72010-06-25 12:10:32 +00003340 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003341 FW_PFVF_CMD_PMASK(pmask) |
3342 FW_PFVF_CMD_NEQ(txq));
3343 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
3344 FW_PFVF_CMD_NEXACTF(nexact));
3345 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
3346 FW_PFVF_CMD_WX_CAPS(wxcaps) |
3347 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
3348 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3349}
3350
3351/**
3352 * t4_alloc_vi - allocate a virtual interface
3353 * @adap: the adapter
3354 * @mbox: mailbox to use for the FW command
3355 * @port: physical port associated with the VI
3356 * @pf: the PF owning the VI
3357 * @vf: the VF owning the VI
3358 * @nmac: number of MAC addresses needed (1 to 5)
3359 * @mac: the MAC addresses of the VI
3360 * @rss_size: size of RSS table slice associated with this VI
3361 *
3362 * Allocates a virtual interface for the given physical port. If @mac is
3363 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
3364 * @mac should be large enough to hold @nmac Ethernet addresses, they are
3365 * stored consecutively so the space needed is @nmac * 6 bytes.
3366 * Returns a negative error number or the non-negative VI id.
3367 */
3368int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
3369 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
3370 unsigned int *rss_size)
3371{
3372 int ret;
3373 struct fw_vi_cmd c;
3374
3375 memset(&c, 0, sizeof(c));
3376 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
3377 FW_CMD_WRITE | FW_CMD_EXEC |
3378 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
3379 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
3380 c.portid_pkd = FW_VI_CMD_PORTID(port);
3381 c.nmac = nmac - 1;
3382
3383 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3384 if (ret)
3385 return ret;
3386
3387 if (mac) {
3388 memcpy(mac, c.mac, sizeof(c.mac));
3389 switch (nmac) {
3390 case 5:
3391 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
3392 case 4:
3393 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
3394 case 3:
3395 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
3396 case 2:
3397 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
3398 }
3399 }
3400 if (rss_size)
3401 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00003402 return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003403}
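/*
 * Illustrative allocation sketch (hypothetical): creating one VI on
 * physical port @port with a single MAC address and capturing its RSS
 * slice size.  adap->mbox / adap->fn are assumed to be the caller's
 * mailbox and PF; a negative return is an error, otherwise the VI id.
 *
 *	u8 mac[ETH_ALEN];
 *	unsigned int rss_size;
 *	int viid = t4_alloc_vi(adap, adap->mbox, port, adap->fn, 0,
 *			       1, mac, &rss_size);
 *	if (viid < 0)
 *		return viid;
 */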
3404
3405/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003406 * t4_set_rxmode - set Rx properties of a virtual interface
3407 * @adap: the adapter
3408 * @mbox: mailbox to use for the FW command
3409 * @viid: the VI id
3410 * @mtu: the new MTU or -1
3411 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
3412 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
3413 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00003414 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003415 * @sleep_ok: if true we may sleep while awaiting command completion
3416 *
3417 * Sets Rx properties of a virtual interface.
3418 */
3419int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00003420 int mtu, int promisc, int all_multi, int bcast, int vlanex,
3421 bool sleep_ok)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003422{
3423 struct fw_vi_rxmode_cmd c;
3424
3425 /* convert to FW values */
3426 if (mtu < 0)
3427 mtu = FW_RXMODE_MTU_NO_CHG;
3428 if (promisc < 0)
3429 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
3430 if (all_multi < 0)
3431 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
3432 if (bcast < 0)
3433 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00003434 if (vlanex < 0)
3435 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003436
3437 memset(&c, 0, sizeof(c));
3438 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
3439 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
3440 c.retval_len16 = htonl(FW_LEN16(c));
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00003441 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
3442 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
3443 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
3444 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
3445 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003446 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3447}
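/*
 * Illustrative sketch (hypothetical): turning promiscuous mode on for a
 * VI while leaving MTU, all-multi, broadcast and VLAN extraction
 * unchanged (-1 means "no change" for each of those arguments).
 *
 *	int ret = t4_set_rxmode(adap, adap->mbox, viid,
 *				-1, 1, -1, -1, -1, true);
 */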
3448
3449/**
3450 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
3451 * @adap: the adapter
3452 * @mbox: mailbox to use for the FW command
3453 * @viid: the VI id
3454 * @free: if true any existing filters for this VI id are first removed
3455 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
3456 * @addr: the MAC address(es)
3457 * @idx: where to store the index of each allocated filter
3458 * @hash: pointer to hash address filter bitmap
3459 * @sleep_ok: call is allowed to sleep
3460 *
3461 * Allocates an exact-match filter for each of the supplied addresses and
3462 * sets it to the corresponding address. If @idx is not %NULL it should
3463 * have at least @naddr entries, each of which will be set to the index of
3464 * the filter allocated for the corresponding MAC address. If a filter
3465 * could not be allocated for an address its index is set to 0xffff.
3466 * If @hash is not %NULL, addresses that fail to allocate an exact filter
3467 * are hashed and update the hash filter bitmap pointed at by @hash.
3468 *
3469 * Returns a negative error number or the number of filters allocated.
3470 */
3471int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
3472 unsigned int viid, bool free, unsigned int naddr,
3473 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
3474{
3475 int i, ret;
3476 struct fw_vi_mac_cmd c;
3477 struct fw_vi_mac_exact *p;
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303478 unsigned int max_naddr = is_t4(adap->params.chip) ?
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003479 NUM_MPS_CLS_SRAM_L_INSTANCES :
3480 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003481
3482 if (naddr > 7)
3483 return -EINVAL;
3484
3485 memset(&c, 0, sizeof(c));
3486 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3487 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
3488 FW_VI_MAC_CMD_VIID(viid));
3489 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
3490 FW_CMD_LEN16((naddr + 2) / 2));
3491
3492 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3493 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3494 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
3495 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
3496 }
3497
3498 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
3499 if (ret)
3500 return ret;
3501
3502 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3503 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3504
3505 if (idx)
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003506 idx[i] = index >= max_naddr ? 0xffff : index;
3507 if (index < max_naddr)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003508 ret++;
3509 else if (hash)
Dimitris Michailidisce9aeb52010-12-03 10:39:04 +00003510 *hash |= (1ULL << hash_mac_addr(addr[i]));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003511 }
3512 return ret;
3513}
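/*
 * Illustrative sketch (hypothetical): installing one exact-match filter
 * for a unicast address on a VI.  @dev_addr is a placeholder for the
 * netdevice's MAC address; a return of 1 means the filter was allocated,
 * otherwise the address fell back into the hash filter bitmap.
 *
 *	const u8 *addrs[1] = { dev_addr };
 *	u16 filt_idx[1];
 *	u64 mhash = 0;
 *	int nalloc = t4_alloc_mac_filt(adap, adap->mbox, viid, false,
 *				       1, addrs, filt_idx, &mhash, true);
 */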
3514
3515/**
3516 * t4_change_mac - modifies the exact-match filter for a MAC address
3517 * @adap: the adapter
3518 * @mbox: mailbox to use for the FW command
3519 * @viid: the VI id
3520 * @idx: index of existing filter for old value of MAC address, or -1
3521 * @addr: the new MAC address value
3522 * @persist: whether a new MAC allocation should be persistent
3523 * @add_smt: if true also add the address to the HW SMT
3524 *
3525 * Modifies an exact-match filter and sets it to the new MAC address.
3526 * Note that in general it is not possible to modify the value of a given
3527 * filter so the generic way to modify an address filter is to free the one
3528 * being used by the old address value and allocate a new filter for the
3529 * new address value. @idx can be -1 if the address is a new addition.
3530 *
3531 * Returns a negative error number or the index of the filter with the new
3532 * MAC value.
3533 */
3534int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
3535 int idx, const u8 *addr, bool persist, bool add_smt)
3536{
3537 int ret, mode;
3538 struct fw_vi_mac_cmd c;
3539 struct fw_vi_mac_exact *p = c.u.exact;
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303540 unsigned int max_mac_addr = is_t4(adap->params.chip) ?
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003541 NUM_MPS_CLS_SRAM_L_INSTANCES :
3542 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003543
3544 if (idx < 0) /* new allocation */
3545 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
3546 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
3547
3548 memset(&c, 0, sizeof(c));
3549 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3550 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
3551 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
3552 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3553 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
3554 FW_VI_MAC_CMD_IDX(idx));
3555 memcpy(p->macaddr, addr, sizeof(p->macaddr));
3556
3557 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3558 if (ret == 0) {
3559 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003560 if (ret >= max_mac_addr)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003561 ret = -ENOMEM;
3562 }
3563 return ret;
3564}
3565
3566/**
3567 * t4_set_addr_hash - program the MAC inexact-match hash filter
3568 * @adap: the adapter
3569 * @mbox: mailbox to use for the FW command
3570 * @viid: the VI id
3571 * @ucast: whether the hash filter should also match unicast addresses
3572 * @vec: the value to be written to the hash filter
3573 * @sleep_ok: call is allowed to sleep
3574 *
3575 * Sets the 64-bit inexact-match hash filter for a virtual interface.
3576 */
3577int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
3578 bool ucast, u64 vec, bool sleep_ok)
3579{
3580 struct fw_vi_mac_cmd c;
3581
3582 memset(&c, 0, sizeof(c));
3583 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3584 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
3585 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
3586 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
3587 FW_CMD_LEN16(1));
3588 c.u.hash.hashvec = cpu_to_be64(vec);
3589 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3590}
3591
3592/**
Anish Bhatt688848b2014-06-19 21:37:13 -07003593 * t4_enable_vi_params - enable/disable a virtual interface
3594 * @adap: the adapter
3595 * @mbox: mailbox to use for the FW command
3596 * @viid: the VI id
3597 * @rx_en: 1=enable Rx, 0=disable Rx
3598 * @tx_en: 1=enable Tx, 0=disable Tx
3599 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
3600 *
3601 * Enables/disables a virtual interface. Note that setting DCB Enable
3602 * only makes sense when enabling a Virtual Interface ...
3603 */
3604int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
3605 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
3606{
3607 struct fw_vi_enable_cmd c;
3608
3609 memset(&c, 0, sizeof(c));
3610 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3611 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3612
3613 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
3614 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c) |
3615 FW_VI_ENABLE_CMD_DCB_INFO(dcb_en));
Anish Bhatt30f00842014-08-05 16:05:23 -07003616 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
Anish Bhatt688848b2014-06-19 21:37:13 -07003617}
3618
3619/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003620 * t4_enable_vi - enable/disable a virtual interface
3621 * @adap: the adapter
3622 * @mbox: mailbox to use for the FW command
3623 * @viid: the VI id
3624 * @rx_en: 1=enable Rx, 0=disable Rx
3625 * @tx_en: 1=enable Tx, 0=disable Tx
3626 *
3627 * Enables/disables a virtual interface.
3628 */
3629int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
3630 bool rx_en, bool tx_en)
3631{
Anish Bhatt688848b2014-06-19 21:37:13 -07003632 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003633}
3634
3635/**
3636 * t4_identify_port - identify a VI's port by blinking its LED
3637 * @adap: the adapter
3638 * @mbox: mailbox to use for the FW command
3639 * @viid: the VI id
3640 * @nblinks: how many times to blink LED at 2.5 Hz
3641 *
3642 * Identifies a VI's port by blinking its LED.
3643 */
3644int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
3645 unsigned int nblinks)
3646{
3647 struct fw_vi_enable_cmd c;
3648
Vipul Pandya0062b152012-11-06 03:37:09 +00003649 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003650 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3651 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3652 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
3653 c.blinkdur = htons(nblinks);
3654 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3655}
3656
3657/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003658 * t4_iq_free - free an ingress queue and its FLs
3659 * @adap: the adapter
3660 * @mbox: mailbox to use for the FW command
3661 * @pf: the PF owning the queues
3662 * @vf: the VF owning the queues
3663 * @iqtype: the ingress queue type
3664 * @iqid: ingress queue id
3665 * @fl0id: FL0 queue id or 0xffff if no attached FL0
3666 * @fl1id: FL1 queue id or 0xffff if no attached FL1
3667 *
3668 * Frees an ingress queue and its associated FLs, if any.
3669 */
3670int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3671 unsigned int vf, unsigned int iqtype, unsigned int iqid,
3672 unsigned int fl0id, unsigned int fl1id)
3673{
3674 struct fw_iq_cmd c;
3675
3676 memset(&c, 0, sizeof(c));
3677 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
3678 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
3679 FW_IQ_CMD_VFN(vf));
3680 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
3681 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
3682 c.iqid = htons(iqid);
3683 c.fl0id = htons(fl0id);
3684 c.fl1id = htons(fl1id);
3685 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3686}
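/*
 * Illustrative sketch (hypothetical): releasing an ingress queue that has
 * no free lists attached (0xffff marks "no FL" for both slots).  The
 * FW_IQ_TYPE_FL_INT_CAP queue type and adap->fn PF number are assumptions
 * borrowed from how the rest of the driver allocates such queues.
 *
 *	t4_iq_free(adap, adap->mbox, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
 *		   iqid, 0xffff, 0xffff);
 */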
3687
3688/**
3689 * t4_eth_eq_free - free an Ethernet egress queue
3690 * @adap: the adapter
3691 * @mbox: mailbox to use for the FW command
3692 * @pf: the PF owning the queue
3693 * @vf: the VF owning the queue
3694 * @eqid: egress queue id
3695 *
3696 * Frees an Ethernet egress queue.
3697 */
3698int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3699 unsigned int vf, unsigned int eqid)
3700{
3701 struct fw_eq_eth_cmd c;
3702
3703 memset(&c, 0, sizeof(c));
3704 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
3705 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
3706 FW_EQ_ETH_CMD_VFN(vf));
3707 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
3708 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
3709 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3710}
3711
3712/**
3713 * t4_ctrl_eq_free - free a control egress queue
3714 * @adap: the adapter
3715 * @mbox: mailbox to use for the FW command
3716 * @pf: the PF owning the queue
3717 * @vf: the VF owning the queue
3718 * @eqid: egress queue id
3719 *
3720 * Frees a control egress queue.
3721 */
3722int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3723 unsigned int vf, unsigned int eqid)
3724{
3725 struct fw_eq_ctrl_cmd c;
3726
3727 memset(&c, 0, sizeof(c));
3728 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
3729 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
3730 FW_EQ_CTRL_CMD_VFN(vf));
3731 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
3732 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
3733 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3734}
3735
3736/**
3737 * t4_ofld_eq_free - free an offload egress queue
3738 * @adap: the adapter
3739 * @mbox: mailbox to use for the FW command
3740 * @pf: the PF owning the queue
3741 * @vf: the VF owning the queue
3742 * @eqid: egress queue id
3743 *
3744 * Frees an offload egress queue.
3745 */
3746int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3747 unsigned int vf, unsigned int eqid)
3748{
3749 struct fw_eq_ofld_cmd c;
3750
3751 memset(&c, 0, sizeof(c));
3752 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
3753 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
3754 FW_EQ_OFLD_CMD_VFN(vf));
3755 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
3756 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
3757 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3758}
3759
3760/**
3761 * t4_handle_fw_rpl - process a FW reply message
3762 * @adap: the adapter
3763 * @rpl: start of the FW message
3764 *
3765 * Processes a FW message, such as link state change messages.
3766 */
3767int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
3768{
3769 u8 opcode = *(const u8 *)rpl;
3770
3771 if (opcode == FW_PORT_CMD) { /* link/module state change message */
3772 int speed = 0, fc = 0;
3773 const struct fw_port_cmd *p = (void *)rpl;
3774 int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
3775 int port = adap->chan_map[chan];
3776 struct port_info *pi = adap2pinfo(adap, port);
3777 struct link_config *lc = &pi->link_cfg;
3778 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
3779 int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
3780 u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
3781
3782 if (stat & FW_PORT_CMD_RXPAUSE)
3783 fc |= PAUSE_RX;
3784 if (stat & FW_PORT_CMD_TXPAUSE)
3785 fc |= PAUSE_TX;
3786 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
Ben Hutchingse8b39012014-02-23 00:03:24 +00003787 speed = 100;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003788 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
Ben Hutchingse8b39012014-02-23 00:03:24 +00003789 speed = 1000;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003790 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
Ben Hutchingse8b39012014-02-23 00:03:24 +00003791 speed = 10000;
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05303792 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
Ben Hutchingse8b39012014-02-23 00:03:24 +00003793 speed = 40000;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003794
3795 if (link_ok != lc->link_ok || speed != lc->speed ||
3796 fc != lc->fc) { /* something changed */
3797 lc->link_ok = link_ok;
3798 lc->speed = speed;
3799 lc->fc = fc;
Hariprasad Shenai444018a2014-09-01 19:54:55 +05303800 lc->supported = be16_to_cpu(p->u.info.pcap);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003801 t4_os_link_changed(adap, port, link_ok);
3802 }
3803 if (mod != pi->mod_type) {
3804 pi->mod_type = mod;
3805 t4_os_portmod_changed(adap, port);
3806 }
3807 }
3808 return 0;
3809}
3810
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00003811static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003812{
3813 u16 val;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003814
Jiang Liue5c8ae52012-08-20 13:53:19 -06003815 if (pci_is_pcie(adapter->pdev)) {
3816 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003817 p->speed = val & PCI_EXP_LNKSTA_CLS;
3818 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3819 }
3820}
3821
3822/**
3823 * init_link_config - initialize a link's SW state
3824 * @lc: structure holding the link state
3825 * @caps: link capabilities
3826 *
3827 * Initializes the SW state maintained for each link, including the link's
3828 * capabilities and default speed/flow-control/autonegotiation settings.
3829 */
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00003830static void init_link_config(struct link_config *lc, unsigned int caps)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003831{
3832 lc->supported = caps;
3833 lc->requested_speed = 0;
3834 lc->speed = 0;
3835 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3836 if (lc->supported & FW_PORT_CAP_ANEG) {
3837 lc->advertising = lc->supported & ADVERT_MASK;
3838 lc->autoneg = AUTONEG_ENABLE;
3839 lc->requested_fc |= PAUSE_AUTONEG;
3840 } else {
3841 lc->advertising = 0;
3842 lc->autoneg = AUTONEG_DISABLE;
3843 }
3844}
3845
Hariprasad Shenai8203b502014-10-09 05:48:47 +05303846#define CIM_PF_NOACCESS 0xeeeeeeee
3847
3848int t4_wait_dev_ready(void __iomem *regs)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003849{
Hariprasad Shenai8203b502014-10-09 05:48:47 +05303850 u32 whoami;
3851
3852 whoami = readl(regs + PL_WHOAMI);
3853 if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003854 return 0;
Hariprasad Shenai8203b502014-10-09 05:48:47 +05303855
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003856 msleep(500);
Hariprasad Shenai8203b502014-10-09 05:48:47 +05303857 whoami = readl(regs + PL_WHOAMI);
3858 return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003859}
3860
Hariprasad Shenaife2ee132014-09-10 17:44:28 +05303861struct flash_desc {
3862 u32 vendor_and_model_id;
3863 u32 size_mb;
3864};
3865
Bill Pemberton91744942012-12-03 09:23:02 -05003866static int get_flash_params(struct adapter *adap)
Dimitris Michailidis900a6592010-06-18 10:05:27 +00003867{
Hariprasad Shenaife2ee132014-09-10 17:44:28 +05303868 /* Table for non-Numonix supported flash parts. Numonix parts are left
3869 * to the preexisting code. All flash parts have 64KB sectors.
3870 */
3871 static struct flash_desc supported_flash[] = {
3872 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
3873 };
3874
Dimitris Michailidis900a6592010-06-18 10:05:27 +00003875 int ret;
3876 u32 info;
3877
3878 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
3879 if (!ret)
3880 ret = sf1_read(adap, 3, 0, 1, &info);
3881 t4_write_reg(adap, SF_OP, 0); /* unlock SF */
3882 if (ret)
3883 return ret;
3884
Hariprasad Shenaife2ee132014-09-10 17:44:28 +05303885 for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
3886 if (supported_flash[ret].vendor_and_model_id == info) {
3887 adap->params.sf_size = supported_flash[ret].size_mb;
3888 adap->params.sf_nsec =
3889 adap->params.sf_size / SF_SEC_SIZE;
3890 return 0;
3891 }
3892
Dimitris Michailidis900a6592010-06-18 10:05:27 +00003893 if ((info & 0xff) != 0x20) /* not a Numonix flash */
3894 return -EINVAL;
3895 info >>= 16; /* log2 of size */
3896 if (info >= 0x14 && info < 0x18)
3897 adap->params.sf_nsec = 1 << (info - 16);
3898 else if (info == 0x18)
3899 adap->params.sf_nsec = 64;
3900 else
3901 return -EINVAL;
3902 adap->params.sf_size = 1 << info;
3903 adap->params.sf_fw_start =
3904 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
Hariprasad Shenaic2906072014-09-10 17:44:30 +05303905
3906 if (adap->params.sf_size < FLASH_MIN_SIZE)
3907 dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
3908 adap->params.sf_size, FLASH_MIN_SIZE);
Dimitris Michailidis900a6592010-06-18 10:05:27 +00003909 return 0;
3910}
3911
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003912/**
3913 * t4_prep_adapter - prepare SW and HW for operation
3914 * @adapter: the adapter
3916 *
3917 * Initialize adapter SW state for the various HW modules, set initial
3918 * values for some adapter tunables, take PHYs out of reset, and
3919 * initialize the MDIO interface.
3920 */
Bill Pemberton91744942012-12-03 09:23:02 -05003921int t4_prep_adapter(struct adapter *adapter)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003922{
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003923 int ret, ver;
3924 uint16_t device_id;
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303925 u32 pl_rev;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003926
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003927 get_pci_mode(adapter, &adapter->params.pci);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303928 pl_rev = G_REV(t4_read_reg(adapter, PL_REV));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003929
Dimitris Michailidis900a6592010-06-18 10:05:27 +00003930 ret = get_flash_params(adapter);
3931 if (ret < 0) {
3932 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
3933 return ret;
3934 }
3935
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003936 /* Retrieve adapter's device ID
3937 */
3938 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
3939 ver = device_id >> 12;
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303940 adapter->params.chip = 0;
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003941 switch (ver) {
3942 case CHELSIO_T4:
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303943 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003944 break;
3945 case CHELSIO_T5:
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303946 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003947 break;
3948 default:
3949 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3950 device_id);
3951 return -EINVAL;
3952 }
3953
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003954 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3955
3956 /*
3957 * Default port for debugging in case we can't reach FW.
3958 */
3959 adapter->params.nports = 1;
3960 adapter->params.portvec = 1;
Vipul Pandya636f9d32012-09-26 02:39:39 +00003961 adapter->params.vpd.cclk = 50000;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003962 return 0;
3963}
3964
Kumar Sanghvidcf7b6f2013-12-18 16:38:23 +05303965/**
3966 * t4_init_tp_params - initialize adap->params.tp
3967 * @adap: the adapter
3968 *
3969 * Initialize various fields of the adapter's TP Parameters structure.
3970 */
3971int t4_init_tp_params(struct adapter *adap)
3972{
3973 int chan;
3974 u32 v;
3975
3976 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3977 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
3978 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
3979
3980 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
3981 for (chan = 0; chan < NCHAN; chan++)
3982 adap->params.tp.tx_modq[chan] = chan;
3983
3984	/* Cache the adapter's Compressed Filter Mode and global Ingress
3985 * Configuration.
3986 */
3987 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3988 &adap->params.tp.vlan_pri_map, 1,
3989 TP_VLAN_PRI_MAP);
3990 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3991 &adap->params.tp.ingress_config, 1,
3992 TP_INGRESS_CONFIG);
3993
3994 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
3995 * shift positions of several elements of the Compressed Filter Tuple
3996 * for this adapter which we need frequently ...
3997 */
3998 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
3999 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
4000 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
4001 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
4002 F_PROTOCOL);
4003
4004 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
4005	 * represents the presence of an Outer VLAN instead of a VNIC ID.
4006 */
4007 if ((adap->params.tp.ingress_config & F_VNIC) == 0)
4008 adap->params.tp.vnic_shift = -1;
4009
4010 return 0;
4011}
4012
4013/**
4014 * t4_filter_field_shift - calculate filter field shift
4015 * @adap: the adapter
4016 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
4017 *
4018 * Return the shift position of a filter field within the Compressed
4019 * Filter Tuple. The filter field is specified via its selection bit
4020 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
4021 */
4022int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
4023{
4024 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
4025 unsigned int sel;
4026 int field_shift;
4027
4028 if ((filter_mode & filter_sel) == 0)
4029 return -1;
4030
4031 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
4032 switch (filter_mode & sel) {
4033 case F_FCOE:
4034 field_shift += W_FT_FCOE;
4035 break;
4036 case F_PORT:
4037 field_shift += W_FT_PORT;
4038 break;
4039 case F_VNIC_ID:
4040 field_shift += W_FT_VNIC_ID;
4041 break;
4042 case F_VLAN:
4043 field_shift += W_FT_VLAN;
4044 break;
4045 case F_TOS:
4046 field_shift += W_FT_TOS;
4047 break;
4048 case F_PROTOCOL:
4049 field_shift += W_FT_PROTOCOL;
4050 break;
4051 case F_ETHERTYPE:
4052 field_shift += W_FT_ETHERTYPE;
4053 break;
4054 case F_MACMATCH:
4055 field_shift += W_FT_MACMATCH;
4056 break;
4057 case F_MPSHITTYPE:
4058 field_shift += W_FT_MPSHITTYPE;
4059 break;
4060 case F_FRAGMENTATION:
4061 field_shift += W_FT_FRAGMENTATION;
4062 break;
4063 }
4064 }
4065 return field_shift;
4066}
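/*
 * Worked example (assumed filter mode): if TP_VLAN_PRI_MAP selects only
 * FCOE, PORT and VLAN, then t4_filter_field_shift(adap, F_VLAN) walks the
 * selection bits below F_VLAN and returns W_FT_FCOE + W_FT_PORT, i.e. the
 * VLAN field starts immediately after the FCoE and Port fields in the
 * Compressed Filter Tuple.  Fields whose selection bit is clear (VNIC_ID
 * in this example) contribute no width.
 */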
4067
Bill Pemberton91744942012-12-03 09:23:02 -05004068int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00004069{
4070 u8 addr[6];
4071 int ret, i, j = 0;
4072 struct fw_port_cmd c;
Dimitris Michailidisf7965642010-07-11 12:01:18 +00004073 struct fw_rss_vi_config_cmd rvc;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00004074
4075 memset(&c, 0, sizeof(c));
Dimitris Michailidisf7965642010-07-11 12:01:18 +00004076 memset(&rvc, 0, sizeof(rvc));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00004077
4078 for_each_port(adap, i) {
4079 unsigned int rss_size;
4080 struct port_info *p = adap2pinfo(adap, i);
4081
4082 while ((adap->params.portvec & (1 << j)) == 0)
4083 j++;
4084
4085 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
4086 FW_CMD_REQUEST | FW_CMD_READ |
4087 FW_PORT_CMD_PORTID(j));
4088 c.action_to_len16 = htonl(
4089 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
4090 FW_LEN16(c));
4091 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4092 if (ret)
4093 return ret;
4094
4095 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
4096 if (ret < 0)
4097 return ret;
4098
4099 p->viid = ret;
4100 p->tx_chan = j;
4101 p->lport = j;
4102 p->rss_size = rss_size;
4103 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
Thadeu Lima de Souza Cascardo40c9f8a2014-06-21 09:48:08 -03004104 adap->port[i]->dev_port = j;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00004105
4106 ret = ntohl(c.u.info.lstatus_to_modtype);
4107 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
4108 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
4109 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00004110 p->mod_type = FW_PORT_MOD_TYPE_NA;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00004111
Dimitris Michailidisf7965642010-07-11 12:01:18 +00004112 rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
4113 FW_CMD_REQUEST | FW_CMD_READ |
4114 FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
4115 rvc.retval_len16 = htonl(FW_LEN16(rvc));
4116 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
4117 if (ret)
4118 return ret;
4119 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
4120
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00004121 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
4122 j++;
4123 }
4124 return 0;
4125}