/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"

static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
			 const u8 *fw_data, unsigned int size, int force);
/**
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}

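/*
 * Illustrative usage sketch (not part of the original driver): poll a busy
 * bit with t4_wait_op_done().  The SF_OP/SF_BUSY register pair used here is
 * the same one the serial flash helpers further down poll; the attempt count
 * and the 5 us delay are arbitrary values chosen for the example.
 */
static inline int example_wait_sf_idle(struct adapter *adapter)
{
	/* wait for the SF_BUSY bit in SF_OP to read back as 0 */
	return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, 10, 5);
}
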
/**
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);                  /* flush */
}

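/*
 * Illustrative usage sketch (not part of the original driver): update just
 * the SF_LOCK bit of the SF_OP register with t4_set_reg_field(), leaving the
 * remaining bits of the register unchanged.
 */
static inline void example_toggle_sf_lock(struct adapter *adapter, bool lock)
{
	t4_set_reg_field(adapter, SF_OP, SF_LOCK, lock ? SF_LOCK : 0);
}
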
/**
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals,
		      unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect addresses
 * @data_reg: register holding the value for the indirect registers
 * @vals: values to write
 * @nregs: how many indirect registers to write
 * @start_idx: address of first indirect register to write
 *
 * Writes a sequential block of registers that are accessed indirectly
 * through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}

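/*
 * Illustrative usage sketch (not part of the original driver): read a block
 * of TP registers through an address/data pair with t4_read_indirect().
 * TP_PIO_ADDR/TP_PIO_DATA are assumed here to be the TP indirect access
 * registers from t4_regs.h.
 */
static inline void example_read_tp_pio(struct adapter *adap, u32 *vals,
				       unsigned int nregs, unsigned int start)
{
	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, vals, nregs, start);
}
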
/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 */
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
	u32 req = ENABLE | FUNCTION(adap->fn) | reg;

	if (is_t4(adap->params.chip))
		req |= F_LOCALCFG;

	t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req);
	*val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA);

	/* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
	 * Configuration Space read.  (None of the other fields matter when
	 * ENABLE is 0 so a simple register write is easier than a
	 * read-modify-write via t4_set_reg_field().)
	 */
	t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
	dev_err(adap->pdev_dev,
		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		(unsigned long long)t4_read_reg64(adap, data_reg),
		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
}

/**
 * t4_wr_mbox_meat - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command.  If @rpl is not %NULL it is used to
 * store the FW's reply to the command.  The command and its optional
 * reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 * to respond.  @sleep_ok determines whether we may sleep while awaiting
 * the response.  If sleeping is allowed we use progressive backoff
 * otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			if (!(v & MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);

			if (FW_CMD_RETVAL_GET((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_GET((int)res);
		}
	}

	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	return -ETIMEDOUT;
}

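/*
 * Illustrative usage sketch (not part of the original driver): the usual
 * calling pattern is to build a firmware command structure and hand it to
 * the mailbox machinery; t4_restart_aneg() further down in this file is a
 * real example.  The helper below only shows the shape of the call, with
 * sleeping allowed and the command buffer reused for the reply.
 */
static inline int example_issue_fw_cmd(struct adapter *adap, unsigned int mbox,
				       void *cmd, int len)
{
	/* len must be a 16-byte multiple no larger than MBOX_LEN */
	return t4_wr_mbox_meat(adap, mbox, cmd, len, cmd, true);
}
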
/**
 * t4_mc_read - read from MC through backdoor accesses
 * @adap: the adapter
 * @idx: which MC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
	u32 mc_bist_status_rdata, mc_bist_data_pattern;

	if (is_t4(adap->params.chip)) {
		mc_bist_cmd = MC_BIST_CMD;
		mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
		mc_bist_cmd_len = MC_BIST_CMD_LEN;
		mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
		mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len, 64);
	t4_write_reg(adap, mc_bist_data_pattern, 0xc);
	t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
		     BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 * t4_edc_read - read from EDC through backdoor accesses
 * @adap: the adapter
 * @idx: which EDC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;

	if (is_t4(adap->params.chip)) {
		edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
		edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
						idx);
	} else {
		edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern =
			EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
		edc_bist_status_rdata =
			EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
	}

	if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd,
		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 * @adap: the adapter
 * @win: PCI-E Memory Window to use
 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 * @addr: address within indicated memory type
 * @len: amount of memory to transfer
 * @buf: host memory buffer
 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 * Reads/writes an [almost] arbitrary memory region in the firmware: the
 * firmware memory address and host buffer must be aligned on 32-bit
 * boundaries; the length may be arbitrary.  The memory is transferred as
 * a raw byte sequence from/to the firmware's memory.  If this memory
 * contains data structures which contain multi-byte integers, it's the
 * caller's responsibility to perform appropriate byte order conversions.
 */
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
		 u32 len, __be32 *buf, int dir)
{
	u32 pos, offset, resid, memoffset;
	u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;

	/* Argument sanity checks ...
	 */
	if (addr & 0x3)
		return -EINVAL;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware.  So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/* Offset into the region of memory which is being accessed
	 * MEM_EDC0 = 0
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- T4
	 * MEM_MC0  = 2 -- For T5
	 * MEM_MC1  = 3 -- For T5
	 */
	edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR));
	if (mtype != MEM_MC1)
		memoffset = (mtype * (edc_size * 1024 * 1024));
	else {
		mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap,
						       MA_EXT_MEMORY_BAR));
		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
	}

	/* Determine the PCIE_MEM_ACCESS_OFFSET */
	addr = addr + memoffset;

	/* Each PCI-E Memory Window is programmed with a window size -- or
	 * "aperture" -- which controls the granularity of its mapping onto
	 * adapter memory.  We need to grab that aperture in order to know
	 * how to use the specified window.  The window is also programmed
	 * with the base address of the Memory Window in BAR0's address
	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
	 * the address is relative to BAR0.
	 */
	mem_reg = t4_read_reg(adap,
			      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN,
						  win));
	mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10);
	mem_base = GET_PCIEOFST(mem_reg) << 10;
	if (is_t4(adap->params.chip))
		mem_base -= adap->t4_bar0;
	win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.
	 */
	pos = addr & ~(mem_aperture-1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.  (Read it back to ensure that changes propagate before we
	 * attempt to use the new value.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win),
		     pos | win_pf);
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = (__force __be32) t4_read_reg(adap,
							      mem_base + offset);
		else
			t4_write_reg(adap, mem_base + offset,
				     (__force u32) *buf++);
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.  Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_write_reg(adap,
				     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
							 win), pos | win_pf);
			t4_read_reg(adap,
				    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
							win));
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount.  The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid) {
		union {
			__be32 word;
			char byte[4];
		} last;
		unsigned char *bp;
		int i;

		if (dir == T4_MEMORY_READ) {
			last.word = (__force __be32) t4_read_reg(adap,
								 mem_base + offset);
			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
				bp[i] = last.byte[i];
		} else {
			last.word = *buf;
			for (i = resid; i < 4; i++)
				last.byte[i] = 0;
			t4_write_reg(adap, mem_base + offset,
				     (__force u32) last.word);
		}
	}

	return 0;
}

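/*
 * Illustrative usage sketch (not part of the original driver): read a
 * 32-bit aligned block from the start of EDC0 through PCI-E memory window 0.
 * The window index 0 is an assumption made for the example; callers normally
 * pass whichever window the driver has reserved for this purpose.
 */
static inline int example_read_edc0(struct adapter *adap, __be32 *buf, u32 len)
{
	return t4_memory_rw(adap, 0, MEM_EDC0, 0, len, buf, T4_MEMORY_READ);
}
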
#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            1024
#define CHELSIO_VPD_UNIQUE_ID 0x82

/**
 * t4_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: whether to enable or disable write protection
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
	unsigned int v = enable ? 0xc : 0;
	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
	return ret < 0 ? ret : 0;
}

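/*
 * Illustrative usage sketch (not part of the original driver): callers that
 * rewrite the serial EEPROM typically drop write protection first and
 * restore it when they are done.
 */
static inline int example_seeprom_update(struct adapter *adapter)
{
	int ret = t4_seeprom_wp(adapter, false);	/* disable protection */

	if (ret)
		return ret;
	/* ... VPD writes via pci_write_vpd() would go here ... */
	return t4_seeprom_wp(adapter, true);		/* re-enable protection */
}
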
/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	u32 cclk_param, cclk_val;
	int i, ret, addr;
	int ec, sn, pn;
	u8 *vpd, csum;
	unsigned int vpdr_len, kw_offset, id_len;

	vpd = vmalloc(VPD_LEN);
	if (!vpd)
		return -ENOMEM;

	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
	if (ret < 0)
		goto out;

	/* The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82.  The first byte of
	 * a VPD shall be CHELSIO_VPD_UNIQUE_ID (0x82).  The VPD programming
	 * software is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
	if (ret < 0)
		goto out;

	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
		ret = -EINVAL;
		goto out;
	}

	id_len = pci_vpd_lrdt_size(vpd);
	if (id_len > ID_LEN)
		id_len = ID_LEN;

	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
		ret = -EINVAL;
		goto out;
	}

	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
	if (vpdr_len + kw_offset > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		ret = -EINVAL;
		goto out;
	}

#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		ret = -EINVAL; \
		goto out; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		ret = -EINVAL;
		goto out;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
#undef FIND_VPD_KW

	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);
	i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strim(p->pn);

	/*
	 * Ask firmware for the Core Clock since it knows how to translate the
	 * Reference Clock ('V2') VPD field into a Core Clock value ...
	 */
	cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		      FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
	ret = t4_query_params(adapter, adapter->mbox, 0, 0,
			      1, &cclk_param, &cclk_val);

out:
	vfree(vpd);
	if (ret)
		return ret;
	p->cclk = cclk_val;

	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_DATA, val);
	t4_write_reg(adapter, SF_OP, lock |
		     cont | BYTECNT(byte_cnt - 1) | OP_WR);
	return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32) (htonl(*data));
	}
	return 0;
}

/**
 * t4_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.  All the data must be written to the same page.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 * t4_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 * t4_get_tp_version - read the TP microcode version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}

/* Is the given firmware API compatible with the one the driver was compiled
 * with?
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{

	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}

/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 */
static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
				int k, int c)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version supported with this driver";
		goto install;
	}

	return 0;

install:
	dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
		"installing firmware %u.%u.%u.%u on card.\n",
		FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
		FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason,
		FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
		FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));

	return 1;
}

int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state,
	       int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			     sizeof(*card_fw) / sizeof(uint32_t),
			     (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		dev_err(adap->pdev_dev,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver))) {
		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			dev_err(adap->pdev_dev,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
			"chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			state,
			FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d),
			FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d),
			FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
			FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c),
			FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
			FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}

/**
 * t4_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			dev_err(adapter->pdev_dev,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 * t4_flash_cfg_addr - return the address of the flash configuration file
 * @adapter: the adapter
 *
 * Return the address within the flash where the Firmware Configuration
 * File is stored.
 */
unsigned int t4_flash_cfg_addr(struct adapter *adapter)
{
	if (adapter->params.sf_size == 0x100000)
		return FLASH_FPGA_CFG_START;
	else
		return FLASH_CFG_START;
}

/**
 * t4_load_fw - download firmware
 * @adap: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	return ret;
}

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_ANEG)

/**
 * t4_link_start - apply link configuration to MAC/PHY
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_restart_aneg - restart autonegotiation
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 *
 * Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

Vipul Pandya8caa1e82012-05-18 15:29:25 +05301256typedef void (*int_handler_t)(struct adapter *adap);
1257
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001258struct intr_info {
1259 unsigned int mask; /* bits to check in interrupt status */
1260 const char *msg; /* message to print or NULL */
1261 short stat_idx; /* stat counter to increment or -1 */
1262 unsigned short fatal; /* whether the condition reported is fatal */
Vipul Pandya8caa1e82012-05-18 15:29:25 +05301263 int_handler_t int_handler; /* platform-specific int handler */
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001264};
1265
1266/**
1267 * t4_handle_intr_status - table driven interrupt handler
1268 * @adapter: the adapter that generated the interrupt
1269 * @reg: the interrupt status register to process
1270 * @acts: table of interrupt actions
1271 *
1272 * A table driven interrupt handler that applies a set of masks to an
1273 * interrupt status word and performs the corresponding actions if the
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001274 * interrupts described by the mask have occurred. The actions include
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001275 * optionally emitting a warning or alert message. The table is terminated
1276 * by an entry specifying mask 0. Returns the number of fatal interrupt
1277 * conditions.
1278 */
1279static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1280 const struct intr_info *acts)
1281{
1282 int fatal = 0;
1283 unsigned int mask = 0;
1284 unsigned int status = t4_read_reg(adapter, reg);
1285
1286 for ( ; acts->mask; ++acts) {
1287 if (!(status & acts->mask))
1288 continue;
1289 if (acts->fatal) {
1290 fatal++;
1291 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1292 status & acts->mask);
1293 } else if (acts->msg && printk_ratelimit())
1294 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1295 status & acts->mask);
Vipul Pandya8caa1e82012-05-18 15:29:25 +05301296 if (acts->int_handler)
1297 acts->int_handler(adapter);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001298 mask |= acts->mask;
1299 }
1300 status &= mask;
1301 if (status) /* clear processed interrupts */
1302 t4_write_reg(adapter, reg, status);
1303 return fatal;
1304}
1305
1306/*
1307 * Interrupt handler for the PCIE module.
1308 */
1309static void pcie_intr_handler(struct adapter *adapter)
1310{
Joe Perches005b5712010-12-14 21:36:53 +00001311 static const struct intr_info sysbus_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001312 { RNPP, "RXNP array parity error", -1, 1 },
1313 { RPCP, "RXPC array parity error", -1, 1 },
1314 { RCIP, "RXCIF array parity error", -1, 1 },
1315 { RCCP, "Rx completions control array parity error", -1, 1 },
1316 { RFTP, "RXFT array parity error", -1, 1 },
1317 { 0 }
1318 };
Joe Perches005b5712010-12-14 21:36:53 +00001319 static const struct intr_info pcie_port_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001320 { TPCP, "TXPC array parity error", -1, 1 },
1321 { TNPP, "TXNP array parity error", -1, 1 },
1322 { TFTP, "TXFT array parity error", -1, 1 },
1323 { TCAP, "TXCA array parity error", -1, 1 },
1324 { TCIP, "TXCIF array parity error", -1, 1 },
1325 { RCAP, "RXCA array parity error", -1, 1 },
1326 { OTDD, "outbound request TLP discarded", -1, 1 },
1327 { RDPE, "Rx data parity error", -1, 1 },
1328 { TDUE, "Tx uncorrectable data error", -1, 1 },
1329 { 0 }
1330 };
Joe Perches005b5712010-12-14 21:36:53 +00001331 static const struct intr_info pcie_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001332 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1333 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1334 { MSIDATAPERR, "MSI data parity error", -1, 1 },
1335 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1336 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1337 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1338 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1339 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1340 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1341 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1342 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1343 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1344 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1345 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1346 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1347 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1348 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1349 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1350 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1351 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1352 { FIDPERR, "PCI FID parity error", -1, 1 },
1353 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1354 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
1355 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1356 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1357 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
1358 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
1359 { PCIESINT, "PCI core secondary fault", -1, 1 },
1360 { PCIEPINT, "PCI core primary fault", -1, 1 },
1361 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
1362 { 0 }
1363 };
1364
Santosh Rastapur0a57a532013-03-14 05:08:49 +00001365	static const struct intr_info t5_pcie_intr_info[] = {
1366 { MSTGRPPERR, "Master Response Read Queue parity error",
1367 -1, 1 },
1368 { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
1369 { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
1370 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1371 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1372 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1373 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1374 { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
1375 -1, 1 },
1376 { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
1377 -1, 1 },
1378 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1379 { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
1380 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1381 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1382 { DREQWRPERR, "PCI DMA channel write request parity error",
1383 -1, 1 },
1384 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1385 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
 1386		{ HREQWRPERR, "PCI HMA channel write request parity error",
 1386		  -1, 1 },
1387 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1388 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1389 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1390 { FIDPERR, "PCI FID parity error", -1, 1 },
 1391		{ VFIDPERR, "PCI VFID parity error", -1, 1 },
1392 { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
1393 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1394 { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
1395 -1, 1 },
1396 { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
1397 { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
1398 { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
1399 { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
1400 { READRSPERR, "Outbound read error", -1, 0 },
1401 { 0 }
1402 };
1403
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001404 int fat;
1405
1406 fat = t4_handle_intr_status(adapter,
1407 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1408 sysbus_intr_info) +
1409 t4_handle_intr_status(adapter,
1410 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1411 pcie_port_intr_info) +
Santosh Rastapur0a57a532013-03-14 05:08:49 +00001412 t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05301413 is_t4(adapter->params.chip) ?
Santosh Rastapur0a57a532013-03-14 05:08:49 +00001414 pcie_intr_info : t5_pcie_intr_info);
1415
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001416 if (fat)
1417 t4_fatal_err(adapter);
1418}
1419
1420/*
1421 * TP interrupt handler.
1422 */
1423static void tp_intr_handler(struct adapter *adapter)
1424{
Joe Perches005b5712010-12-14 21:36:53 +00001425 static const struct intr_info tp_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001426 { 0x3fffffff, "TP parity error", -1, 1 },
1427 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1428 { 0 }
1429 };
1430
1431 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1432 t4_fatal_err(adapter);
1433}
1434
1435/*
1436 * SGE interrupt handler.
1437 */
1438static void sge_intr_handler(struct adapter *adapter)
1439{
1440 u64 v;
1441
Joe Perches005b5712010-12-14 21:36:53 +00001442 static const struct intr_info sge_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001443 { ERR_CPL_EXCEED_IQE_SIZE,
1444 "SGE received CPL exceeding IQE size", -1, 1 },
1445 { ERR_INVALID_CIDX_INC,
1446 "SGE GTS CIDX increment too large", -1, 0 },
1447 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
Vipul Pandya840f3002012-09-05 02:01:55 +00001448 { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
1449 { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
1450 { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001451 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1452 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1453 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1454 0 },
1455 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1456 0 },
1457 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1458 0 },
1459 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1460 0 },
1461 { ERR_ING_CTXT_PRIO,
1462 "SGE too many priority ingress contexts", -1, 0 },
1463 { ERR_EGR_CTXT_PRIO,
1464 "SGE too many priority egress contexts", -1, 0 },
1465 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1466 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1467 { 0 }
1468 };
1469
1470 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
Vipul Pandya8caa1e82012-05-18 15:29:25 +05301471 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001472 if (v) {
1473 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
Vipul Pandya8caa1e82012-05-18 15:29:25 +05301474 (unsigned long long)v);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001475 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1476 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1477 }
1478
1479 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1480 v != 0)
1481 t4_fatal_err(adapter);
1482}
1483
1484/*
1485 * CIM interrupt handler.
1486 */
1487static void cim_intr_handler(struct adapter *adapter)
1488{
Joe Perches005b5712010-12-14 21:36:53 +00001489 static const struct intr_info cim_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001490 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1491 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1492 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1493 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1494 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1495 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1496 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1497 { 0 }
1498 };
Joe Perches005b5712010-12-14 21:36:53 +00001499 static const struct intr_info cim_upintr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001500 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1501 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1502 { ILLWRINT, "CIM illegal write", -1, 1 },
1503 { ILLRDINT, "CIM illegal read", -1, 1 },
1504 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1505 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1506 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1507 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1508 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1509 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1510 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1511 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1512 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1513 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1514 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1515 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1516 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1517 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1518 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1519 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1520 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1521 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1522 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1523 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1524 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1525 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1526 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1527 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1528 { 0 }
1529 };
1530
1531 int fat;
1532
1533 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1534 cim_intr_info) +
1535 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1536 cim_upintr_info);
1537 if (fat)
1538 t4_fatal_err(adapter);
1539}
1540
1541/*
1542 * ULP RX interrupt handler.
1543 */
1544static void ulprx_intr_handler(struct adapter *adapter)
1545{
Joe Perches005b5712010-12-14 21:36:53 +00001546 static const struct intr_info ulprx_intr_info[] = {
Dimitris Michailidis91e9a1e2010-06-18 10:05:33 +00001547 { 0x1800000, "ULPRX context error", -1, 1 },
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001548 { 0x7fffff, "ULPRX parity error", -1, 1 },
1549 { 0 }
1550 };
1551
1552 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1553 t4_fatal_err(adapter);
1554}
1555
1556/*
1557 * ULP TX interrupt handler.
1558 */
1559static void ulptx_intr_handler(struct adapter *adapter)
1560{
Joe Perches005b5712010-12-14 21:36:53 +00001561 static const struct intr_info ulptx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001562 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1563 0 },
1564 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1565 0 },
1566 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1567 0 },
1568 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1569 0 },
1570 { 0xfffffff, "ULPTX parity error", -1, 1 },
1571 { 0 }
1572 };
1573
1574 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1575 t4_fatal_err(adapter);
1576}
1577
1578/*
1579 * PM TX interrupt handler.
1580 */
1581static void pmtx_intr_handler(struct adapter *adapter)
1582{
Joe Perches005b5712010-12-14 21:36:53 +00001583 static const struct intr_info pmtx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001584 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1585 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1586 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1587 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1588 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1589 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1590 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1591 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1592 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1593 { 0 }
1594 };
1595
1596 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1597 t4_fatal_err(adapter);
1598}
1599
1600/*
1601 * PM RX interrupt handler.
1602 */
1603static void pmrx_intr_handler(struct adapter *adapter)
1604{
Joe Perches005b5712010-12-14 21:36:53 +00001605 static const struct intr_info pmrx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001606 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1607 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1608 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1609 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1610 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1611 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1612 { 0 }
1613 };
1614
1615 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1616 t4_fatal_err(adapter);
1617}
1618
1619/*
1620 * CPL switch interrupt handler.
1621 */
1622static void cplsw_intr_handler(struct adapter *adapter)
1623{
Joe Perches005b5712010-12-14 21:36:53 +00001624 static const struct intr_info cplsw_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001625 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1626 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1627 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1628 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1629 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1630 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1631 { 0 }
1632 };
1633
1634 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1635 t4_fatal_err(adapter);
1636}
1637
1638/*
1639 * LE interrupt handler.
1640 */
1641static void le_intr_handler(struct adapter *adap)
1642{
Joe Perches005b5712010-12-14 21:36:53 +00001643 static const struct intr_info le_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001644 { LIPMISS, "LE LIP miss", -1, 0 },
1645 { LIP0, "LE 0 LIP error", -1, 0 },
1646 { PARITYERR, "LE parity error", -1, 1 },
1647 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1648 { REQQPARERR, "LE request queue parity error", -1, 1 },
1649 { 0 }
1650 };
1651
1652 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1653 t4_fatal_err(adap);
1654}
1655
1656/*
1657 * MPS interrupt handler.
1658 */
1659static void mps_intr_handler(struct adapter *adapter)
1660{
Joe Perches005b5712010-12-14 21:36:53 +00001661 static const struct intr_info mps_rx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001662 { 0xffffff, "MPS Rx parity error", -1, 1 },
1663 { 0 }
1664 };
Joe Perches005b5712010-12-14 21:36:53 +00001665 static const struct intr_info mps_tx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001666 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1667 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1668 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1669 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1670 { BUBBLE, "MPS Tx underflow", -1, 1 },
1671 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1672 { FRMERR, "MPS Tx framing error", -1, 1 },
1673 { 0 }
1674 };
Joe Perches005b5712010-12-14 21:36:53 +00001675 static const struct intr_info mps_trc_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001676 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1677 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1678 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1679 { 0 }
1680 };
Joe Perches005b5712010-12-14 21:36:53 +00001681 static const struct intr_info mps_stat_sram_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001682 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1683 { 0 }
1684 };
Joe Perches005b5712010-12-14 21:36:53 +00001685 static const struct intr_info mps_stat_tx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001686 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1687 { 0 }
1688 };
Joe Perches005b5712010-12-14 21:36:53 +00001689 static const struct intr_info mps_stat_rx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001690 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1691 { 0 }
1692 };
Joe Perches005b5712010-12-14 21:36:53 +00001693 static const struct intr_info mps_cls_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001694 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1695 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1696 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1697 { 0 }
1698 };
1699
1700 int fat;
1701
1702 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1703 mps_rx_intr_info) +
1704 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1705 mps_tx_intr_info) +
1706 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1707 mps_trc_intr_info) +
1708 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1709 mps_stat_sram_intr_info) +
1710 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1711 mps_stat_tx_intr_info) +
1712 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1713 mps_stat_rx_intr_info) +
1714 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1715 mps_cls_intr_info);
1716
1717 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1718 RXINT | TXINT | STATINT);
1719 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1720 if (fat)
1721 t4_fatal_err(adapter);
1722}
1723
1724#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1725
1726/*
1727 * EDC/MC interrupt handler.
1728 */
1729static void mem_intr_handler(struct adapter *adapter, int idx)
1730{
Hariprasad Shenai822dd8a2014-07-21 20:55:12 +05301731 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001732
1733 unsigned int addr, cnt_addr, v;
1734
1735 if (idx <= MEM_EDC1) {
1736 addr = EDC_REG(EDC_INT_CAUSE, idx);
1737 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
Hariprasad Shenai822dd8a2014-07-21 20:55:12 +05301738 } else if (idx == MEM_MC) {
1739 if (is_t4(adapter->params.chip)) {
1740 addr = MC_INT_CAUSE;
1741 cnt_addr = MC_ECC_STATUS;
1742 } else {
1743 addr = MC_P_INT_CAUSE;
1744 cnt_addr = MC_P_ECC_STATUS;
1745 }
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001746 } else {
Hariprasad Shenai822dd8a2014-07-21 20:55:12 +05301747 addr = MC_REG(MC_P_INT_CAUSE, 1);
1748 cnt_addr = MC_REG(MC_P_ECC_STATUS, 1);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001749 }
1750
1751 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1752 if (v & PERR_INT_CAUSE)
1753 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1754 name[idx]);
1755 if (v & ECC_CE_INT_CAUSE) {
1756 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1757
1758 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1759 if (printk_ratelimit())
1760 dev_warn(adapter->pdev_dev,
1761 "%u %s correctable ECC data error%s\n",
1762 cnt, name[idx], cnt > 1 ? "s" : "");
1763 }
1764 if (v & ECC_UE_INT_CAUSE)
1765 dev_alert(adapter->pdev_dev,
1766 "%s uncorrectable ECC data error\n", name[idx]);
1767
1768 t4_write_reg(adapter, addr, v);
1769 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1770 t4_fatal_err(adapter);
1771}
1772
1773/*
1774 * MA interrupt handler.
1775 */
1776static void ma_intr_handler(struct adapter *adap)
1777{
1778 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1779
1780 if (status & MEM_PERR_INT_CAUSE)
1781 dev_alert(adap->pdev_dev,
1782 "MA parity error, parity status %#x\n",
1783 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1784 if (status & MEM_WRAP_INT_CAUSE) {
1785 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1786 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1787 "client %u to address %#x\n",
1788 MEM_WRAP_CLIENT_NUM_GET(v),
1789 MEM_WRAP_ADDRESS_GET(v) << 4);
1790 }
1791 t4_write_reg(adap, MA_INT_CAUSE, status);
1792 t4_fatal_err(adap);
1793}
1794
1795/*
1796 * SMB interrupt handler.
1797 */
1798static void smb_intr_handler(struct adapter *adap)
1799{
Joe Perches005b5712010-12-14 21:36:53 +00001800 static const struct intr_info smb_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001801 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1802 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1803 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1804 { 0 }
1805 };
1806
1807 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1808 t4_fatal_err(adap);
1809}
1810
1811/*
1812 * NC-SI interrupt handler.
1813 */
1814static void ncsi_intr_handler(struct adapter *adap)
1815{
Joe Perches005b5712010-12-14 21:36:53 +00001816 static const struct intr_info ncsi_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001817 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1818 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1819 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1820 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1821 { 0 }
1822 };
1823
1824 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1825 t4_fatal_err(adap);
1826}
1827
1828/*
1829 * XGMAC interrupt handler.
1830 */
1831static void xgmac_intr_handler(struct adapter *adap, int port)
1832{
Santosh Rastapur0a57a532013-03-14 05:08:49 +00001833 u32 v, int_cause_reg;
1834
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05301835 if (is_t4(adap->params.chip))
Santosh Rastapur0a57a532013-03-14 05:08:49 +00001836 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
1837 else
1838 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
1839
1840 v = t4_read_reg(adap, int_cause_reg);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001841
1842 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1843 if (!v)
1844 return;
1845
1846 if (v & TXFIFO_PRTY_ERR)
1847 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1848 port);
1849 if (v & RXFIFO_PRTY_ERR)
1850 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1851 port);
 1852	t4_write_reg(adap, int_cause_reg, v);
1853 t4_fatal_err(adap);
1854}
1855
1856/*
1857 * PL interrupt handler.
1858 */
1859static void pl_intr_handler(struct adapter *adap)
1860{
Joe Perches005b5712010-12-14 21:36:53 +00001861 static const struct intr_info pl_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001862 { FATALPERR, "T4 fatal parity error", -1, 1 },
1863 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1864 { 0 }
1865 };
1866
1867 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1868 t4_fatal_err(adap);
1869}
1870
Dimitris Michailidis63bccee2010-08-02 13:19:16 +00001871#define PF_INTR_MASK (PFSW)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001872#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1873 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1874 CPL_SWITCH | SGE | ULP_TX)
1875
1876/**
1877 * t4_slow_intr_handler - control path interrupt handler
1878 * @adapter: the adapter
1879 *
1880 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1881 * The designation 'slow' is because it involves register reads, while
1882 * data interrupts typically don't involve any MMIOs.
1883 */
1884int t4_slow_intr_handler(struct adapter *adapter)
1885{
1886 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1887
1888 if (!(cause & GLBL_INTR_MASK))
1889 return 0;
1890 if (cause & CIM)
1891 cim_intr_handler(adapter);
1892 if (cause & MPS)
1893 mps_intr_handler(adapter);
1894 if (cause & NCSI)
1895 ncsi_intr_handler(adapter);
1896 if (cause & PL)
1897 pl_intr_handler(adapter);
1898 if (cause & SMB)
1899 smb_intr_handler(adapter);
1900 if (cause & XGMAC0)
1901 xgmac_intr_handler(adapter, 0);
1902 if (cause & XGMAC1)
1903 xgmac_intr_handler(adapter, 1);
1904 if (cause & XGMAC_KR0)
1905 xgmac_intr_handler(adapter, 2);
1906 if (cause & XGMAC_KR1)
1907 xgmac_intr_handler(adapter, 3);
1908 if (cause & PCIE)
1909 pcie_intr_handler(adapter);
1910 if (cause & MC)
1911 mem_intr_handler(adapter, MEM_MC);
Hariprasad Shenai822dd8a2014-07-21 20:55:12 +05301912 if (!is_t4(adapter->params.chip) && (cause & MC1))
1913 mem_intr_handler(adapter, MEM_MC1);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001914 if (cause & EDC0)
1915 mem_intr_handler(adapter, MEM_EDC0);
1916 if (cause & EDC1)
1917 mem_intr_handler(adapter, MEM_EDC1);
1918 if (cause & LE)
1919 le_intr_handler(adapter);
1920 if (cause & TP)
1921 tp_intr_handler(adapter);
1922 if (cause & MA)
1923 ma_intr_handler(adapter);
1924 if (cause & PM_TX)
1925 pmtx_intr_handler(adapter);
1926 if (cause & PM_RX)
1927 pmrx_intr_handler(adapter);
1928 if (cause & ULP_RX)
1929 ulprx_intr_handler(adapter);
1930 if (cause & CPL_SWITCH)
1931 cplsw_intr_handler(adapter);
1932 if (cause & SGE)
1933 sge_intr_handler(adapter);
1934 if (cause & ULP_TX)
1935 ulptx_intr_handler(adapter);
1936
1937 /* Clear the interrupts just processed for which we are the master. */
1938 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1939 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1940 return 1;
1941}
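/*
 * Illustrative sketch (not part of the driver): a minimal non-data interrupt
 * service routine could simply delegate to t4_slow_intr_handler() and turn
 * its return value into an IRQ disposition. The function name below is
 * hypothetical.
 *
 *	static irqreturn_t my_nondata_intr(int irq, void *cookie)
 *	{
 *		struct adapter *adap = cookie;
 *
 *		if (t4_slow_intr_handler(adap))
 *			return IRQ_HANDLED;
 *		return IRQ_NONE;
 *	}
 */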
1942
1943/**
1944 * t4_intr_enable - enable interrupts
1945 * @adapter: the adapter whose interrupts should be enabled
1946 *
1947 * Enable PF-specific interrupts for the calling function and the top-level
1948 * interrupt concentrator for global interrupts. Interrupts are already
 1949 * enabled at each module; here we just enable the roots of the interrupt
1950 * hierarchies.
1951 *
1952 * Note: this function should be called only when the driver manages
1953 * non PF-specific interrupts from the various HW modules. Only one PCI
1954 * function at a time should be doing this.
1955 */
1956void t4_intr_enable(struct adapter *adapter)
1957{
1958 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1959
1960 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1961 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1962 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1963 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1964 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1965 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1966 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
Vipul Pandya840f3002012-09-05 02:01:55 +00001967 DBFIFO_HP_INT | DBFIFO_LP_INT |
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001968 EGRESS_SIZE_ERR);
1969 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1970 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1971}
1972
1973/**
1974 * t4_intr_disable - disable interrupts
1975 * @adapter: the adapter whose interrupts should be disabled
1976 *
1977 * Disable interrupts. We only disable the top-level interrupt
1978 * concentrators. The caller must be a PCI function managing global
1979 * interrupts.
1980 */
1981void t4_intr_disable(struct adapter *adapter)
1982{
1983 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1984
1985 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1986 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1987}
1988
1989/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001990 * hash_mac_addr - return the hash value of a MAC address
1991 * @addr: the 48-bit Ethernet MAC address
1992 *
1993 * Hashes a MAC address according to the hash function used by HW inexact
1994 * (hash) address matching.
1995 */
1996static int hash_mac_addr(const u8 *addr)
1997{
1998 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1999 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2000 a ^= b;
2001 a ^= (a >> 12);
2002 a ^= (a >> 6);
2003 return a & 0x3f;
2004}
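/*
 * Illustrative sketch: the 6-bit result selects a bit position in the 64-bit
 * inexact-match hash vector, so a caller building such a vector for a set of
 * addresses might do something like the following (variable names are
 * hypothetical):
 *
 *	u64 vec = 0;
 *
 *	for (i = 0; i < naddr; i++)
 *		vec |= 1ULL << hash_mac_addr(addr[i]);
 */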
2005
2006/**
2007 * t4_config_rss_range - configure a portion of the RSS mapping table
2008 * @adapter: the adapter
2009 * @mbox: mbox to use for the FW command
2010 * @viid: virtual interface whose RSS subtable is to be written
2011 * @start: start entry in the table to write
2012 * @n: how many table entries to write
2013 * @rspq: values for the response queue lookup table
2014 * @nrspq: number of values in @rspq
2015 *
2016 * Programs the selected part of the VI's RSS mapping table with the
2017 * provided values. If @nrspq < @n the supplied values are used repeatedly
2018 * until the full table range is populated.
2019 *
2020 * The caller must ensure the values in @rspq are in the range allowed for
2021 * @viid.
2022 */
2023int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2024 int start, int n, const u16 *rspq, unsigned int nrspq)
2025{
2026 int ret;
2027 const u16 *rsp = rspq;
2028 const u16 *rsp_end = rspq + nrspq;
2029 struct fw_rss_ind_tbl_cmd cmd;
2030
2031 memset(&cmd, 0, sizeof(cmd));
2032 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2033 FW_CMD_REQUEST | FW_CMD_WRITE |
2034 FW_RSS_IND_TBL_CMD_VIID(viid));
2035 cmd.retval_len16 = htonl(FW_LEN16(cmd));
2036
2037 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
2038 while (n > 0) {
2039 int nq = min(n, 32);
2040 __be32 *qp = &cmd.iq0_to_iq2;
2041
2042 cmd.niqid = htons(nq);
2043 cmd.startidx = htons(start);
2044
2045 start += nq;
2046 n -= nq;
2047
2048 while (nq > 0) {
2049 unsigned int v;
2050
2051 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
2052 if (++rsp >= rsp_end)
2053 rsp = rspq;
2054 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
2055 if (++rsp >= rsp_end)
2056 rsp = rspq;
2057 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
2058 if (++rsp >= rsp_end)
2059 rsp = rspq;
2060
2061 *qp++ = htonl(v);
2062 nq -= 3;
2063 }
2064
2065 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2066 if (ret)
2067 return ret;
2068 }
2069 return 0;
2070}
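/*
 * Illustrative sketch of a caller populating a VI's entire indirection table
 * from an array of response queue IDs; names such as pi->rss and
 * pi->rss_size are assumed for the example only:
 *
 *	err = t4_config_rss_range(adap, adap->mbox, pi->viid, 0,
 *				  pi->rss_size, pi->rss, pi->rss_size);
 *	if (err)
 *		dev_err(adap->pdev_dev, "RSS table write failed\n");
 */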
2071
2072/**
2073 * t4_config_glbl_rss - configure the global RSS mode
2074 * @adapter: the adapter
2075 * @mbox: mbox to use for the FW command
2076 * @mode: global RSS mode
2077 * @flags: mode-specific flags
2078 *
2079 * Sets the global RSS mode.
2080 */
2081int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2082 unsigned int flags)
2083{
2084 struct fw_rss_glb_config_cmd c;
2085
2086 memset(&c, 0, sizeof(c));
2087 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2088 FW_CMD_REQUEST | FW_CMD_WRITE);
2089 c.retval_len16 = htonl(FW_LEN16(c));
2090 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2091 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2092 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2093 c.u.basicvirtual.mode_pkd =
2094 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2095 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2096 } else
2097 return -EINVAL;
2098 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2099}
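/*
 * Illustrative sketch: selecting the "basic virtual" global RSS mode, the
 * mode a typical caller of this function requests; rss_glb_flags is a
 * placeholder for whatever mode-specific flags the caller wants.
 *
 *	err = t4_config_glbl_rss(adap, adap->mbox,
 *				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
 *				 rss_glb_flags);
 */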
2100
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002101/**
2102 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
2103 * @adap: the adapter
2104 * @v4: holds the TCP/IP counter values
2105 * @v6: holds the TCP/IPv6 counter values
2106 *
2107 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2108 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2109 */
2110void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2111 struct tp_tcp_stats *v6)
2112{
2113 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
2114
2115#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
2116#define STAT(x) val[STAT_IDX(x)]
2117#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2118
2119 if (v4) {
2120 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2121 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
2122 v4->tcpOutRsts = STAT(OUT_RST);
2123 v4->tcpInSegs = STAT64(IN_SEG);
2124 v4->tcpOutSegs = STAT64(OUT_SEG);
2125 v4->tcpRetransSegs = STAT64(RXT_SEG);
2126 }
2127 if (v6) {
2128 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2129 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
2130 v6->tcpOutRsts = STAT(OUT_RST);
2131 v6->tcpInSegs = STAT64(IN_SEG);
2132 v6->tcpOutSegs = STAT64(OUT_SEG);
2133 v6->tcpRetransSegs = STAT64(RXT_SEG);
2134 }
2135#undef STAT64
2136#undef STAT
2137#undef STAT_IDX
2138}
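/*
 * Illustrative sketch: reading only the TCP/IPv4 MIB counters (passing NULL
 * for @v6 skips the IPv6 set), e.g. to report retransmitted segments:
 *
 *	struct tp_tcp_stats v4;
 *
 *	t4_tp_get_tcp_stats(adap, &v4, NULL);
 *	dev_info(adap->pdev_dev, "retransmitted segments: %llu\n",
 *		 (unsigned long long)v4.tcpRetransSegs);
 */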
2139
2140/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002141 * t4_read_mtu_tbl - returns the values in the HW path MTU table
2142 * @adap: the adapter
2143 * @mtus: where to store the MTU values
2144 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
2145 *
2146 * Reads the HW path MTU table.
2147 */
2148void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2149{
2150 u32 v;
2151 int i;
2152
2153 for (i = 0; i < NMTUS; ++i) {
2154 t4_write_reg(adap, TP_MTU_TABLE,
2155 MTUINDEX(0xff) | MTUVALUE(i));
2156 v = t4_read_reg(adap, TP_MTU_TABLE);
2157 mtus[i] = MTUVALUE_GET(v);
2158 if (mtu_log)
2159 mtu_log[i] = MTUWIDTH_GET(v);
2160 }
2161}
2162
2163/**
Vipul Pandya636f9d32012-09-26 02:39:39 +00002164 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
2165 * @adap: the adapter
2166 * @addr: the indirect TP register address
2167 * @mask: specifies the field within the register to modify
2168 * @val: new value for the field
2169 *
2170 * Sets a field of an indirect TP register to the given value.
2171 */
2172void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
2173 unsigned int mask, unsigned int val)
2174{
2175 t4_write_reg(adap, TP_PIO_ADDR, addr);
2176 val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
2177 t4_write_reg(adap, TP_PIO_DATA, val);
2178}
2179
2180/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002181 * init_cong_ctrl - initialize congestion control parameters
2182 * @a: the alpha values for congestion control
2183 * @b: the beta values for congestion control
2184 *
2185 * Initialize the congestion control parameters.
2186 */
Bill Pemberton91744942012-12-03 09:23:02 -05002187static void init_cong_ctrl(unsigned short *a, unsigned short *b)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002188{
2189 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2190 a[9] = 2;
2191 a[10] = 3;
2192 a[11] = 4;
2193 a[12] = 5;
2194 a[13] = 6;
2195 a[14] = 7;
2196 a[15] = 8;
2197 a[16] = 9;
2198 a[17] = 10;
2199 a[18] = 14;
2200 a[19] = 17;
2201 a[20] = 21;
2202 a[21] = 25;
2203 a[22] = 30;
2204 a[23] = 35;
2205 a[24] = 45;
2206 a[25] = 60;
2207 a[26] = 80;
2208 a[27] = 100;
2209 a[28] = 200;
2210 a[29] = 300;
2211 a[30] = 400;
2212 a[31] = 500;
2213
2214 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2215 b[9] = b[10] = 1;
2216 b[11] = b[12] = 2;
2217 b[13] = b[14] = b[15] = b[16] = 3;
2218 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2219 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2220 b[28] = b[29] = 6;
2221 b[30] = b[31] = 7;
2222}
2223
2224/* The minimum additive increment value for the congestion control table */
2225#define CC_MIN_INCR 2U
2226
2227/**
2228 * t4_load_mtus - write the MTU and congestion control HW tables
2229 * @adap: the adapter
2230 * @mtus: the values for the MTU table
2231 * @alpha: the values for the congestion control alpha parameter
2232 * @beta: the values for the congestion control beta parameter
2233 *
2234 * Write the HW MTU table with the supplied MTUs and the high-speed
2235 * congestion control table with the supplied alpha, beta, and MTUs.
2236 * We write the two tables together because the additive increments
2237 * depend on the MTUs.
2238 */
2239void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2240 const unsigned short *alpha, const unsigned short *beta)
2241{
2242 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2243 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2244 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2245 28672, 40960, 57344, 81920, 114688, 163840, 229376
2246 };
2247
2248 unsigned int i, w;
2249
2250 for (i = 0; i < NMTUS; ++i) {
2251 unsigned int mtu = mtus[i];
2252 unsigned int log2 = fls(mtu);
2253
2254 if (!(mtu & ((1 << log2) >> 2))) /* round */
2255 log2--;
2256 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
2257 MTUWIDTH(log2) | MTUVALUE(mtu));
2258
2259 for (w = 0; w < NCCTRL_WIN; ++w) {
2260 unsigned int inc;
2261
2262 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2263 CC_MIN_INCR);
2264
2265 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
2266 (w << 16) | (beta[w] << 13) | inc);
2267 }
2268 }
2269}
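/*
 * Worked example of the additive-increment computation above (numbers are
 * only illustrative): with mtu = 1500, alpha[w] = 1 and avg_pkts[w] = 2,
 * inc = max((1500 - 40) * 1 / 2, CC_MIN_INCR) = 730. Whenever the scaled
 * value falls below CC_MIN_INCR, the increment is clamped to 2.
 */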
2270
2271/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002272 * get_mps_bg_map - return the buffer groups associated with a port
2273 * @adap: the adapter
2274 * @idx: the port index
2275 *
2276 * Returns a bitmap indicating which MPS buffer groups are associated
2277 * with the given port. Bit i is set if buffer group i is used by the
2278 * port.
2279 */
2280static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2281{
2282 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2283
2284 if (n == 0)
2285 return idx == 0 ? 0xf : 0;
2286 if (n == 1)
2287 return idx < 2 ? (3 << (2 * idx)) : 0;
2288 return 1 << idx;
2289}
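/*
 * Illustrative results of the mapping above: n == 0 gives port 0 all four
 * buffer groups (0xf); n == 1 gives port 0 groups 0-1 (0x3) and port 1
 * groups 2-3 (0xc); otherwise each port gets the single group matching its
 * index (1 << idx).
 */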
2290
2291/**
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05302292 * t4_get_port_type_description - return Port Type string description
2293 * @port_type: firmware Port Type enumeration
2294 */
2295const char *t4_get_port_type_description(enum fw_port_type port_type)
2296{
2297 static const char *const port_type_description[] = {
2298 "R XFI",
2299 "R XAUI",
2300 "T SGMII",
2301 "T XFI",
2302 "T XAUI",
2303 "KX4",
2304 "CX4",
2305 "KX",
2306 "KR",
2307 "R SFP+",
2308 "KR/KX",
2309 "KR/KX/KX4",
2310 "R QSFP_10G",
2311 "",
2312 "R QSFP",
2313 "R BP40_BA",
2314 };
2315
2316 if (port_type < ARRAY_SIZE(port_type_description))
2317 return port_type_description[port_type];
2318 return "UNKNOWN";
2319}
2320
2321/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002322 * t4_get_port_stats - collect port statistics
2323 * @adap: the adapter
2324 * @idx: the port index
2325 * @p: the stats structure to fill
2326 *
2327 * Collect statistics related to the given port from HW.
2328 */
2329void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2330{
2331 u32 bgmap = get_mps_bg_map(adap, idx);
2332
2333#define GET_STAT(name) \
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002334 t4_read_reg64(adap, \
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05302335 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002336 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002337#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2338
2339 p->tx_octets = GET_STAT(TX_PORT_BYTES);
2340 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
2341 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
2342 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
2343 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
2344 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
2345 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
2346 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
2347 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
2348 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
2349 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
2350 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2351 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
2352 p->tx_drop = GET_STAT(TX_PORT_DROP);
2353 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
2354 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
2355 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
2356 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
2357 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
2358 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
2359 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
2360 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
2361 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
2362
2363 p->rx_octets = GET_STAT(RX_PORT_BYTES);
2364 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
2365 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
2366 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
2367 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
2368 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
2369 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2370 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
2371 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
2372 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
2373 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
2374 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
2375 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
2376 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
2377 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
2378 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
2379 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2380 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
2381 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
2382 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
2383 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
2384 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
2385 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
2386 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
2387 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
2388 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
2389 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
2390
2391 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2392 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2393 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2394 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2395 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2396 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2397 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2398 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2399
2400#undef GET_STAT
2401#undef GET_STAT_COM
2402}
2403
2404/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002405 * t4_wol_magic_enable - enable/disable magic packet WoL
2406 * @adap: the adapter
2407 * @port: the physical port index
2408 * @addr: MAC address expected in magic packets, %NULL to disable
2409 *
2410 * Enables/disables magic packet wake-on-LAN for the selected port.
2411 */
2412void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2413 const u8 *addr)
2414{
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002415 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
2416
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05302417 if (is_t4(adap->params.chip)) {
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002418 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
2419 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
2420 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2421 } else {
2422 mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
2423 mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
2424 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
2425 }
2426
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002427 if (addr) {
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002428 t4_write_reg(adap, mag_id_reg_l,
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002429 (addr[2] << 24) | (addr[3] << 16) |
2430 (addr[4] << 8) | addr[5]);
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002431 t4_write_reg(adap, mag_id_reg_h,
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002432 (addr[0] << 8) | addr[1]);
2433 }
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002434 t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002435 addr ? MAGICEN : 0);
2436}
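/*
 * Illustrative sketch: enabling magic-packet wake-up with the MAC address of
 * the port's net_device, and disabling it again later; netdev and pi are
 * assumed names for the example:
 *
 *	t4_wol_magic_enable(adap, pi->tx_chan, netdev->dev_addr);
 *	...
 *	t4_wol_magic_enable(adap, pi->tx_chan, NULL);
 */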
2437
2438/**
2439 * t4_wol_pat_enable - enable/disable pattern-based WoL
2440 * @adap: the adapter
2441 * @port: the physical port index
2442 * @map: bitmap of which HW pattern filters to set
2443 * @mask0: byte mask for bytes 0-63 of a packet
2444 * @mask1: byte mask for bytes 64-127 of a packet
2445 * @crc: Ethernet CRC for selected bytes
2446 * @enable: enable/disable switch
2447 *
2448 * Sets the pattern filters indicated in @map to mask out the bytes
2449 * specified in @mask0/@mask1 in received packets and compare the CRC of
2450 * the resulting packet against @crc. If @enable is %true pattern-based
2451 * WoL is enabled, otherwise disabled.
2452 */
2453int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2454 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2455{
2456 int i;
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002457 u32 port_cfg_reg;
2458
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05302459 if (is_t4(adap->params.chip))
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002460 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2461 else
2462 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002463
2464 if (!enable) {
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002465 t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002466 return 0;
2467 }
2468 if (map > 0xff)
2469 return -EINVAL;
2470
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002471#define EPIO_REG(name) \
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05302472 (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002473 T5_PORT_REG(port, MAC_PORT_EPIO_##name))
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002474
2475 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2476 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2477 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2478
2479 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2480 if (!(map & 1))
2481 continue;
2482
2483 /* write byte masks */
2484 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2485 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2486 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
Naresh Kumar Innace91a922012-11-15 22:41:17 +05302487 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002488 return -ETIMEDOUT;
2489
2490 /* write CRC */
2491 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2492 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2493 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
Naresh Kumar Innace91a922012-11-15 22:41:17 +05302494 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002495 return -ETIMEDOUT;
2496 }
2497#undef EPIO_REG
2498
 2499	t4_set_reg_field(adap, port_cfg_reg, 0, PATEN);
2500 return 0;
2501}
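/*
 * Illustrative sketch: programming HW pattern filter 0 only (bit 0 of @map)
 * and enabling pattern-match wake-up; mask0, mask1 and crc are placeholders
 * supplied by the caller:
 *
 *	err = t4_wol_pat_enable(adap, port, 0x1, mask0, mask1, crc, true);
 */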
2502
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00002503/* t4_mk_filtdelwr - create a delete filter WR
2504 * @ftid: the filter ID
2505 * @wr: the filter work request to populate
2506 * @qid: ingress queue to receive the delete notification
2507 *
2508 * Creates a filter work request to delete the supplied filter. If @qid is
2509 * negative the delete notification is suppressed.
2510 */
2511void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
2512{
2513 memset(wr, 0, sizeof(*wr));
2514 wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
2515 wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
2516 wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
2517 V_FW_FILTER_WR_NOREPLY(qid < 0));
2518 wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
2519 if (qid >= 0)
2520 wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
2521}
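/*
 * Illustrative sketch: building a delete-filter work request for filter ID
 * ftid and asking for the completion to be delivered to ingress queue
 * fwevtq_id (both names assumed), before handing the skb to the usual
 * work-request transmit path:
 *
 *	struct fw_filter_wr *wr = (void *)__skb_put(skb, sizeof(*wr));
 *
 *	t4_mk_filtdelwr(ftid, wr, fwevtq_id);
 */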
2522
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002523#define INIT_CMD(var, cmd, rd_wr) do { \
2524 (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
2525 FW_CMD_REQUEST | FW_CMD_##rd_wr); \
2526 (var).retval_len16 = htonl(FW_LEN16(var)); \
2527} while (0)
2528
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302529int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2530 u32 addr, u32 val)
2531{
2532 struct fw_ldst_cmd c;
2533
2534 memset(&c, 0, sizeof(c));
Vipul Pandya636f9d32012-09-26 02:39:39 +00002535 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2536 FW_CMD_WRITE |
2537 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302538 c.cycles_to_len16 = htonl(FW_LEN16(c));
2539 c.u.addrval.addr = htonl(addr);
2540 c.u.addrval.val = htonl(val);
2541
2542 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2543}
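/*
 * Illustrative only: this helper pokes a register in the firmware's address
 * space through an LDST mailbox command, e.g.
 *
 *	ret = t4_fwaddrspace_write(adap, adap->mbox, some_fw_addr, val);
 *
 * where some_fw_addr and val are placeholders for a firmware-defined
 * address/value pair.
 */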
2544
Ben Hutchings49ce9c22012-07-10 10:56:00 +00002545/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002546 * t4_mdio_rd - read a PHY register through MDIO
2547 * @adap: the adapter
2548 * @mbox: mailbox to use for the FW command
2549 * @phy_addr: the PHY address
2550 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2551 * @reg: the register to read
2552 * @valp: where to store the value
2553 *
2554 * Issues a FW command through the given mailbox to read a PHY register.
2555 */
2556int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2557 unsigned int mmd, unsigned int reg, u16 *valp)
2558{
2559 int ret;
2560 struct fw_ldst_cmd c;
2561
2562 memset(&c, 0, sizeof(c));
2563 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2564 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2565 c.cycles_to_len16 = htonl(FW_LEN16(c));
2566 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2567 FW_LDST_CMD_MMD(mmd));
2568 c.u.mdio.raddr = htons(reg);
2569
2570 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2571 if (ret == 0)
2572 *valp = ntohs(c.u.mdio.rval);
2573 return ret;
2574}
2575
2576/**
2577 * t4_mdio_wr - write a PHY register through MDIO
2578 * @adap: the adapter
2579 * @mbox: mailbox to use for the FW command
2580 * @phy_addr: the PHY address
2581 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2582 * @reg: the register to write
2583 * @valp: value to write
2584 *
2585 * Issues a FW command through the given mailbox to write a PHY register.
2586 */
2587int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2588 unsigned int mmd, unsigned int reg, u16 val)
2589{
2590 struct fw_ldst_cmd c;
2591
2592 memset(&c, 0, sizeof(c));
2593 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2594 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2595 c.cycles_to_len16 = htonl(FW_LEN16(c));
2596 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2597 FW_LDST_CMD_MMD(mmd));
2598 c.u.mdio.raddr = htons(reg);
2599 c.u.mdio.rval = htons(val);
2600
2601 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2602}
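/*
 * Illustrative sketch: reading and then writing back a clause-45 PHY
 * register through the firmware MDIO interface; phy_addr, mmd, reg and
 * some_bit are placeholders for the example:
 *
 *	u16 v;
 *
 *	ret = t4_mdio_rd(adap, adap->mbox, phy_addr, mmd, reg, &v);
 *	if (!ret)
 *		ret = t4_mdio_wr(adap, adap->mbox, phy_addr, mmd, reg,
 *				 v | some_bit);
 */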
2603
2604/**
Kumar Sanghvi68bce1922014-03-13 20:50:47 +05302605 * t4_sge_decode_idma_state - decode the idma state
 2606 * @adapter: the adapter
2607 * @state: the state idma is stuck in
2608 */
2609void t4_sge_decode_idma_state(struct adapter *adapter, int state)
2610{
2611 static const char * const t4_decode[] = {
2612 "IDMA_IDLE",
2613 "IDMA_PUSH_MORE_CPL_FIFO",
2614 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2615 "Not used",
2616 "IDMA_PHYSADDR_SEND_PCIEHDR",
2617 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2618 "IDMA_PHYSADDR_SEND_PAYLOAD",
2619 "IDMA_SEND_FIFO_TO_IMSG",
2620 "IDMA_FL_REQ_DATA_FL_PREP",
2621 "IDMA_FL_REQ_DATA_FL",
2622 "IDMA_FL_DROP",
2623 "IDMA_FL_H_REQ_HEADER_FL",
2624 "IDMA_FL_H_SEND_PCIEHDR",
2625 "IDMA_FL_H_PUSH_CPL_FIFO",
2626 "IDMA_FL_H_SEND_CPL",
2627 "IDMA_FL_H_SEND_IP_HDR_FIRST",
2628 "IDMA_FL_H_SEND_IP_HDR",
2629 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
2630 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
2631 "IDMA_FL_H_SEND_IP_HDR_PADDING",
2632 "IDMA_FL_D_SEND_PCIEHDR",
2633 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2634 "IDMA_FL_D_REQ_NEXT_DATA_FL",
2635 "IDMA_FL_SEND_PCIEHDR",
2636 "IDMA_FL_PUSH_CPL_FIFO",
2637 "IDMA_FL_SEND_CPL",
2638 "IDMA_FL_SEND_PAYLOAD_FIRST",
2639 "IDMA_FL_SEND_PAYLOAD",
2640 "IDMA_FL_REQ_NEXT_DATA_FL",
2641 "IDMA_FL_SEND_NEXT_PCIEHDR",
2642 "IDMA_FL_SEND_PADDING",
2643 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2644 "IDMA_FL_SEND_FIFO_TO_IMSG",
2645 "IDMA_FL_REQ_DATAFL_DONE",
2646 "IDMA_FL_REQ_HEADERFL_DONE",
2647 };
2648 static const char * const t5_decode[] = {
2649 "IDMA_IDLE",
2650 "IDMA_ALMOST_IDLE",
2651 "IDMA_PUSH_MORE_CPL_FIFO",
2652 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2653 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
2654 "IDMA_PHYSADDR_SEND_PCIEHDR",
2655 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2656 "IDMA_PHYSADDR_SEND_PAYLOAD",
2657 "IDMA_SEND_FIFO_TO_IMSG",
2658 "IDMA_FL_REQ_DATA_FL",
2659 "IDMA_FL_DROP",
2660 "IDMA_FL_DROP_SEND_INC",
2661 "IDMA_FL_H_REQ_HEADER_FL",
2662 "IDMA_FL_H_SEND_PCIEHDR",
2663 "IDMA_FL_H_PUSH_CPL_FIFO",
2664 "IDMA_FL_H_SEND_CPL",
2665 "IDMA_FL_H_SEND_IP_HDR_FIRST",
2666 "IDMA_FL_H_SEND_IP_HDR",
2667 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
2668 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
2669 "IDMA_FL_H_SEND_IP_HDR_PADDING",
2670 "IDMA_FL_D_SEND_PCIEHDR",
2671 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2672 "IDMA_FL_D_REQ_NEXT_DATA_FL",
2673 "IDMA_FL_SEND_PCIEHDR",
2674 "IDMA_FL_PUSH_CPL_FIFO",
2675 "IDMA_FL_SEND_CPL",
2676 "IDMA_FL_SEND_PAYLOAD_FIRST",
2677 "IDMA_FL_SEND_PAYLOAD",
2678 "IDMA_FL_REQ_NEXT_DATA_FL",
2679 "IDMA_FL_SEND_NEXT_PCIEHDR",
2680 "IDMA_FL_SEND_PADDING",
2681 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2682 };
2683 static const u32 sge_regs[] = {
2684 SGE_DEBUG_DATA_LOW_INDEX_2,
2685 SGE_DEBUG_DATA_LOW_INDEX_3,
2686 SGE_DEBUG_DATA_HIGH_INDEX_10,
2687 };
2688 const char **sge_idma_decode;
2689 int sge_idma_decode_nstates;
2690 int i;
2691
2692 if (is_t4(adapter->params.chip)) {
2693 sge_idma_decode = (const char **)t4_decode;
2694 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
2695 } else {
2696 sge_idma_decode = (const char **)t5_decode;
2697 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
2698 }
2699
2700 if (state < sge_idma_decode_nstates)
2701 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
2702 else
2703 CH_WARN(adapter, "idma state %d unknown\n", state);
2704
2705 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
2706 CH_WARN(adapter, "SGE register %#x value %#x\n",
2707 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
2708}
2709
2710/**
Vipul Pandya636f9d32012-09-26 02:39:39 +00002711 * t4_fw_hello - establish communication with FW
2712 * @adap: the adapter
2713 * @mbox: mailbox to use for the FW command
2714 * @evt_mbox: mailbox to receive async FW events
2715 * @master: specifies the caller's willingness to be the device master
2716 * @state: returns the current device state (if non-NULL)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002717 *
Vipul Pandya636f9d32012-09-26 02:39:39 +00002718 * Issues a command to establish communication with FW. Returns either
2719 * an error (negative integer) or the mailbox of the Master PF.
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002720 */
2721int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2722 enum dev_master master, enum dev_state *state)
2723{
2724 int ret;
2725 struct fw_hello_cmd c;
Vipul Pandya636f9d32012-09-26 02:39:39 +00002726 u32 v;
2727 unsigned int master_mbox;
2728 int retries = FW_CMD_HELLO_RETRIES;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002729
Vipul Pandya636f9d32012-09-26 02:39:39 +00002730retry:
2731 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002732 INIT_CMD(c, HELLO, WRITE);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05302733 c.err_to_clearinit = htonl(
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002734 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2735 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
Vipul Pandya636f9d32012-09-26 02:39:39 +00002736 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
2737 FW_HELLO_CMD_MBMASTER_MASK) |
2738 FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
2739 FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
2740 FW_HELLO_CMD_CLEARINIT);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002741
Vipul Pandya636f9d32012-09-26 02:39:39 +00002742 /*
2743 * Issue the HELLO command to the firmware. If it's not successful
2744 * but indicates that we got a "busy" or "timeout" condition, retry
2745 * the HELLO until we exhaust our retry limit.
2746 */
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002747 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
Vipul Pandya636f9d32012-09-26 02:39:39 +00002748 if (ret < 0) {
2749 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2750 goto retry;
2751 return ret;
2752 }
2753
Naresh Kumar Innace91a922012-11-15 22:41:17 +05302754 v = ntohl(c.err_to_clearinit);
Vipul Pandya636f9d32012-09-26 02:39:39 +00002755 master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
2756 if (state) {
2757 if (v & FW_HELLO_CMD_ERR)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002758 *state = DEV_STATE_ERR;
Vipul Pandya636f9d32012-09-26 02:39:39 +00002759 else if (v & FW_HELLO_CMD_INIT)
2760 *state = DEV_STATE_INIT;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002761 else
2762 *state = DEV_STATE_UNINIT;
2763 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00002764
2765 /*
2766 * If we're not the Master PF then we need to wait around for the
2767 * Master PF Driver to finish setting up the adapter.
2768 *
2769 * Note that we also do this wait if we're a non-Master-capable PF and
2770 * there is no current Master PF; a Master PF may show up momentarily
2771 * and we wouldn't want to fail pointlessly. (This can happen when an
2772 * OS loads lots of different drivers rapidly at the same time). In
2773 * this case, the Master PF returned by the firmware will be
2774 * FW_PCIE_FW_MASTER_MASK so the test below will work ...
2775 */
2776 if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
2777 master_mbox != mbox) {
2778 int waiting = FW_CMD_HELLO_TIMEOUT;
2779
2780 /*
2781 * Wait for the firmware to either indicate an error or
2782 * initialized state. If we see either of these we bail out
2783 * and report the issue to the caller. If we exhaust the
2784 * "hello timeout" and we haven't exhausted our retries, try
2785 * again. Otherwise bail with a timeout error.
2786 */
2787 for (;;) {
2788 u32 pcie_fw;
2789
2790 msleep(50);
2791 waiting -= 50;
2792
2793 /*
 2794			 * If neither Error nor Initialized is indicated
 2795			 * by the firmware, keep waiting until we exhaust our
2796 * timeout ... and then retry if we haven't exhausted
2797 * our retries ...
2798 */
2799 pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
2800 if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
2801 if (waiting <= 0) {
2802 if (retries-- > 0)
2803 goto retry;
2804
2805 return -ETIMEDOUT;
2806 }
2807 continue;
2808 }
2809
2810 /*
 2811			 * We either have an Error or Initialized condition;
 2812			 * report errors preferentially.
2813 */
2814 if (state) {
2815 if (pcie_fw & FW_PCIE_FW_ERR)
2816 *state = DEV_STATE_ERR;
2817 else if (pcie_fw & FW_PCIE_FW_INIT)
2818 *state = DEV_STATE_INIT;
2819 }
2820
2821 /*
 2822			 * If we arrived before a Master PF was selected and
 2823			 * one has since become valid, grab its identity for
 2824			 * our caller.
2825 */
2826 if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
2827 (pcie_fw & FW_PCIE_FW_MASTER_VLD))
2828 master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
2829 break;
2830 }
2831 }
2832
2833 return master_mbox;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002834}
2835
2836/**
2837 * t4_fw_bye - end communication with FW
2838 * @adap: the adapter
2839 * @mbox: mailbox to use for the FW command
2840 *
2841 * Issues a command to terminate communication with FW.
2842 */
2843int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2844{
2845 struct fw_bye_cmd c;
2846
Vipul Pandya0062b152012-11-06 03:37:09 +00002847 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002848 INIT_CMD(c, BYE, WRITE);
2849 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2850}
2851
2852/**
2853 *	t4_early_init - ask FW to initialize the device
2854 * @adap: the adapter
2855 * @mbox: mailbox to use for the FW command
2856 *
2857 * Issues a command to FW to partially initialize the device. This
2858 * performs initialization that generally doesn't depend on user input.
2859 */
2860int t4_early_init(struct adapter *adap, unsigned int mbox)
2861{
2862 struct fw_initialize_cmd c;
2863
Vipul Pandya0062b152012-11-06 03:37:09 +00002864 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002865 INIT_CMD(c, INITIALIZE, WRITE);
2866 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2867}
2868
2869/**
2870 * t4_fw_reset - issue a reset to FW
2871 * @adap: the adapter
2872 * @mbox: mailbox to use for the FW command
2873 * @reset: specifies the type of reset to perform
2874 *
2875 * Issues a reset command of the specified type to FW.
2876 */
2877int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2878{
2879 struct fw_reset_cmd c;
2880
Vipul Pandya0062b152012-11-06 03:37:09 +00002881 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002882 INIT_CMD(c, RESET, WRITE);
2883 c.val = htonl(reset);
2884 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2885}
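/*
 * Usage sketch (an assumption, not upstream code): the reset type is a
 * PL reset control word; the PIORST | PIORSTMODE combination used by
 * t4_fw_halt()/t4_fw_restart() below asks the firmware for a full chip
 * reset on the caller's behalf.
 *
 *	int ret = t4_fw_reset(adap, adap->mbox, PIORST | PIORSTMODE);
 *
 *	if (ret)
 *		dev_warn(adap->pdev_dev, "FW RESET failed, error %d\n", ret);
 */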
2886
2887/**
Vipul Pandya26f7cbc2012-09-26 02:39:42 +00002888 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
2889 * @adap: the adapter
2890 * @mbox: mailbox to use for the FW RESET command (if desired)
2891 * @force: force uP into RESET even if FW RESET command fails
2892 *
2893 * Issues a RESET command to firmware (if desired) with a HALT indication
2894 * and then puts the microprocessor into RESET state. The RESET command
2895 * will only be issued if a legitimate mailbox is provided (mbox <=
2896 * FW_PCIE_FW_MASTER_MASK).
2897 *
2898 * This is generally used in order for the host to safely manipulate the
2899 * adapter without fear of conflicting with whatever the firmware might
2900 * be doing. The only way out of this state is to RESTART the firmware
2901 * ...
2902 */
stephen hemmingerde5b8672013-12-18 14:16:47 -08002903static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
Vipul Pandya26f7cbc2012-09-26 02:39:42 +00002904{
2905 int ret = 0;
2906
2907 /*
2908 * If a legitimate mailbox is provided, issue a RESET command
2909 * with a HALT indication.
2910 */
2911 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2912 struct fw_reset_cmd c;
2913
2914 memset(&c, 0, sizeof(c));
2915 INIT_CMD(c, RESET, WRITE);
2916 c.val = htonl(PIORST | PIORSTMODE);
2917 c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
2918 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2919 }
2920
2921 /*
2922 * Normally we won't complete the operation if the firmware RESET
2923 * command fails but if our caller insists we'll go ahead and put the
2924 * uP into RESET. This can be useful if the firmware is hung or even
2925 * missing ... We'll have to take the risk of putting the uP into
2926 * RESET without the cooperation of firmware in that case.
2927 *
2928 * We also force the firmware's HALT flag to be on in case we bypassed
2929 * the firmware RESET command above or we're dealing with old firmware
2930 * which doesn't have the HALT capability. This will serve as a flag
2931 * for the incoming firmware to know that it's coming out of a HALT
2932 * rather than a RESET ... if it's new enough to understand that ...
2933 */
2934 if (ret == 0 || force) {
2935 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
2936 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
2937 FW_PCIE_FW_HALT);
2938 }
2939
2940 /*
2941 * And we always return the result of the firmware RESET command
2942 * even when we force the uP into RESET ...
2943 */
2944 return ret;
2945}
2946
2947/**
2948 * t4_fw_restart - restart the firmware by taking the uP out of RESET
2949 * @adap: the adapter
2950 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@reset: if we want to do a RESET to restart things
2951 *
2952 * Restart firmware previously halted by t4_fw_halt(). On successful
2953 * return the previous PF Master remains as the new PF Master and there
2954 * is no need to issue a new HELLO command, etc.
2955 *
2956 * We do this in two ways:
2957 *
2958 * 1. If we're dealing with newer firmware we'll simply want to take
2959 * the chip's microprocessor out of RESET. This will cause the
2960 * firmware to start up from its start vector. And then we'll loop
2961 * until the firmware indicates it's started again (PCIE_FW.HALT
2962 *	    reset to 0) or we time out.
2963 *
2964 * 2. If we're dealing with older firmware then we'll need to RESET
2965 * the chip since older firmware won't recognize the PCIE_FW.HALT
2966 *	    flag and so won't automatically RESET itself on startup.
2967 */
stephen hemmingerde5b8672013-12-18 14:16:47 -08002968static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
Vipul Pandya26f7cbc2012-09-26 02:39:42 +00002969{
2970 if (reset) {
2971 /*
2972 * Since we're directing the RESET instead of the firmware
2973 * doing it automatically, we need to clear the PCIE_FW.HALT
2974 * bit.
2975 */
2976 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
2977
2978 /*
2979 * If we've been given a valid mailbox, first try to get the
2980 * firmware to do the RESET. If that works, great and we can
2981 * return success. Otherwise, if we haven't been given a
2982 * valid mailbox or the RESET command failed, fall back to
2983 * hitting the chip with a hammer.
2984 */
2985 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2986 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
2987 msleep(100);
2988 if (t4_fw_reset(adap, mbox,
2989 PIORST | PIORSTMODE) == 0)
2990 return 0;
2991 }
2992
2993 t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
2994 msleep(2000);
2995 } else {
2996 int ms;
2997
2998 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
2999 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
3000 if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
3001 return 0;
3002 msleep(100);
3003 ms += 100;
3004 }
3005 return -ETIMEDOUT;
3006 }
3007 return 0;
3008}
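/*
 * Sketch of the intended halt/restart bracket (t4_fw_upgrade() below is
 * the in-file user of this pattern).  The work done while the uP is
 * quiesced is whatever the caller stopped the firmware for; the helper
 * name is purely illustrative.
 *
 *	ret = t4_fw_halt(adap, adap->mbox, 0);
 *	if (ret == 0) {
 *		do_quiesced_work(adap);		// hypothetical helper
 *		ret = t4_fw_restart(adap, adap->mbox, 1);
 *	}
 */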
3009
3010/**
3011 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
3012 * @adap: the adapter
3013 * @mbox: mailbox to use for the FW RESET command (if desired)
3014 * @fw_data: the firmware image to write
3015 * @size: image size
3016 * @force: force upgrade even if firmware doesn't cooperate
3017 *
3018 * Perform all of the steps necessary for upgrading an adapter's
3019 * firmware image. Normally this requires the cooperation of the
3020 * existing firmware in order to halt all existing activities
3021 * but if an invalid mailbox token is passed in we skip that step
3022 * (though we'll still put the adapter microprocessor into RESET in
3023 * that case).
3024 *
3025 * On successful return the new firmware will have been loaded and
3026 * the adapter will have been fully RESET losing all previous setup
3027 * state. On unsuccessful return the adapter may be completely hosed ...
3028 * positive errno indicates that the adapter is ~probably~ intact, a
3029 * negative errno indicates that things are looking bad ...
3030 */
stephen hemmingerde5b8672013-12-18 14:16:47 -08003031static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
3032 const u8 *fw_data, unsigned int size, int force)
Vipul Pandya26f7cbc2012-09-26 02:39:42 +00003033{
3034 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
3035 int reset, ret;
3036
3037 ret = t4_fw_halt(adap, mbox, force);
3038 if (ret < 0 && !force)
3039 return ret;
3040
3041 ret = t4_load_fw(adap, fw_data, size);
3042 if (ret < 0)
3043 return ret;
3044
3045 /*
3046 * Older versions of the firmware don't understand the new
3047 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
3048 * restart. So for newly loaded older firmware we'll have to do the
3049 * RESET for it so it starts up on a clean slate. We can tell if
3050 * the newly loaded firmware will handle this right by checking
3051 * its header flags to see if it advertises the capability.
3052 */
3053 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
3054 return t4_fw_restart(adap, mbox, reset);
3055}
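/*
 * Error-handling sketch that follows the sign convention documented
 * above.  The firmware image is assumed to have come from
 * request_firmware(); "fw" and "force" are caller-owned.
 *
 *	ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size, force);
 *	if (ret < 0)
 *		dev_err(adap->pdev_dev,
 *			"firmware upgrade failed (%d); adapter state suspect\n",
 *			ret);
 *	else if (ret > 0)
 *		dev_warn(adap->pdev_dev,
 *			 "firmware upgrade refused (%d); adapter likely intact\n",
 *			 ret);
 */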
3056
Vipul Pandya636f9d32012-09-26 02:39:39 +00003057/**
3058 * t4_fixup_host_params - fix up host-dependent parameters
3059 * @adap: the adapter
3060 * @page_size: the host's Base Page Size
3061 * @cache_line_size: the host's Cache Line Size
3062 *
3063 * Various registers in T4 contain values which are dependent on the
3064 * host's Base Page and Cache Line Sizes. This function will fix all of
3065 * those registers with the appropriate values as passed in ...
3066 */
3067int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3068 unsigned int cache_line_size)
3069{
3070 unsigned int page_shift = fls(page_size) - 1;
3071 unsigned int sge_hps = page_shift - 10;
3072 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
3073 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
3074 unsigned int fl_align_log = fls(fl_align) - 1;
3075
3076 t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
3077 HOSTPAGESIZEPF0(sge_hps) |
3078 HOSTPAGESIZEPF1(sge_hps) |
3079 HOSTPAGESIZEPF2(sge_hps) |
3080 HOSTPAGESIZEPF3(sge_hps) |
3081 HOSTPAGESIZEPF4(sge_hps) |
3082 HOSTPAGESIZEPF5(sge_hps) |
3083 HOSTPAGESIZEPF6(sge_hps) |
3084 HOSTPAGESIZEPF7(sge_hps));
3085
3086 t4_set_reg_field(adap, SGE_CONTROL,
Vipul Pandya0dad9e92012-11-07 03:45:46 +00003087 INGPADBOUNDARY_MASK |
Vipul Pandya636f9d32012-09-26 02:39:39 +00003088 EGRSTATUSPAGESIZE_MASK,
3089 INGPADBOUNDARY(fl_align_log - 5) |
3090 EGRSTATUSPAGESIZE(stat_len != 64));
3091
3092 /*
3093 * Adjust various SGE Free List Host Buffer Sizes.
3094 *
3095 * This is something of a crock since we're using fixed indices into
3096 * the array which are also known by the sge.c code and the T4
3097 * Firmware Configuration File. We need to come up with a much better
3098 * approach to managing this array. For now, the first four entries
3099 * are:
3100 *
3101 * 0: Host Page Size
3102 * 1: 64KB
3103 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
3104 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
3105 *
3106 * For the single-MTU buffers in unpacked mode we need to include
3107 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
3108 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
3109	 * Padding boundary.  All of these are accommodated in the Factory
3110 * Default Firmware Configuration File but we need to adjust it for
3111 * this host's cache line size.
3112 */
3113 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
3114 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
3115 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
3116 & ~(fl_align-1));
3117 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
3118 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
3119 & ~(fl_align-1));
3120
3121 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
3122
3123 return 0;
3124}
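/*
 * Typical call, matching how cxgb4_main.c invokes this helper when the
 * host has to supply its own configuration; treat the exact call site
 * as an assumption.
 *
 *	ret = t4_fixup_host_params(adap, PAGE_SIZE, L1_CACHE_BYTES);
 *	if (ret < 0)
 *		dev_err(adap->pdev_dev, "failed to fix up host params\n");
 */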
3125
3126/**
3127 * t4_fw_initialize - ask FW to initialize the device
3128 * @adap: the adapter
3129 * @mbox: mailbox to use for the FW command
3130 *
3131 * Issues a command to FW to partially initialize the device. This
3132 * performs initialization that generally doesn't depend on user input.
3133 */
3134int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
3135{
3136 struct fw_initialize_cmd c;
3137
3138 memset(&c, 0, sizeof(c));
3139 INIT_CMD(c, INITIALIZE, WRITE);
3140 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3141}
3142
3143/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003144 * t4_query_params - query FW or device parameters
3145 * @adap: the adapter
3146 * @mbox: mailbox to use for the FW command
3147 * @pf: the PF
3148 * @vf: the VF
3149 * @nparams: the number of parameters
3150 * @params: the parameter names
3151 * @val: the parameter values
3152 *
3153 * Reads the value of FW or device parameters. Up to 7 parameters can be
3154 * queried at once.
3155 */
3156int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3157 unsigned int vf, unsigned int nparams, const u32 *params,
3158 u32 *val)
3159{
3160 int i, ret;
3161 struct fw_params_cmd c;
3162 __be32 *p = &c.param[0].mnem;
3163
3164 if (nparams > 7)
3165 return -EINVAL;
3166
3167 memset(&c, 0, sizeof(c));
3168 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
3169 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
3170 FW_PARAMS_CMD_VFN(vf));
3171 c.retval_len16 = htonl(FW_LEN16(c));
3172 for (i = 0; i < nparams; i++, p += 2)
3173 *p = htonl(*params++);
3174
3175 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3176 if (ret == 0)
3177 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3178 *val++ = ntohl(*p);
3179 return ret;
3180}
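/*
 * Query sketch: read the adapter's port vector.  The FW_PARAMS_*
 * mnemonic helpers come from t4fw_api.h and adap->fn is the PF number;
 * both are assumptions borrowed from the rest of the driver.
 *
 *	u32 param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *		    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
 *	u32 portvec;
 *	int ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
 *				  &param, &portvec);
 *	if (ret == 0)
 *		adap->params.portvec = portvec;
 */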
3181
3182/**
Anish Bhatt688848b2014-06-19 21:37:13 -07003183 * t4_set_params_nosleep - sets FW or device parameters
3184 * @adap: the adapter
3185 * @mbox: mailbox to use for the FW command
3186 * @pf: the PF
3187 * @vf: the VF
3188 * @nparams: the number of parameters
3189 * @params: the parameter names
3190 * @val: the parameter values
3191 *
3192 *	Does not sleep.
3193 * Sets the value of FW or device parameters. Up to 7 parameters can be
3194 * specified at once.
3195 */
3196int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
3197 unsigned int pf, unsigned int vf,
3198 unsigned int nparams, const u32 *params,
3199 const u32 *val)
3200{
3201 struct fw_params_cmd c;
3202 __be32 *p = &c.param[0].mnem;
3203
3204 if (nparams > 7)
3205 return -EINVAL;
3206
3207 memset(&c, 0, sizeof(c));
3208 c.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
3209 FW_CMD_REQUEST | FW_CMD_WRITE |
3210 FW_PARAMS_CMD_PFN(pf) |
3211 FW_PARAMS_CMD_VFN(vf));
3212 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3213
3214 while (nparams--) {
3215 *p++ = cpu_to_be32(*params++);
3216 *p++ = cpu_to_be32(*val++);
3217 }
3218
3219 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3220}
3221
3222/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003223 * t4_set_params - sets FW or device parameters
3224 * @adap: the adapter
3225 * @mbox: mailbox to use for the FW command
3226 * @pf: the PF
3227 * @vf: the VF
3228 * @nparams: the number of parameters
3229 * @params: the parameter names
3230 * @val: the parameter values
3231 *
3232 * Sets the value of FW or device parameters. Up to 7 parameters can be
3233 * specified at once.
3234 */
3235int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3236 unsigned int vf, unsigned int nparams, const u32 *params,
3237 const u32 *val)
3238{
3239 struct fw_params_cmd c;
3240 __be32 *p = &c.param[0].mnem;
3241
3242 if (nparams > 7)
3243 return -EINVAL;
3244
3245 memset(&c, 0, sizeof(c));
3246 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
3247 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
3248 FW_PARAMS_CMD_VFN(vf));
3249 c.retval_len16 = htonl(FW_LEN16(c));
3250 while (nparams--) {
3251 *p++ = htonl(*params++);
3252 *p++ = htonl(*val++);
3253 }
3254
3255 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3256}
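/*
 * Companion sketch to t4_query_params() above: writing parameters uses
 * the same mnemonic encoding, one (name, value) pair per slot.  The
 * params[]/vals[] contents are assumed to have been built by the caller
 * with the FW_PARAMS_* helpers from t4fw_api.h.
 *
 *	u32 params[2], vals[2];
 *	int ret;
 *
 *	(fill in params[0..1] and vals[0..1] with the desired settings)
 *	ret = t4_set_params(adap, adap->mbox, adap->fn, 0, 2, params, vals);
 */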
3257
3258/**
3259 * t4_cfg_pfvf - configure PF/VF resource limits
3260 * @adap: the adapter
3261 * @mbox: mailbox to use for the FW command
3262 * @pf: the PF being configured
3263 * @vf: the VF being configured
3264 * @txq: the max number of egress queues
3265 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
3266 * @rxqi: the max number of interrupt-capable ingress queues
3267 * @rxq: the max number of interruptless ingress queues
3268 * @tc: the PCI traffic class
3269 * @vi: the max number of virtual interfaces
3270 * @cmask: the channel access rights mask for the PF/VF
3271 * @pmask: the port access rights mask for the PF/VF
3272 * @nexact: the maximum number of exact MPS filters
3273 * @rcaps: read capabilities
3274 * @wxcaps: write/execute capabilities
3275 *
3276 * Configures resource limits and capabilities for a physical or virtual
3277 * function.
3278 */
3279int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
3280 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
3281 unsigned int rxqi, unsigned int rxq, unsigned int tc,
3282 unsigned int vi, unsigned int cmask, unsigned int pmask,
3283 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
3284{
3285 struct fw_pfvf_cmd c;
3286
3287 memset(&c, 0, sizeof(c));
3288 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
3289 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
3290 FW_PFVF_CMD_VFN(vf));
3291 c.retval_len16 = htonl(FW_LEN16(c));
3292 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
3293 FW_PFVF_CMD_NIQ(rxq));
Casey Leedom81323b72010-06-25 12:10:32 +00003294 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003295 FW_PFVF_CMD_PMASK(pmask) |
3296 FW_PFVF_CMD_NEQ(txq));
3297 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
3298 FW_PFVF_CMD_NEXACTF(nexact));
3299 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
3300 FW_PFVF_CMD_WX_CAPS(wxcaps) |
3301 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
3302 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3303}
3304
3305/**
3306 * t4_alloc_vi - allocate a virtual interface
3307 * @adap: the adapter
3308 * @mbox: mailbox to use for the FW command
3309 * @port: physical port associated with the VI
3310 * @pf: the PF owning the VI
3311 * @vf: the VF owning the VI
3312 * @nmac: number of MAC addresses needed (1 to 5)
3313 * @mac: the MAC addresses of the VI
3314 * @rss_size: size of RSS table slice associated with this VI
3315 *
3316 * Allocates a virtual interface for the given physical port. If @mac is
3317 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
3318 *	@mac should be large enough to hold @nmac Ethernet addresses; they are
3319 * stored consecutively so the space needed is @nmac * 6 bytes.
3320 * Returns a negative error number or the non-negative VI id.
3321 */
3322int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
3323 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
3324 unsigned int *rss_size)
3325{
3326 int ret;
3327 struct fw_vi_cmd c;
3328
3329 memset(&c, 0, sizeof(c));
3330 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
3331 FW_CMD_WRITE | FW_CMD_EXEC |
3332 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
3333 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
3334 c.portid_pkd = FW_VI_CMD_PORTID(port);
3335 c.nmac = nmac - 1;
3336
3337 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3338 if (ret)
3339 return ret;
3340
3341 if (mac) {
3342 memcpy(mac, c.mac, sizeof(c.mac));
3343 switch (nmac) {
3344 case 5:
3345 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
3346 case 4:
3347 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
3348 case 3:
3349 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
3350 case 2:
3351 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
3352 }
3353 }
3354 if (rss_size)
3355 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00003356 return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003357}
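/*
 * Allocation sketch, mirroring t4_port_init() at the bottom of this
 * file: one MAC address per VI and the RSS slice size captured for
 * later RSS table setup.  "port", "pf" and "vf" are caller-supplied.
 *
 *	u8 mac[ETH_ALEN];
 *	unsigned int rss_size;
 *	int viid = t4_alloc_vi(adap, adap->mbox, port, pf, vf, 1, mac,
 *			       &rss_size);
 *
 *	if (viid < 0)
 *		return viid;		// allocation failed
 */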
3358
3359/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003360 * t4_set_rxmode - set Rx properties of a virtual interface
3361 * @adap: the adapter
3362 * @mbox: mailbox to use for the FW command
3363 * @viid: the VI id
3364 * @mtu: the new MTU or -1
3365 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
3366 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
3367 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00003368 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003369 * @sleep_ok: if true we may sleep while awaiting command completion
3370 *
3371 * Sets Rx properties of a virtual interface.
3372 */
3373int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00003374 int mtu, int promisc, int all_multi, int bcast, int vlanex,
3375 bool sleep_ok)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003376{
3377 struct fw_vi_rxmode_cmd c;
3378
3379 /* convert to FW values */
3380 if (mtu < 0)
3381 mtu = FW_RXMODE_MTU_NO_CHG;
3382 if (promisc < 0)
3383 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
3384 if (all_multi < 0)
3385 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
3386 if (bcast < 0)
3387 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00003388 if (vlanex < 0)
3389 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003390
3391 memset(&c, 0, sizeof(c));
3392 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
3393 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
3394 c.retval_len16 = htonl(FW_LEN16(c));
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00003395 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
3396 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
3397 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
3398 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
3399 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003400 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3401}
3402
3403/**
3404 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
3405 * @adap: the adapter
3406 * @mbox: mailbox to use for the FW command
3407 * @viid: the VI id
3408 * @free: if true any existing filters for this VI id are first removed
3409 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
3410 * @addr: the MAC address(es)
3411 * @idx: where to store the index of each allocated filter
3412 * @hash: pointer to hash address filter bitmap
3413 * @sleep_ok: call is allowed to sleep
3414 *
3415 * Allocates an exact-match filter for each of the supplied addresses and
3416 * sets it to the corresponding address. If @idx is not %NULL it should
3417 * have at least @naddr entries, each of which will be set to the index of
3418 * the filter allocated for the corresponding MAC address. If a filter
3419 * could not be allocated for an address its index is set to 0xffff.
3420 * If @hash is not %NULL addresses that fail to allocate an exact filter
3421 * are hashed and update the hash filter bitmap pointed at by @hash.
3422 *
3423 * Returns a negative error number or the number of filters allocated.
3424 */
3425int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
3426 unsigned int viid, bool free, unsigned int naddr,
3427 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
3428{
3429 int i, ret;
3430 struct fw_vi_mac_cmd c;
3431 struct fw_vi_mac_exact *p;
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303432 unsigned int max_naddr = is_t4(adap->params.chip) ?
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003433 NUM_MPS_CLS_SRAM_L_INSTANCES :
3434 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003435
3436 if (naddr > 7)
3437 return -EINVAL;
3438
3439 memset(&c, 0, sizeof(c));
3440 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3441 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
3442 FW_VI_MAC_CMD_VIID(viid));
3443 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
3444 FW_CMD_LEN16((naddr + 2) / 2));
3445
3446 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3447 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3448 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
3449 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
3450 }
3451
3452 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
3453 if (ret)
3454 return ret;
3455
3456 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3457 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3458
3459 if (idx)
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003460 idx[i] = index >= max_naddr ? 0xffff : index;
3461 if (index < max_naddr)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003462 ret++;
3463 else if (hash)
Dimitris Michailidisce9aeb52010-12-03 10:39:04 +00003464 *hash |= (1ULL << hash_mac_addr(addr[i]));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003465 }
3466 return ret;
3467}
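/*
 * Sketch of programming a short unicast list: exact-match filters are
 * requested first and any address that can't get one falls into the
 * hash bitmap, which the caller then writes with t4_set_addr_hash()
 * below.  The netdev/port_info field names are assumptions.
 *
 *	const u8 *addrs[1] = { netdev->dev_addr };
 *	u16 filt_idx[1];
 *	u64 mhash = 0;
 *	int ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, true, 1,
 *				    addrs, filt_idx, &mhash, true);
 *
 *	if (ret < 0)
 *		return ret;		// no filters allocated
 */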
3468
3469/**
3470 * t4_change_mac - modifies the exact-match filter for a MAC address
3471 * @adap: the adapter
3472 * @mbox: mailbox to use for the FW command
3473 * @viid: the VI id
3474 * @idx: index of existing filter for old value of MAC address, or -1
3475 * @addr: the new MAC address value
3476 * @persist: whether a new MAC allocation should be persistent
3477 * @add_smt: if true also add the address to the HW SMT
3478 *
3479 * Modifies an exact-match filter and sets it to the new MAC address.
3480 * Note that in general it is not possible to modify the value of a given
3481 * filter so the generic way to modify an address filter is to free the one
3482 * being used by the old address value and allocate a new filter for the
3483 * new address value. @idx can be -1 if the address is a new addition.
3484 *
3485 * Returns a negative error number or the index of the filter with the new
3486 * MAC value.
3487 */
3488int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
3489 int idx, const u8 *addr, bool persist, bool add_smt)
3490{
3491 int ret, mode;
3492 struct fw_vi_mac_cmd c;
3493 struct fw_vi_mac_exact *p = c.u.exact;
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303494 unsigned int max_mac_addr = is_t4(adap->params.chip) ?
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003495 NUM_MPS_CLS_SRAM_L_INSTANCES :
3496 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003497
3498 if (idx < 0) /* new allocation */
3499 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
3500 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
3501
3502 memset(&c, 0, sizeof(c));
3503 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3504 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
3505 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
3506 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3507 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
3508 FW_VI_MAC_CMD_IDX(idx));
3509 memcpy(p->macaddr, addr, sizeof(p->macaddr));
3510
3511 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3512 if (ret == 0) {
3513 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003514 if (ret >= max_mac_addr)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003515 ret = -ENOMEM;
3516 }
3517 return ret;
3518}
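/*
 * Sketch of the usual "change my station MAC" flow: pass the index of
 * the filter currently holding the old address (or -1 for a first-time
 * allocation) and remember the index that comes back.  The
 * pi->xact_addr_filt field name is an assumption taken from the rest of
 * the driver.
 *
 *	ret = t4_change_mac(adap, adap->mbox, pi->viid, pi->xact_addr_filt,
 *			    netdev->dev_addr, true, true);
 *	if (ret >= 0)
 *		pi->xact_addr_filt = ret;
 */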
3519
3520/**
3521 * t4_set_addr_hash - program the MAC inexact-match hash filter
3522 * @adap: the adapter
3523 * @mbox: mailbox to use for the FW command
3524 * @viid: the VI id
3525 * @ucast: whether the hash filter should also match unicast addresses
3526 * @vec: the value to be written to the hash filter
3527 * @sleep_ok: call is allowed to sleep
3528 *
3529 * Sets the 64-bit inexact-match hash filter for a virtual interface.
3530 */
3531int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
3532 bool ucast, u64 vec, bool sleep_ok)
3533{
3534 struct fw_vi_mac_cmd c;
3535
3536 memset(&c, 0, sizeof(c));
3537 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3538 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
3539 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
3540 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
3541 FW_CMD_LEN16(1));
3542 c.u.hash.hashvec = cpu_to_be64(vec);
3543 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3544}
3545
3546/**
Anish Bhatt688848b2014-06-19 21:37:13 -07003547 * t4_enable_vi_params - enable/disable a virtual interface
3548 * @adap: the adapter
3549 * @mbox: mailbox to use for the FW command
3550 * @viid: the VI id
3551 * @rx_en: 1=enable Rx, 0=disable Rx
3552 * @tx_en: 1=enable Tx, 0=disable Tx
3553 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
3554 *
3555 * Enables/disables a virtual interface. Note that setting DCB Enable
3556 * only makes sense when enabling a Virtual Interface ...
3557 */
3558int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
3559 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
3560{
3561 struct fw_vi_enable_cmd c;
3562
3563 memset(&c, 0, sizeof(c));
3564 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3565 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3566
3567 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
3568 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c) |
3569 FW_VI_ENABLE_CMD_DCB_INFO(dcb_en));
Anish Bhatt30f00842014-08-05 16:05:23 -07003570 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
Anish Bhatt688848b2014-06-19 21:37:13 -07003571}
3572
3573/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003574 * t4_enable_vi - enable/disable a virtual interface
3575 * @adap: the adapter
3576 * @mbox: mailbox to use for the FW command
3577 * @viid: the VI id
3578 * @rx_en: 1=enable Rx, 0=disable Rx
3579 * @tx_en: 1=enable Tx, 0=disable Tx
3580 *
3581 * Enables/disables a virtual interface.
3582 */
3583int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
3584 bool rx_en, bool tx_en)
3585{
Anish Bhatt688848b2014-06-19 21:37:13 -07003586 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003587}
3588
3589/**
3590 * t4_identify_port - identify a VI's port by blinking its LED
3591 * @adap: the adapter
3592 * @mbox: mailbox to use for the FW command
3593 * @viid: the VI id
3594 * @nblinks: how many times to blink LED at 2.5 Hz
3595 *
3596 * Identifies a VI's port by blinking its LED.
3597 */
3598int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
3599 unsigned int nblinks)
3600{
3601 struct fw_vi_enable_cmd c;
3602
Vipul Pandya0062b152012-11-06 03:37:09 +00003603 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003604 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3605 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3606 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
3607 c.blinkdur = htons(nblinks);
3608 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3609}
3610
3611/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003612 * t4_iq_free - free an ingress queue and its FLs
3613 * @adap: the adapter
3614 * @mbox: mailbox to use for the FW command
3615 * @pf: the PF owning the queues
3616 * @vf: the VF owning the queues
3617 * @iqtype: the ingress queue type
3618 * @iqid: ingress queue id
3619 * @fl0id: FL0 queue id or 0xffff if no attached FL0
3620 * @fl1id: FL1 queue id or 0xffff if no attached FL1
3621 *
3622 * Frees an ingress queue and its associated FLs, if any.
3623 */
3624int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3625 unsigned int vf, unsigned int iqtype, unsigned int iqid,
3626 unsigned int fl0id, unsigned int fl1id)
3627{
3628 struct fw_iq_cmd c;
3629
3630 memset(&c, 0, sizeof(c));
3631 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
3632 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
3633 FW_IQ_CMD_VFN(vf));
3634 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
3635 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
3636 c.iqid = htons(iqid);
3637 c.fl0id = htons(fl0id);
3638 c.fl1id = htons(fl1id);
3639 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3640}
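/*
 * Teardown sketch for an ingress queue with one attached free list.
 * FW_IQ_TYPE_FL_INT_CAP comes from t4fw_api.h, and the cntxt_id field
 * names are assumptions based on the SGE queue structures in sge.c.
 *
 *	ret = t4_iq_free(adap, adap->mbox, adap->fn, 0,
 *			 FW_IQ_TYPE_FL_INT_CAP, rspq->cntxt_id,
 *			 fl ? fl->cntxt_id : 0xffff, 0xffff);
 */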
3641
3642/**
3643 * t4_eth_eq_free - free an Ethernet egress queue
3644 * @adap: the adapter
3645 * @mbox: mailbox to use for the FW command
3646 * @pf: the PF owning the queue
3647 * @vf: the VF owning the queue
3648 * @eqid: egress queue id
3649 *
3650 * Frees an Ethernet egress queue.
3651 */
3652int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3653 unsigned int vf, unsigned int eqid)
3654{
3655 struct fw_eq_eth_cmd c;
3656
3657 memset(&c, 0, sizeof(c));
3658 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
3659 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
3660 FW_EQ_ETH_CMD_VFN(vf));
3661 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
3662 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
3663 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3664}
3665
3666/**
3667 * t4_ctrl_eq_free - free a control egress queue
3668 * @adap: the adapter
3669 * @mbox: mailbox to use for the FW command
3670 * @pf: the PF owning the queue
3671 * @vf: the VF owning the queue
3672 * @eqid: egress queue id
3673 *
3674 * Frees a control egress queue.
3675 */
3676int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3677 unsigned int vf, unsigned int eqid)
3678{
3679 struct fw_eq_ctrl_cmd c;
3680
3681 memset(&c, 0, sizeof(c));
3682 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
3683 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
3684 FW_EQ_CTRL_CMD_VFN(vf));
3685 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
3686 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
3687 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3688}
3689
3690/**
3691 * t4_ofld_eq_free - free an offload egress queue
3692 * @adap: the adapter
3693 * @mbox: mailbox to use for the FW command
3694 * @pf: the PF owning the queue
3695 * @vf: the VF owning the queue
3696 * @eqid: egress queue id
3697 *
3698 *	Frees an offload egress queue.
3699 */
3700int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3701 unsigned int vf, unsigned int eqid)
3702{
3703 struct fw_eq_ofld_cmd c;
3704
3705 memset(&c, 0, sizeof(c));
3706 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
3707 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
3708 FW_EQ_OFLD_CMD_VFN(vf));
3709 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
3710 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
3711 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3712}
3713
3714/**
3715 * t4_handle_fw_rpl - process a FW reply message
3716 * @adap: the adapter
3717 * @rpl: start of the FW message
3718 *
3719 * Processes a FW message, such as link state change messages.
3720 */
3721int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
3722{
3723 u8 opcode = *(const u8 *)rpl;
3724
3725 if (opcode == FW_PORT_CMD) { /* link/module state change message */
3726 int speed = 0, fc = 0;
3727 const struct fw_port_cmd *p = (void *)rpl;
3728 int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
3729 int port = adap->chan_map[chan];
3730 struct port_info *pi = adap2pinfo(adap, port);
3731 struct link_config *lc = &pi->link_cfg;
3732 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
3733 int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
3734 u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
3735
3736 if (stat & FW_PORT_CMD_RXPAUSE)
3737 fc |= PAUSE_RX;
3738 if (stat & FW_PORT_CMD_TXPAUSE)
3739 fc |= PAUSE_TX;
3740 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
Ben Hutchingse8b39012014-02-23 00:03:24 +00003741 speed = 100;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003742 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
Ben Hutchingse8b39012014-02-23 00:03:24 +00003743 speed = 1000;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003744 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
Ben Hutchingse8b39012014-02-23 00:03:24 +00003745 speed = 10000;
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05303746 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
Ben Hutchingse8b39012014-02-23 00:03:24 +00003747 speed = 40000;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003748
3749 if (link_ok != lc->link_ok || speed != lc->speed ||
3750 fc != lc->fc) { /* something changed */
3751 lc->link_ok = link_ok;
3752 lc->speed = speed;
3753 lc->fc = fc;
Hariprasad Shenai444018a2014-09-01 19:54:55 +05303754 lc->supported = be16_to_cpu(p->u.info.pcap);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003755 t4_os_link_changed(adap, port, link_ok);
3756 }
3757 if (mod != pi->mod_type) {
3758 pi->mod_type = mod;
3759 t4_os_portmod_changed(adap, port);
3760 }
3761 }
3762 return 0;
3763}
3764
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00003765static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003766{
3767 u16 val;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003768
Jiang Liue5c8ae52012-08-20 13:53:19 -06003769 if (pci_is_pcie(adapter->pdev)) {
3770 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003771 p->speed = val & PCI_EXP_LNKSTA_CLS;
3772 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3773 }
3774}
3775
3776/**
3777 * init_link_config - initialize a link's SW state
3778 * @lc: structure holding the link state
3779 * @caps: link capabilities
3780 *
3781 * Initializes the SW state maintained for each link, including the link's
3782 * capabilities and default speed/flow-control/autonegotiation settings.
3783 */
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00003784static void init_link_config(struct link_config *lc, unsigned int caps)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003785{
3786 lc->supported = caps;
3787 lc->requested_speed = 0;
3788 lc->speed = 0;
3789 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3790 if (lc->supported & FW_PORT_CAP_ANEG) {
3791 lc->advertising = lc->supported & ADVERT_MASK;
3792 lc->autoneg = AUTONEG_ENABLE;
3793 lc->requested_fc |= PAUSE_AUTONEG;
3794 } else {
3795 lc->advertising = 0;
3796 lc->autoneg = AUTONEG_DISABLE;
3797 }
3798}
3799
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00003800int t4_wait_dev_ready(struct adapter *adap)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003801{
3802 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
3803 return 0;
3804 msleep(500);
3805 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3806}
3807
Bill Pemberton91744942012-12-03 09:23:02 -05003808static int get_flash_params(struct adapter *adap)
Dimitris Michailidis900a6592010-06-18 10:05:27 +00003809{
3810 int ret;
3811 u32 info;
3812
3813 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
3814 if (!ret)
3815 ret = sf1_read(adap, 3, 0, 1, &info);
3816 t4_write_reg(adap, SF_OP, 0); /* unlock SF */
3817 if (ret)
3818 return ret;
3819
3820 if ((info & 0xff) != 0x20) /* not a Numonix flash */
3821 return -EINVAL;
3822 info >>= 16; /* log2 of size */
3823 if (info >= 0x14 && info < 0x18)
3824 adap->params.sf_nsec = 1 << (info - 16);
3825 else if (info == 0x18)
3826 adap->params.sf_nsec = 64;
3827 else
3828 return -EINVAL;
3829 adap->params.sf_size = 1 << info;
3830 adap->params.sf_fw_start =
3831 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
3832 return 0;
3833}
3834
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003835/**
3836 * t4_prep_adapter - prepare SW and HW for operation
3837 * @adapter: the adapter
3839 *
3840 * Initialize adapter SW state for the various HW modules, set initial
3841 * values for some adapter tunables, take PHYs out of reset, and
3842 * initialize the MDIO interface.
3843 */
Bill Pemberton91744942012-12-03 09:23:02 -05003844int t4_prep_adapter(struct adapter *adapter)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003845{
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003846 int ret, ver;
3847 uint16_t device_id;
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303848 u32 pl_rev;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003849
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00003850 ret = t4_wait_dev_ready(adapter);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003851 if (ret < 0)
3852 return ret;
3853
3854 get_pci_mode(adapter, &adapter->params.pci);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303855 pl_rev = G_REV(t4_read_reg(adapter, PL_REV));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003856
Dimitris Michailidis900a6592010-06-18 10:05:27 +00003857 ret = get_flash_params(adapter);
3858 if (ret < 0) {
3859 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
3860 return ret;
3861 }
3862
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003863 /* Retrieve adapter's device ID
3864 */
3865 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
3866 ver = device_id >> 12;
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303867 adapter->params.chip = 0;
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003868 switch (ver) {
3869 case CHELSIO_T4:
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303870 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003871 break;
3872 case CHELSIO_T5:
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303873 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003874 break;
3875 default:
3876 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3877 device_id);
3878 return -EINVAL;
3879 }
3880
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003881 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3882
3883 /*
3884 * Default port for debugging in case we can't reach FW.
3885 */
3886 adapter->params.nports = 1;
3887 adapter->params.portvec = 1;
Vipul Pandya636f9d32012-09-26 02:39:39 +00003888 adapter->params.vpd.cclk = 50000;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003889 return 0;
3890}
3891
Kumar Sanghvidcf7b6f2013-12-18 16:38:23 +05303892/**
3893 * t4_init_tp_params - initialize adap->params.tp
3894 * @adap: the adapter
3895 *
3896 * Initialize various fields of the adapter's TP Parameters structure.
3897 */
3898int t4_init_tp_params(struct adapter *adap)
3899{
3900 int chan;
3901 u32 v;
3902
3903 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3904 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
3905 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
3906
3907 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
3908 for (chan = 0; chan < NCHAN; chan++)
3909 adap->params.tp.tx_modq[chan] = chan;
3910
3911	/* Cache the adapter's Compressed Filter Mode and global Ingress
3912 * Configuration.
3913 */
3914 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3915 &adap->params.tp.vlan_pri_map, 1,
3916 TP_VLAN_PRI_MAP);
3917 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3918 &adap->params.tp.ingress_config, 1,
3919 TP_INGRESS_CONFIG);
3920
3921 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
3922 * shift positions of several elements of the Compressed Filter Tuple
3923 * for this adapter which we need frequently ...
3924 */
3925 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
3926 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
3927 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
3928 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
3929 F_PROTOCOL);
3930
3931 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
3932	 * represents the presence of an Outer VLAN instead of a VNIC ID.
3933 */
3934 if ((adap->params.tp.ingress_config & F_VNIC) == 0)
3935 adap->params.tp.vnic_shift = -1;
3936
3937 return 0;
3938}
3939
3940/**
3941 * t4_filter_field_shift - calculate filter field shift
3942 * @adap: the adapter
3943 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
3944 *
3945 * Return the shift position of a filter field within the Compressed
3946 * Filter Tuple. The filter field is specified via its selection bit
3947 *	within TP_VLAN_PRI_MAP (filter mode), e.g. F_VLAN.
3948 */
3949int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
3950{
3951 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
3952 unsigned int sel;
3953 int field_shift;
3954
3955 if ((filter_mode & filter_sel) == 0)
3956 return -1;
3957
3958 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
3959 switch (filter_mode & sel) {
3960 case F_FCOE:
3961 field_shift += W_FT_FCOE;
3962 break;
3963 case F_PORT:
3964 field_shift += W_FT_PORT;
3965 break;
3966 case F_VNIC_ID:
3967 field_shift += W_FT_VNIC_ID;
3968 break;
3969 case F_VLAN:
3970 field_shift += W_FT_VLAN;
3971 break;
3972 case F_TOS:
3973 field_shift += W_FT_TOS;
3974 break;
3975 case F_PROTOCOL:
3976 field_shift += W_FT_PROTOCOL;
3977 break;
3978 case F_ETHERTYPE:
3979 field_shift += W_FT_ETHERTYPE;
3980 break;
3981 case F_MACMATCH:
3982 field_shift += W_FT_MACMATCH;
3983 break;
3984 case F_MPSHITTYPE:
3985 field_shift += W_FT_MPSHITTYPE;
3986 break;
3987 case F_FRAGMENTATION:
3988 field_shift += W_FT_FRAGMENTATION;
3989 break;
3990 }
3991 }
3992 return field_shift;
3993}
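/*
 * Worked example of the accumulation above, assuming the Compressed
 * Filter Mode enables F_PORT, F_VLAN and F_PROTOCOL only:
 *
 *	t4_filter_field_shift(adap, F_PORT)     == 0
 *	t4_filter_field_shift(adap, F_VLAN)     == W_FT_PORT
 *	t4_filter_field_shift(adap, F_PROTOCOL) == W_FT_PORT + W_FT_VLAN
 *
 * i.e. each enabled field that sits below the requested one in
 * TP_VLAN_PRI_MAP contributes its W_FT_* width to the shift, and a
 * field that isn't enabled at all yields -1.
 */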
3994
Bill Pemberton91744942012-12-03 09:23:02 -05003995int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003996{
3997 u8 addr[6];
3998 int ret, i, j = 0;
3999 struct fw_port_cmd c;
Dimitris Michailidisf7965642010-07-11 12:01:18 +00004000 struct fw_rss_vi_config_cmd rvc;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00004001
4002 memset(&c, 0, sizeof(c));
Dimitris Michailidisf7965642010-07-11 12:01:18 +00004003 memset(&rvc, 0, sizeof(rvc));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00004004
4005 for_each_port(adap, i) {
4006 unsigned int rss_size;
4007 struct port_info *p = adap2pinfo(adap, i);
4008
4009 while ((adap->params.portvec & (1 << j)) == 0)
4010 j++;
4011
4012 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
4013 FW_CMD_REQUEST | FW_CMD_READ |
4014 FW_PORT_CMD_PORTID(j));
4015 c.action_to_len16 = htonl(
4016 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
4017 FW_LEN16(c));
4018 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4019 if (ret)
4020 return ret;
4021
4022 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
4023 if (ret < 0)
4024 return ret;
4025
4026 p->viid = ret;
4027 p->tx_chan = j;
4028 p->lport = j;
4029 p->rss_size = rss_size;
4030 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
Thadeu Lima de Souza Cascardo40c9f8a2014-06-21 09:48:08 -03004031 adap->port[i]->dev_port = j;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00004032
4033 ret = ntohl(c.u.info.lstatus_to_modtype);
4034 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
4035 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
4036 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00004037 p->mod_type = FW_PORT_MOD_TYPE_NA;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00004038
Dimitris Michailidisf7965642010-07-11 12:01:18 +00004039 rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
4040 FW_CMD_REQUEST | FW_CMD_READ |
4041 FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
4042 rvc.retval_len16 = htonl(FW_LEN16(rvc));
4043 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
4044 if (ret)
4045 return ret;
4046 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
4047
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00004048 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
4049 j++;
4050 }
4051 return 0;
4052}