/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"

static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
			 const u8 *fw_data, unsigned int size, int force);
/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
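
/*
 * A typical poll, as used by the serial-flash and BIST helpers later in this
 * file:
 *	t4_wait_op_done(adap, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
 * waits for SF_BUSY in SF_OP to clear, checking up to SF_ATTEMPTS times with
 * a 5 usec pause between checks.
 */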

/**
 *	t4_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr); /* flush */
}

/**
 *	t4_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals,
		      unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t4_write_indirect - write indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect addresses
 *	@data_reg: register holding the value for the indirect registers
 *	@vals: values to write
 *	@nregs: how many indirect registers to write
 *	@start_idx: address of first indirect register to write
 *
 *	Writes a sequential block of registers that are accessed indirectly
 *	through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}

/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 */
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
	u32 req = ENABLE | FUNCTION(adap->fn) | reg;

	if (is_t4(adap->params.chip))
		req |= F_LOCALCFG;

	t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req);
	*val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA);

	/* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
	 * Configuration Space read.  (None of the other fields matter when
	 * ENABLE is 0 so a simple register write is easier than a
	 * read-modify-write via t4_set_reg_field().)
	 */
	t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
	dev_err(adap->pdev_dev,
		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		(unsigned long long)t4_read_reg64(adap, data_reg),
		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
}

/**
 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 *	to respond.  @sleep_ok determines whether we may sleep while awaiting
 *	the response.  If sleeping is allowed we use progressive backoff,
 *	otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
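	/* Back-off schedule, in milliseconds, used while polling for the
	 * firmware's reply when sleeping is allowed; the last entry is
	 * re-used until FW_CMD_MAX_TIMEOUT is reached.
	 */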
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

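	/* Hand the command (now sitting in the mailbox data registers) over
	 * to the firmware by marking the mailbox valid and FW-owned, then
	 * poll for the reply.
	 */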
	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg); /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx]; /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			if (!(v & MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);

			if (FW_CMD_RETVAL_GET((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_GET((int)res);
		}
	}

	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	return -ETIMEDOUT;
}
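
/* Most callers go through the thin t4_wr_mbox() inline wrapper (in cxgb4.h)
 * rather than calling this directly, e.g. (sketch):
 *
 *	struct fw_port_cmd c = { ... };
 *	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 *
 * as done by t4_link_start() and t4_restart_aneg() below.
 */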

/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@addr: address of first byte requested
 *	@idx: which MC to access
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
	u32 mc_bist_status_rdata, mc_bist_data_pattern;

	if (is_t4(adap->params.chip)) {
		mc_bist_cmd = MC_BIST_CMD;
		mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
		mc_bist_cmd_len = MC_BIST_CMD_LEN;
		mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
		mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len, 64);
	t4_write_reg(adap, mc_bist_data_pattern, 0xc);
	t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
		     BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)

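	/* The 64-byte burst lands in the BIST status read-data registers;
	 * copy it out (highest-numbered register first) along with the
	 * trailing 64-bit ECC word.
	 */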
	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;

	if (is_t4(adap->params.chip)) {
		edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
		edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
						idx);
	} else {
		edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern =
			EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
		edc_bist_status_rdata =
			EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
	}

	if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd,
		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 *	t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 *	@adap: the adapter
 *	@win: PCI-E Memory Window to use
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@addr: address within indicated memory type
 *	@len: amount of memory to transfer
 *	@buf: host memory buffer
 *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 *	Reads/writes an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address and host buffer must be aligned on 32-bit
 *	boundaries; the length may be arbitrary.  The memory is transferred as
 *	a raw byte sequence from/to the firmware's memory.  If this memory
 *	contains data structures which contain multi-byte integers, it's the
 *	caller's responsibility to perform appropriate byte order conversions.
 */
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
		 u32 len, __be32 *buf, int dir)
{
	u32 pos, offset, resid, memoffset;
	u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;

	/* Argument sanity checks ...
	 */
	if (addr & 0x3)
		return -EINVAL;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware.  So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/* Offset into the region of memory which is being accessed
	 * MEM_EDC0 = 0
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- T4
	 * MEM_MC0  = 2 -- For T5
	 * MEM_MC1  = 3 -- For T5
	 */
	edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR));
	if (mtype != MEM_MC1)
		memoffset = (mtype * (edc_size * 1024 * 1024));
	else {
		mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap,
						       MA_EXT_MEMORY_BAR));
		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
	}

	/* Determine the PCIE_MEM_ACCESS_OFFSET */
	addr = addr + memoffset;

	/* Each PCI-E Memory Window is programmed with a window size -- or
	 * "aperture" -- which controls the granularity of its mapping onto
	 * adapter memory.  We need to grab that aperture in order to know
	 * how to use the specified window.  The window is also programmed
	 * with the base address of the Memory Window in BAR0's address
	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
	 * the address is relative to BAR0.
	 */
	mem_reg = t4_read_reg(adap,
			      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN,
						  win));
	mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10);
	mem_base = GET_PCIEOFST(mem_reg) << 10;
	if (is_t4(adap->params.chip))
		mem_base -= adap->t4_bar0;
	win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.
	 */
	pos = addr & ~(mem_aperture-1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.  (Read it back to ensure that changes propagate before we
	 * attempt to use the new value.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win),
		     pos | win_pf);
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = (__force __be32) t4_read_reg(adap,
							      mem_base + offset);
		else
			t4_write_reg(adap, mem_base + offset,
				     (__force u32) *buf++);
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.  Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_write_reg(adap,
				     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
							 win), pos | win_pf);
			t4_read_reg(adap,
				    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
							win));
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount.  The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid) {
		union {
			__be32 word;
			char byte[4];
		} last;
		unsigned char *bp;
		int i;

		if (dir == T4_MEMORY_READ) {
			last.word = (__force __be32) t4_read_reg(adap,
								 mem_base + offset);
			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
				bp[i] = last.byte[i];
		} else {
			last.word = *buf;
			for (i = resid; i < 4; i++)
				last.byte[i] = 0;
			t4_write_reg(adap, mem_base + offset,
				     (__force u32) last.word);
		}
	}

	return 0;
}
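
/* A usage sketch (illustrative only -- the window number and any locking are
 * the caller's choice): reading 128 bytes from EDC0 through memory window 0
 * into a __be32 buffer would be
 *	t4_memory_rw(adap, 0, MEM_EDC0, addr, 128, buf, T4_MEMORY_READ);
 */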

#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_BASE		0x400
#define VPD_BASE_OLD		0
#define VPD_LEN			1024
#define CHELSIO_VPD_UNIQUE_ID	0x82

/**
 *	t4_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: whether to enable or disable write protection
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
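	/* Write 0xc to the EEPROM status word at EEPROM_STAT_ADDR to turn
	 * write protection on, or 0 to turn it off, via the PCI VPD interface.
	 */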
	unsigned int v = enable ? 0xc : 0;
	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
	return ret < 0 ? ret : 0;
}

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	u32 cclk_param, cclk_val;
	int i, ret, addr;
	int ec, sn, pn;
	u8 *vpd, csum;
	unsigned int vpdr_len, kw_offset, id_len;

	vpd = vmalloc(VPD_LEN);
	if (!vpd)
		return -ENOMEM;

	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
	if (ret < 0)
		goto out;

	/* The VPD shall have a unique identifier specified by the PCI SIG.
	 * For Chelsio adapters, the identifier is 0x82: the first byte of
	 * the VPD shall be CHELSIO_VPD_UNIQUE_ID (0x82).  The VPD programming
	 * software is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
	if (ret < 0)
		goto out;

	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
		ret = -EINVAL;
		goto out;
	}

	id_len = pci_vpd_lrdt_size(vpd);
	if (id_len > ID_LEN)
		id_len = ID_LEN;

	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
		ret = -EINVAL;
		goto out;
	}

	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
	if (vpdr_len + kw_offset > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		ret = -EINVAL;
		goto out;
	}

#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		ret = -EINVAL; \
		goto out; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		ret = -EINVAL;
		goto out;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
#undef FIND_VPD_KW

	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);
	i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strim(p->pn);

	/*
	 * Ask firmware for the Core Clock since it knows how to translate the
	 * Reference Clock ('V2') VPD field into a Core Clock value ...
	 */
	cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		      FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
	ret = t4_query_params(adapter, adapter->mbox, 0, 0,
			      1, &cclk_param, &cclk_val);

out:
	vfree(vpd);
	if (ret)
		return ret;
	p->cclk = cclk_val;

	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_DATA, val);
	t4_write_reg(adapter, SF_OP, lock |
		     cont | BYTECNT(byte_cnt - 1) | OP_WR);
	return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

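	/* Build the fast-read command: the byte-swapped flash address with
	 * the SF_RD_DATA_FAST opcode in the low byte, issued below as a
	 * single 4-byte SF operation.
	 */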
	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32) (htonl(*data));
	}
	return 0;
}

/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP, 0); /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
	return ret;
}

/**
 *	t4_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 *	t4_get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}

/* Is the given firmware API compatible with the one the driver was compiled
 * with?
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{

	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}

/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 */
static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
				int k, int c)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version supported with this driver";
		goto install;
	}

	return 0;

install:
	dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
		"installing firmware %u.%u.%u.%u on card.\n",
		FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
		FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason,
		FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
		FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));

	return 1;
}

int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state,
	       int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			    sizeof(*card_fw) / sizeof(uint32_t),
			    (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		dev_err(adap->pdev_dev,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver))) {
		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			dev_err(adap->pdev_dev,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
			"chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			state,
			FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d),
			FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d),
			FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
			FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c),
			FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
			FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}

/**
 *	t4_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			dev_err(adapter->pdev_dev,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
	return ret;
}

/**
 *	t4_flash_cfg_addr - return the address of the flash configuration file
 *	@adapter: the adapter
 *
 *	Return the address within the flash where the Firmware Configuration
 *	File is stored.
 */
unsigned int t4_flash_cfg_addr(struct adapter *adapter)
{
	if (adapter->params.sf_size == 0x100000)
		return FLASH_FPGA_CFG_START;
	else
		return FLASH_CFG_START;
}

/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}

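	/* A valid image carries a checksum word chosen so that the 32-bit
	 * sum of all of its (big-endian) words is 0xffffffff.
	 */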
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	return ret;
}

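/* Link capabilities that may be advertised: the supported port speeds plus
 * autonegotiation.
 */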
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_ANEG)

/**
 *	t4_link_start - apply link configuration to MAC/PHY
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_restart_aneg - restart autonegotiation
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *
 *	Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

typedef void (*int_handler_t)(struct adapter *adap);

struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};

/**
 *	t4_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@acts: table of interrupt actions
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally emitting a warning or alert message.  The table is terminated
 *	by an entry specifying mask 0.  Returns the number of fatal interrupt
 *	conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				  status & acts->mask);
		} else if (acts->msg && printk_ratelimit())
			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		if (acts->int_handler)
			acts->int_handler(adapter);
		mask |= acts->mask;
	}
	status &= mask;
	if (status) /* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}
1305
1306/*
1307 * Interrupt handler for the PCIE module.
1308 */
1309static void pcie_intr_handler(struct adapter *adapter)
1310{
Joe Perches005b5712010-12-14 21:36:53 +00001311 static const struct intr_info sysbus_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001312 { RNPP, "RXNP array parity error", -1, 1 },
1313 { RPCP, "RXPC array parity error", -1, 1 },
1314 { RCIP, "RXCIF array parity error", -1, 1 },
1315 { RCCP, "Rx completions control array parity error", -1, 1 },
1316 { RFTP, "RXFT array parity error", -1, 1 },
1317 { 0 }
1318 };
Joe Perches005b5712010-12-14 21:36:53 +00001319 static const struct intr_info pcie_port_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001320 { TPCP, "TXPC array parity error", -1, 1 },
1321 { TNPP, "TXNP array parity error", -1, 1 },
1322 { TFTP, "TXFT array parity error", -1, 1 },
1323 { TCAP, "TXCA array parity error", -1, 1 },
1324 { TCIP, "TXCIF array parity error", -1, 1 },
1325 { RCAP, "RXCA array parity error", -1, 1 },
1326 { OTDD, "outbound request TLP discarded", -1, 1 },
1327 { RDPE, "Rx data parity error", -1, 1 },
1328 { TDUE, "Tx uncorrectable data error", -1, 1 },
1329 { 0 }
1330 };
Joe Perches005b5712010-12-14 21:36:53 +00001331 static const struct intr_info pcie_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001332 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1333 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1334 { MSIDATAPERR, "MSI data parity error", -1, 1 },
1335 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1336 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1337 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1338 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1339 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1340 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1341 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1342 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1343 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1344 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1345 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1346 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1347 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1348 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1349 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1350 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1351 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1352 { FIDPERR, "PCI FID parity error", -1, 1 },
1353 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1354 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
1355 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1356 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1357 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
1358 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
1359 { PCIESINT, "PCI core secondary fault", -1, 1 },
1360 { PCIEPINT, "PCI core primary fault", -1, 1 },
1361 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
1362 { 0 }
1363 };
1364
Santosh Rastapur0a57a532013-03-14 05:08:49 +00001365	static const struct intr_info t5_pcie_intr_info[] = {
1366 { MSTGRPPERR, "Master Response Read Queue parity error",
1367 -1, 1 },
1368 { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
1369 { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
1370 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1371 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1372 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1373 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1374 { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
1375 -1, 1 },
1376 { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
1377 -1, 1 },
1378 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1379 { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
1380 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1381 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1382 { DREQWRPERR, "PCI DMA channel write request parity error",
1383 -1, 1 },
1384 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1385 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1386 { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
1387 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1388 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1389 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1390 { FIDPERR, "PCI FID parity error", -1, 1 },
1391 { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
1392 { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
1393 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1394 { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
1395 -1, 1 },
1396 { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
1397 { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
1398 { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
1399 { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
1400 { READRSPERR, "Outbound read error", -1, 0 },
1401 { 0 }
1402 };
1403
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001404 int fat;
1405
Hariprasad Shenai9bb59b92014-09-01 19:54:57 +05301406 if (is_t4(adapter->params.chip))
1407 fat = t4_handle_intr_status(adapter,
1408 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1409 sysbus_intr_info) +
1410 t4_handle_intr_status(adapter,
1411 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1412 pcie_port_intr_info) +
1413 t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1414 pcie_intr_info);
1415 else
1416 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1417 t5_pcie_intr_info);
Santosh Rastapur0a57a532013-03-14 05:08:49 +00001418
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001419 if (fat)
1420 t4_fatal_err(adapter);
1421}
1422
1423/*
1424 * TP interrupt handler.
1425 */
1426static void tp_intr_handler(struct adapter *adapter)
1427{
Joe Perches005b5712010-12-14 21:36:53 +00001428 static const struct intr_info tp_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001429 { 0x3fffffff, "TP parity error", -1, 1 },
1430 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1431 { 0 }
1432 };
1433
1434 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1435 t4_fatal_err(adapter);
1436}
1437
1438/*
1439 * SGE interrupt handler.
1440 */
1441static void sge_intr_handler(struct adapter *adapter)
1442{
1443 u64 v;
1444
Joe Perches005b5712010-12-14 21:36:53 +00001445 static const struct intr_info sge_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001446 { ERR_CPL_EXCEED_IQE_SIZE,
1447 "SGE received CPL exceeding IQE size", -1, 1 },
1448 { ERR_INVALID_CIDX_INC,
1449 "SGE GTS CIDX increment too large", -1, 0 },
1450 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
Vipul Pandya840f3002012-09-05 02:01:55 +00001451 { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
1452 { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
1453 { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001454 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1455 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1456 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1457 0 },
1458 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1459 0 },
1460 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1461 0 },
1462 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1463 0 },
1464 { ERR_ING_CTXT_PRIO,
1465 "SGE too many priority ingress contexts", -1, 0 },
1466 { ERR_EGR_CTXT_PRIO,
1467 "SGE too many priority egress contexts", -1, 0 },
1468 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1469 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1470 { 0 }
1471 };
1472
1473 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
Vipul Pandya8caa1e82012-05-18 15:29:25 +05301474 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001475 if (v) {
1476 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
Vipul Pandya8caa1e82012-05-18 15:29:25 +05301477 (unsigned long long)v);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001478 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1479 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1480 }
1481
1482 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1483 v != 0)
1484 t4_fatal_err(adapter);
1485}
1486
1487/*
1488 * CIM interrupt handler.
1489 */
1490static void cim_intr_handler(struct adapter *adapter)
1491{
Joe Perches005b5712010-12-14 21:36:53 +00001492 static const struct intr_info cim_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001493 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1494 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1495 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1496 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1497 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1498 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1499 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1500 { 0 }
1501 };
Joe Perches005b5712010-12-14 21:36:53 +00001502 static const struct intr_info cim_upintr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001503 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1504 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1505 { ILLWRINT, "CIM illegal write", -1, 1 },
1506 { ILLRDINT, "CIM illegal read", -1, 1 },
1507 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1508 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1509 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1510 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1511 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1512 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1513 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1514 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1515 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1516 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1517 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1518 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1519 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1520 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1521 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1522 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1523 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1524 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1525 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1526 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1527 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1528 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1529 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1530 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1531 { 0 }
1532 };
1533
1534 int fat;
1535
1536 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1537 cim_intr_info) +
1538 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1539 cim_upintr_info);
1540 if (fat)
1541 t4_fatal_err(adapter);
1542}
1543
1544/*
1545 * ULP RX interrupt handler.
1546 */
1547static void ulprx_intr_handler(struct adapter *adapter)
1548{
Joe Perches005b5712010-12-14 21:36:53 +00001549 static const struct intr_info ulprx_intr_info[] = {
Dimitris Michailidis91e9a1e2010-06-18 10:05:33 +00001550 { 0x1800000, "ULPRX context error", -1, 1 },
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001551 { 0x7fffff, "ULPRX parity error", -1, 1 },
1552 { 0 }
1553 };
1554
1555 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1556 t4_fatal_err(adapter);
1557}
1558
1559/*
1560 * ULP TX interrupt handler.
1561 */
1562static void ulptx_intr_handler(struct adapter *adapter)
1563{
Joe Perches005b5712010-12-14 21:36:53 +00001564 static const struct intr_info ulptx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001565 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1566 0 },
1567 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1568 0 },
1569 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1570 0 },
1571 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1572 0 },
1573 { 0xfffffff, "ULPTX parity error", -1, 1 },
1574 { 0 }
1575 };
1576
1577 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1578 t4_fatal_err(adapter);
1579}
1580
1581/*
1582 * PM TX interrupt handler.
1583 */
1584static void pmtx_intr_handler(struct adapter *adapter)
1585{
Joe Perches005b5712010-12-14 21:36:53 +00001586 static const struct intr_info pmtx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001587 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1588 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1589 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1590 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1591 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1592 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1593 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1594 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1595 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1596 { 0 }
1597 };
1598
1599 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1600 t4_fatal_err(adapter);
1601}
1602
1603/*
1604 * PM RX interrupt handler.
1605 */
1606static void pmrx_intr_handler(struct adapter *adapter)
1607{
Joe Perches005b5712010-12-14 21:36:53 +00001608 static const struct intr_info pmrx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001609 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1610 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1611 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1612 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1613 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1614 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1615 { 0 }
1616 };
1617
1618 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1619 t4_fatal_err(adapter);
1620}
1621
1622/*
1623 * CPL switch interrupt handler.
1624 */
1625static void cplsw_intr_handler(struct adapter *adapter)
1626{
Joe Perches005b5712010-12-14 21:36:53 +00001627 static const struct intr_info cplsw_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001628 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1629 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1630 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1631 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1632 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1633 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1634 { 0 }
1635 };
1636
1637 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1638 t4_fatal_err(adapter);
1639}
1640
1641/*
1642 * LE interrupt handler.
1643 */
1644static void le_intr_handler(struct adapter *adap)
1645{
Joe Perches005b5712010-12-14 21:36:53 +00001646 static const struct intr_info le_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001647 { LIPMISS, "LE LIP miss", -1, 0 },
1648 { LIP0, "LE 0 LIP error", -1, 0 },
1649 { PARITYERR, "LE parity error", -1, 1 },
1650 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1651 { REQQPARERR, "LE request queue parity error", -1, 1 },
1652 { 0 }
1653 };
1654
1655 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1656 t4_fatal_err(adap);
1657}
1658
1659/*
1660 * MPS interrupt handler.
1661 */
1662static void mps_intr_handler(struct adapter *adapter)
1663{
Joe Perches005b5712010-12-14 21:36:53 +00001664 static const struct intr_info mps_rx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001665 { 0xffffff, "MPS Rx parity error", -1, 1 },
1666 { 0 }
1667 };
Joe Perches005b5712010-12-14 21:36:53 +00001668 static const struct intr_info mps_tx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001669 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1670 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1671 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1672 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1673 { BUBBLE, "MPS Tx underflow", -1, 1 },
1674 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1675 { FRMERR, "MPS Tx framing error", -1, 1 },
1676 { 0 }
1677 };
Joe Perches005b5712010-12-14 21:36:53 +00001678 static const struct intr_info mps_trc_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001679 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1680 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1681 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1682 { 0 }
1683 };
Joe Perches005b5712010-12-14 21:36:53 +00001684 static const struct intr_info mps_stat_sram_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001685 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1686 { 0 }
1687 };
Joe Perches005b5712010-12-14 21:36:53 +00001688 static const struct intr_info mps_stat_tx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001689 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1690 { 0 }
1691 };
Joe Perches005b5712010-12-14 21:36:53 +00001692 static const struct intr_info mps_stat_rx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001693 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1694 { 0 }
1695 };
Joe Perches005b5712010-12-14 21:36:53 +00001696 static const struct intr_info mps_cls_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001697 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1698 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1699 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1700 { 0 }
1701 };
1702
1703 int fat;
1704
1705 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1706 mps_rx_intr_info) +
1707 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1708 mps_tx_intr_info) +
1709 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1710 mps_trc_intr_info) +
1711 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1712 mps_stat_sram_intr_info) +
1713 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1714 mps_stat_tx_intr_info) +
1715 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1716 mps_stat_rx_intr_info) +
1717 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1718 mps_cls_intr_info);
1719
1720 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1721 RXINT | TXINT | STATINT);
1722 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1723 if (fat)
1724 t4_fatal_err(adapter);
1725}
1726
1727#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1728
1729/*
1730 * EDC/MC interrupt handler.
1731 */
1732static void mem_intr_handler(struct adapter *adapter, int idx)
1733{
Hariprasad Shenai822dd8a2014-07-21 20:55:12 +05301734 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001735
1736 unsigned int addr, cnt_addr, v;
1737
1738 if (idx <= MEM_EDC1) {
1739 addr = EDC_REG(EDC_INT_CAUSE, idx);
1740 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
Hariprasad Shenai822dd8a2014-07-21 20:55:12 +05301741 } else if (idx == MEM_MC) {
1742 if (is_t4(adapter->params.chip)) {
1743 addr = MC_INT_CAUSE;
1744 cnt_addr = MC_ECC_STATUS;
1745 } else {
1746 addr = MC_P_INT_CAUSE;
1747 cnt_addr = MC_P_ECC_STATUS;
1748 }
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001749 } else {
Hariprasad Shenai822dd8a2014-07-21 20:55:12 +05301750 addr = MC_REG(MC_P_INT_CAUSE, 1);
1751 cnt_addr = MC_REG(MC_P_ECC_STATUS, 1);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001752 }
1753
1754 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1755 if (v & PERR_INT_CAUSE)
1756 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1757 name[idx]);
1758 if (v & ECC_CE_INT_CAUSE) {
1759 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1760
1761 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1762 if (printk_ratelimit())
1763 dev_warn(adapter->pdev_dev,
1764 "%u %s correctable ECC data error%s\n",
1765 cnt, name[idx], cnt > 1 ? "s" : "");
1766 }
1767 if (v & ECC_UE_INT_CAUSE)
1768 dev_alert(adapter->pdev_dev,
1769 "%s uncorrectable ECC data error\n", name[idx]);
1770
1771 t4_write_reg(adapter, addr, v);
1772 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1773 t4_fatal_err(adapter);
1774}
1775
1776/*
1777 * MA interrupt handler.
1778 */
1779static void ma_intr_handler(struct adapter *adap)
1780{
1781 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1782
Hariprasad Shenai9bb59b92014-09-01 19:54:57 +05301783 if (status & MEM_PERR_INT_CAUSE) {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001784 dev_alert(adap->pdev_dev,
1785 "MA parity error, parity status %#x\n",
1786 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
Hariprasad Shenai9bb59b92014-09-01 19:54:57 +05301787 if (is_t5(adap->params.chip))
1788 dev_alert(adap->pdev_dev,
1789 "MA parity error, parity status %#x\n",
1790 t4_read_reg(adap,
1791 MA_PARITY_ERROR_STATUS2));
1792 }
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001793 if (status & MEM_WRAP_INT_CAUSE) {
1794 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1795 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1796 "client %u to address %#x\n",
1797 MEM_WRAP_CLIENT_NUM_GET(v),
1798 MEM_WRAP_ADDRESS_GET(v) << 4);
1799 }
1800 t4_write_reg(adap, MA_INT_CAUSE, status);
1801 t4_fatal_err(adap);
1802}
1803
1804/*
1805 * SMB interrupt handler.
1806 */
1807static void smb_intr_handler(struct adapter *adap)
1808{
Joe Perches005b5712010-12-14 21:36:53 +00001809 static const struct intr_info smb_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001810 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1811 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1812 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1813 { 0 }
1814 };
1815
1816 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1817 t4_fatal_err(adap);
1818}
1819
1820/*
1821 * NC-SI interrupt handler.
1822 */
1823static void ncsi_intr_handler(struct adapter *adap)
1824{
Joe Perches005b5712010-12-14 21:36:53 +00001825 static const struct intr_info ncsi_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001826 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1827 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1828 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1829 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1830 { 0 }
1831 };
1832
1833 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1834 t4_fatal_err(adap);
1835}
1836
1837/*
1838 * XGMAC interrupt handler.
1839 */
1840static void xgmac_intr_handler(struct adapter *adap, int port)
1841{
Santosh Rastapur0a57a532013-03-14 05:08:49 +00001842 u32 v, int_cause_reg;
1843
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05301844 if (is_t4(adap->params.chip))
Santosh Rastapur0a57a532013-03-14 05:08:49 +00001845 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
1846 else
1847 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
1848
1849 v = t4_read_reg(adap, int_cause_reg);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001850
1851 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1852 if (!v)
1853 return;
1854
1855 if (v & TXFIFO_PRTY_ERR)
1856 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1857 port);
1858 if (v & RXFIFO_PRTY_ERR)
1859 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1860 port);
1861	t4_write_reg(adap, int_cause_reg, v);
1862 t4_fatal_err(adap);
1863}
1864
1865/*
1866 * PL interrupt handler.
1867 */
1868static void pl_intr_handler(struct adapter *adap)
1869{
Joe Perches005b5712010-12-14 21:36:53 +00001870 static const struct intr_info pl_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001871 { FATALPERR, "T4 fatal parity error", -1, 1 },
1872 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1873 { 0 }
1874 };
1875
1876 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1877 t4_fatal_err(adap);
1878}
1879
Dimitris Michailidis63bccee2010-08-02 13:19:16 +00001880#define PF_INTR_MASK (PFSW)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001881#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1882 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1883 CPL_SWITCH | SGE | ULP_TX)
1884
1885/**
1886 * t4_slow_intr_handler - control path interrupt handler
1887 * @adapter: the adapter
1888 *
1889 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1890 * The designation 'slow' is because it involves register reads, while
1891 * data interrupts typically don't involve any MMIOs.
1892 */
1893int t4_slow_intr_handler(struct adapter *adapter)
1894{
1895 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1896
1897 if (!(cause & GLBL_INTR_MASK))
1898 return 0;
1899 if (cause & CIM)
1900 cim_intr_handler(adapter);
1901 if (cause & MPS)
1902 mps_intr_handler(adapter);
1903 if (cause & NCSI)
1904 ncsi_intr_handler(adapter);
1905 if (cause & PL)
1906 pl_intr_handler(adapter);
1907 if (cause & SMB)
1908 smb_intr_handler(adapter);
1909 if (cause & XGMAC0)
1910 xgmac_intr_handler(adapter, 0);
1911 if (cause & XGMAC1)
1912 xgmac_intr_handler(adapter, 1);
1913 if (cause & XGMAC_KR0)
1914 xgmac_intr_handler(adapter, 2);
1915 if (cause & XGMAC_KR1)
1916 xgmac_intr_handler(adapter, 3);
1917 if (cause & PCIE)
1918 pcie_intr_handler(adapter);
1919 if (cause & MC)
1920 mem_intr_handler(adapter, MEM_MC);
Hariprasad Shenai822dd8a2014-07-21 20:55:12 +05301921 if (!is_t4(adapter->params.chip) && (cause & MC1))
1922 mem_intr_handler(adapter, MEM_MC1);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001923 if (cause & EDC0)
1924 mem_intr_handler(adapter, MEM_EDC0);
1925 if (cause & EDC1)
1926 mem_intr_handler(adapter, MEM_EDC1);
1927 if (cause & LE)
1928 le_intr_handler(adapter);
1929 if (cause & TP)
1930 tp_intr_handler(adapter);
1931 if (cause & MA)
1932 ma_intr_handler(adapter);
1933 if (cause & PM_TX)
1934 pmtx_intr_handler(adapter);
1935 if (cause & PM_RX)
1936 pmrx_intr_handler(adapter);
1937 if (cause & ULP_RX)
1938 ulprx_intr_handler(adapter);
1939 if (cause & CPL_SWITCH)
1940 cplsw_intr_handler(adapter);
1941 if (cause & SGE)
1942 sge_intr_handler(adapter);
1943 if (cause & ULP_TX)
1944 ulptx_intr_handler(adapter);
1945
1946 /* Clear the interrupts just processed for which we are the master. */
1947 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1948 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1949 return 1;
1950}
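/*
 * Illustrative sketch (not from the driver): one way a slow-path interrupt
 * service routine could dispatch to t4_slow_intr_handler() and report
 * whether anything was serviced.  The function name, the use of dev_id as
 * the adapter pointer, and the <linux/interrupt.h> types are assumptions
 * made for the example only.
 */
#if 0
static irqreturn_t example_slow_intr(int irq, void *dev_id)
{
	struct adapter *adap = dev_id;

	return t4_slow_intr_handler(adap) ? IRQ_HANDLED : IRQ_NONE;
}
#endif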
1951
1952/**
1953 * t4_intr_enable - enable interrupts
1954 * @adapter: the adapter whose interrupts should be enabled
1955 *
1956 * Enable PF-specific interrupts for the calling function and the top-level
1957 * interrupt concentrator for global interrupts. Interrupts are already
1958 * enabled at each module; here we just enable the roots of the interrupt
1959 * hierarchies.
1960 *
1961 * Note: this function should be called only when the driver manages
1962 * non PF-specific interrupts from the various HW modules. Only one PCI
1963 * function at a time should be doing this.
1964 */
1965void t4_intr_enable(struct adapter *adapter)
1966{
1967 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1968
1969 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1970 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1971 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1972 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1973 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1974 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1975 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
Vipul Pandya840f3002012-09-05 02:01:55 +00001976 DBFIFO_HP_INT | DBFIFO_LP_INT |
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001977 EGRESS_SIZE_ERR);
1978 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1979 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1980}
1981
1982/**
1983 * t4_intr_disable - disable interrupts
1984 * @adapter: the adapter whose interrupts should be disabled
1985 *
1986 * Disable interrupts. We only disable the top-level interrupt
1987 * concentrators. The caller must be a PCI function managing global
1988 * interrupts.
1989 */
1990void t4_intr_disable(struct adapter *adapter)
1991{
1992 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1993
1994 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1995 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1996}
1997
1998/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001999 * hash_mac_addr - return the hash value of a MAC address
2000 * @addr: the 48-bit Ethernet MAC address
2001 *
2002 * Hashes a MAC address according to the hash function used by HW inexact
2003 * (hash) address matching.
2004 */
2005static int hash_mac_addr(const u8 *addr)
2006{
2007 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2008 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2009 a ^= b;
2010 a ^= (a >> 12);
2011 a ^= (a >> 6);
2012 return a & 0x3f;
2013}
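/*
 * Illustrative sketch (not from the driver): hash_mac_addr() returns a
 * bucket in [0, 63], so a typical use is to OR the corresponding bit into
 * a 64-bit inexact-match hash vector.  The helper name and address-list
 * layout are hypothetical.
 */
#if 0
static u64 example_build_hash_vec(const u8 (*addrs)[ETH_ALEN], int naddr)
{
	u64 vec = 0;
	int i;

	for (i = 0; i < naddr; i++)
		vec |= 1ULL << hash_mac_addr(addrs[i]);
	return vec;
}
#endif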
2014
2015/**
2016 * t4_config_rss_range - configure a portion of the RSS mapping table
2017 * @adapter: the adapter
2018 * @mbox: mbox to use for the FW command
2019 * @viid: virtual interface whose RSS subtable is to be written
2020 * @start: start entry in the table to write
2021 * @n: how many table entries to write
2022 * @rspq: values for the response queue lookup table
2023 * @nrspq: number of values in @rspq
2024 *
2025 * Programs the selected part of the VI's RSS mapping table with the
2026 * provided values. If @nrspq < @n the supplied values are used repeatedly
2027 * until the full table range is populated.
2028 *
2029 * The caller must ensure the values in @rspq are in the range allowed for
2030 * @viid.
2031 */
2032int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2033 int start, int n, const u16 *rspq, unsigned int nrspq)
2034{
2035 int ret;
2036 const u16 *rsp = rspq;
2037 const u16 *rsp_end = rspq + nrspq;
2038 struct fw_rss_ind_tbl_cmd cmd;
2039
2040 memset(&cmd, 0, sizeof(cmd));
2041 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2042 FW_CMD_REQUEST | FW_CMD_WRITE |
2043 FW_RSS_IND_TBL_CMD_VIID(viid));
2044 cmd.retval_len16 = htonl(FW_LEN16(cmd));
2045
2046 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
2047 while (n > 0) {
2048 int nq = min(n, 32);
2049 __be32 *qp = &cmd.iq0_to_iq2;
2050
2051 cmd.niqid = htons(nq);
2052 cmd.startidx = htons(start);
2053
2054 start += nq;
2055 n -= nq;
2056
2057 while (nq > 0) {
2058 unsigned int v;
2059
2060 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
2061 if (++rsp >= rsp_end)
2062 rsp = rspq;
2063 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
2064 if (++rsp >= rsp_end)
2065 rsp = rspq;
2066 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
2067 if (++rsp >= rsp_end)
2068 rsp = rspq;
2069
2070 *qp++ = htonl(v);
2071 nq -= 3;
2072 }
2073
2074 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2075 if (ret)
2076 return ret;
2077 }
2078 return 0;
2079}
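/*
 * Illustrative sketch (not from the driver): programming 64 entries of a
 * VI's RSS indirection table from four ingress queue IDs.  Because
 * nrspq < n, the IDs are repeated until the whole range is written, as
 * described above.  The mailbox, VIID and queue IDs are hypothetical.
 */
#if 0
static int example_setup_rss(struct adapter *adap, int mbox, unsigned int viid)
{
	static const u16 rspq[] = { 16, 17, 18, 19 };	/* ingress queue IDs */

	return t4_config_rss_range(adap, mbox, viid, 0, 64,
				   rspq, ARRAY_SIZE(rspq));
}
#endif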
2080
2081/**
2082 * t4_config_glbl_rss - configure the global RSS mode
2083 * @adapter: the adapter
2084 * @mbox: mbox to use for the FW command
2085 * @mode: global RSS mode
2086 * @flags: mode-specific flags
2087 *
2088 * Sets the global RSS mode.
2089 */
2090int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2091 unsigned int flags)
2092{
2093 struct fw_rss_glb_config_cmd c;
2094
2095 memset(&c, 0, sizeof(c));
2096 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2097 FW_CMD_REQUEST | FW_CMD_WRITE);
2098 c.retval_len16 = htonl(FW_LEN16(c));
2099 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2100 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2101 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2102 c.u.basicvirtual.mode_pkd =
2103 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2104 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2105 } else
2106 return -EINVAL;
2107 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2108}
2109
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002110/**
2111 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
2112 * @adap: the adapter
2113 * @v4: holds the TCP/IP counter values
2114 * @v6: holds the TCP/IPv6 counter values
2115 *
2116 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2117 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2118 */
2119void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2120 struct tp_tcp_stats *v6)
2121{
2122 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
2123
2124#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
2125#define STAT(x) val[STAT_IDX(x)]
2126#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2127
2128 if (v4) {
2129 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2130 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
2131 v4->tcpOutRsts = STAT(OUT_RST);
2132 v4->tcpInSegs = STAT64(IN_SEG);
2133 v4->tcpOutSegs = STAT64(OUT_SEG);
2134 v4->tcpRetransSegs = STAT64(RXT_SEG);
2135 }
2136 if (v6) {
2137 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2138 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
2139 v6->tcpOutRsts = STAT(OUT_RST);
2140 v6->tcpInSegs = STAT64(IN_SEG);
2141 v6->tcpOutSegs = STAT64(OUT_SEG);
2142 v6->tcpRetransSegs = STAT64(RXT_SEG);
2143 }
2144#undef STAT64
2145#undef STAT
2146#undef STAT_IDX
2147}
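/*
 * Illustrative sketch (not from the driver): reading only the TCP/IP
 * counters (the IPv6 pointer may be NULL, as noted above) and logging the
 * retransmission count.  The function name is hypothetical.
 */
#if 0
static void example_log_tcp_retrans(struct adapter *adap)
{
	struct tp_tcp_stats v4;

	t4_tp_get_tcp_stats(adap, &v4, NULL);
	dev_info(adap->pdev_dev, "TCP retransmitted segments: %llu\n",
		 (unsigned long long)v4.tcpRetransSegs);
}
#endif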
2148
2149/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002150 * t4_read_mtu_tbl - returns the values in the HW path MTU table
2151 * @adap: the adapter
2152 * @mtus: where to store the MTU values
2153 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
2154 *
2155 * Reads the HW path MTU table.
2156 */
2157void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2158{
2159 u32 v;
2160 int i;
2161
2162 for (i = 0; i < NMTUS; ++i) {
2163 t4_write_reg(adap, TP_MTU_TABLE,
2164 MTUINDEX(0xff) | MTUVALUE(i));
2165 v = t4_read_reg(adap, TP_MTU_TABLE);
2166 mtus[i] = MTUVALUE_GET(v);
2167 if (mtu_log)
2168 mtu_log[i] = MTUWIDTH_GET(v);
2169 }
2170}
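/*
 * Illustrative sketch (not from the driver): dumping the NMTUS-entry HW
 * path-MTU table; the MTU log argument is skipped by passing NULL.  The
 * function name is hypothetical.
 */
#if 0
static void example_dump_mtu_tbl(struct adapter *adap)
{
	u16 mtus[NMTUS];
	int i;

	t4_read_mtu_tbl(adap, mtus, NULL);
	for (i = 0; i < NMTUS; i++)
		dev_info(adap->pdev_dev, "MTU[%d] = %u\n", i, mtus[i]);
}
#endif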
2171
2172/**
Vipul Pandya636f9d32012-09-26 02:39:39 +00002173 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
2174 * @adap: the adapter
2175 * @addr: the indirect TP register address
2176 * @mask: specifies the field within the register to modify
2177 * @val: new value for the field
2178 *
2179 * Sets a field of an indirect TP register to the given value.
2180 */
2181void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
2182 unsigned int mask, unsigned int val)
2183{
2184 t4_write_reg(adap, TP_PIO_ADDR, addr);
2185 val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
2186 t4_write_reg(adap, TP_PIO_DATA, val);
2187}
2188
2189/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002190 * init_cong_ctrl - initialize congestion control parameters
2191 * @a: the alpha values for congestion control
2192 * @b: the beta values for congestion control
2193 *
2194 * Initialize the congestion control parameters.
2195 */
Bill Pemberton91744942012-12-03 09:23:02 -05002196static void init_cong_ctrl(unsigned short *a, unsigned short *b)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002197{
2198 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2199 a[9] = 2;
2200 a[10] = 3;
2201 a[11] = 4;
2202 a[12] = 5;
2203 a[13] = 6;
2204 a[14] = 7;
2205 a[15] = 8;
2206 a[16] = 9;
2207 a[17] = 10;
2208 a[18] = 14;
2209 a[19] = 17;
2210 a[20] = 21;
2211 a[21] = 25;
2212 a[22] = 30;
2213 a[23] = 35;
2214 a[24] = 45;
2215 a[25] = 60;
2216 a[26] = 80;
2217 a[27] = 100;
2218 a[28] = 200;
2219 a[29] = 300;
2220 a[30] = 400;
2221 a[31] = 500;
2222
2223 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2224 b[9] = b[10] = 1;
2225 b[11] = b[12] = 2;
2226 b[13] = b[14] = b[15] = b[16] = 3;
2227 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2228 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2229 b[28] = b[29] = 6;
2230 b[30] = b[31] = 7;
2231}
2232
2233/* The minimum additive increment value for the congestion control table */
2234#define CC_MIN_INCR 2U
2235
2236/**
2237 * t4_load_mtus - write the MTU and congestion control HW tables
2238 * @adap: the adapter
2239 * @mtus: the values for the MTU table
2240 * @alpha: the values for the congestion control alpha parameter
2241 * @beta: the values for the congestion control beta parameter
2242 *
2243 * Write the HW MTU table with the supplied MTUs and the high-speed
2244 * congestion control table with the supplied alpha, beta, and MTUs.
2245 * We write the two tables together because the additive increments
2246 * depend on the MTUs.
2247 */
2248void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2249 const unsigned short *alpha, const unsigned short *beta)
2250{
2251 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2252 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2253 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2254 28672, 40960, 57344, 81920, 114688, 163840, 229376
2255 };
2256
2257 unsigned int i, w;
2258
2259 for (i = 0; i < NMTUS; ++i) {
2260 unsigned int mtu = mtus[i];
2261 unsigned int log2 = fls(mtu);
2262
2263 if (!(mtu & ((1 << log2) >> 2))) /* round */
2264 log2--;
2265 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
2266 MTUWIDTH(log2) | MTUVALUE(mtu));
2267
2268 for (w = 0; w < NCCTRL_WIN; ++w) {
2269 unsigned int inc;
2270
2271 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2272 CC_MIN_INCR);
2273
2274 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
2275 (w << 16) | (beta[w] << 13) | inc);
2276 }
2277 }
2278}
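/*
 * Illustrative sketch (not from the driver): loading the MTU table together
 * with the congestion-control parameters produced by init_cong_ctrl().
 * For window 0 (alpha[0] == 1, avg_pkts[0] == 2) and a 1500-byte MTU the
 * additive increment computed above is max((1500 - 40) * 1 / 2, 2) = 730.
 * The MTU values below are example values, not the driver's defaults.
 */
#if 0
static void example_load_mtus(struct adapter *adap)
{
	static const unsigned short mtus[NMTUS] = {
		88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002,
		4096, 4352, 8192, 9000, 9300, 9600
	};
	unsigned short alpha[NCCTRL_WIN], beta[NCCTRL_WIN];

	init_cong_ctrl(alpha, beta);
	t4_load_mtus(adap, mtus, alpha, beta);
}
#endif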
2279
2280/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002281 * get_mps_bg_map - return the buffer groups associated with a port
2282 * @adap: the adapter
2283 * @idx: the port index
2284 *
2285 * Returns a bitmap indicating which MPS buffer groups are associated
2286 * with the given port. Bit i is set if buffer group i is used by the
2287 * port.
2288 */
2289static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2290{
2291 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2292
2293 if (n == 0)
2294 return idx == 0 ? 0xf : 0;
2295 if (n == 1)
2296 return idx < 2 ? (3 << (2 * idx)) : 0;
2297 return 1 << idx;
2298}
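/*
 * Worked example of the mapping above, where n is the NUMPORTS field read
 * from MPS_CMN_CTL:
 *	n == 0 (one port):   port 0 -> buffer groups 0-3 (0xf)
 *	n == 1 (two ports):  port 0 -> groups 0-1 (0x3), port 1 -> groups 2-3 (0xc)
 *	otherwise (e.g. four ports): port i -> group i (1 << i)
 */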
2299
2300/**
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05302301 * t4_get_port_type_description - return Port Type string description
2302 * @port_type: firmware Port Type enumeration
2303 */
2304const char *t4_get_port_type_description(enum fw_port_type port_type)
2305{
2306 static const char *const port_type_description[] = {
2307 "R XFI",
2308 "R XAUI",
2309 "T SGMII",
2310 "T XFI",
2311 "T XAUI",
2312 "KX4",
2313 "CX4",
2314 "KX",
2315 "KR",
2316 "R SFP+",
2317 "KR/KX",
2318 "KR/KX/KX4",
2319 "R QSFP_10G",
2320 "",
2321 "R QSFP",
2322 "R BP40_BA",
2323 };
2324
2325 if (port_type < ARRAY_SIZE(port_type_description))
2326 return port_type_description[port_type];
2327 return "UNKNOWN";
2328}
2329
2330/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002331 * t4_get_port_stats - collect port statistics
2332 * @adap: the adapter
2333 * @idx: the port index
2334 * @p: the stats structure to fill
2335 *
2336 * Collect statistics related to the given port from HW.
2337 */
2338void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2339{
2340 u32 bgmap = get_mps_bg_map(adap, idx);
2341
2342#define GET_STAT(name) \
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002343 t4_read_reg64(adap, \
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05302344 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002345 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002346#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2347
2348 p->tx_octets = GET_STAT(TX_PORT_BYTES);
2349 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
2350 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
2351 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
2352 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
2353 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
2354 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
2355 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
2356 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
2357 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
2358 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
2359 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2360 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
2361 p->tx_drop = GET_STAT(TX_PORT_DROP);
2362 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
2363 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
2364 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
2365 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
2366 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
2367 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
2368 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
2369 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
2370 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
2371
2372 p->rx_octets = GET_STAT(RX_PORT_BYTES);
2373 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
2374 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
2375 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
2376 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
2377 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
2378 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2379 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
2380 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
2381 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
2382 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
2383 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
2384 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
2385 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
2386 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
2387 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
2388 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2389 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
2390 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
2391 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
2392 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
2393 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
2394 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
2395 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
2396 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
2397 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
2398 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
2399
2400 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2401 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2402 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2403 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2404 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2405 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2406 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2407 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2408
2409#undef GET_STAT
2410#undef GET_STAT_COM
2411}
2412
2413/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002414 * t4_wol_magic_enable - enable/disable magic packet WoL
2415 * @adap: the adapter
2416 * @port: the physical port index
2417 * @addr: MAC address expected in magic packets, %NULL to disable
2418 *
2419 * Enables/disables magic packet wake-on-LAN for the selected port.
2420 */
2421void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2422 const u8 *addr)
2423{
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002424 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
2425
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05302426 if (is_t4(adap->params.chip)) {
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002427 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
2428 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
2429 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2430 } else {
2431 mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
2432 mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
2433 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
2434 }
2435
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002436 if (addr) {
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002437 t4_write_reg(adap, mag_id_reg_l,
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002438 (addr[2] << 24) | (addr[3] << 16) |
2439 (addr[4] << 8) | addr[5]);
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002440 t4_write_reg(adap, mag_id_reg_h,
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002441 (addr[0] << 8) | addr[1]);
2442 }
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002443 t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002444 addr ? MAGICEN : 0);
2445}
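/*
 * Illustrative sketch (not from the driver): arming magic-packet
 * wake-on-LAN on port 0 for a given station address, or disarming it by
 * passing a NULL address.  The MAC address and function name are
 * hypothetical.
 */
#if 0
static void example_toggle_magic_wol(struct adapter *adap, bool on)
{
	static const u8 mac[] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x56 };

	t4_wol_magic_enable(adap, 0, on ? mac : NULL);
}
#endif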
2446
2447/**
2448 * t4_wol_pat_enable - enable/disable pattern-based WoL
2449 * @adap: the adapter
2450 * @port: the physical port index
2451 * @map: bitmap of which HW pattern filters to set
2452 * @mask0: byte mask for bytes 0-63 of a packet
2453 * @mask1: byte mask for bytes 64-127 of a packet
2454 * @crc: Ethernet CRC for selected bytes
2455 * @enable: enable/disable switch
2456 *
2457 * Sets the pattern filters indicated in @map to mask out the bytes
2458 * specified in @mask0/@mask1 in received packets and compare the CRC of
2459 * the resulting packet against @crc. If @enable is %true pattern-based
2460 * WoL is enabled, otherwise disabled.
2461 */
2462int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2463 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2464{
2465 int i;
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002466 u32 port_cfg_reg;
2467
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05302468 if (is_t4(adap->params.chip))
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002469 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2470 else
2471 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002472
2473 if (!enable) {
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002474 t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002475 return 0;
2476 }
2477 if (map > 0xff)
2478 return -EINVAL;
2479
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002480#define EPIO_REG(name) \
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05302481 (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002482 T5_PORT_REG(port, MAC_PORT_EPIO_##name))
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002483
2484 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2485 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2486 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2487
2488 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2489 if (!(map & 1))
2490 continue;
2491
2492 /* write byte masks */
2493 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2494 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2495 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
Naresh Kumar Innace91a922012-11-15 22:41:17 +05302496 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002497 return -ETIMEDOUT;
2498
2499 /* write CRC */
2500 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2501 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2502 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
Naresh Kumar Innace91a922012-11-15 22:41:17 +05302503 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002504 return -ETIMEDOUT;
2505 }
2506#undef EPIO_REG
2507
2508	t4_set_reg_field(adap, port_cfg_reg, 0, PATEN);
2509 return 0;
2510}
2511
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00002512/* t4_mk_filtdelwr - create a delete filter WR
2513 * @ftid: the filter ID
2514 * @wr: the filter work request to populate
2515 * @qid: ingress queue to receive the delete notification
2516 *
2517 * Creates a filter work request to delete the supplied filter. If @qid is
2518 * negative the delete notification is suppressed.
2519 */
2520void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
2521{
2522 memset(wr, 0, sizeof(*wr));
2523 wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
2524 wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
2525 wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
2526 V_FW_FILTER_WR_NOREPLY(qid < 0));
2527 wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
2528 if (qid >= 0)
2529 wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
2530}
2531
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002532#define INIT_CMD(var, cmd, rd_wr) do { \
2533 (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
2534 FW_CMD_REQUEST | FW_CMD_##rd_wr); \
2535 (var).retval_len16 = htonl(FW_LEN16(var)); \
2536} while (0)
2537
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302538int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2539 u32 addr, u32 val)
2540{
2541 struct fw_ldst_cmd c;
2542
2543 memset(&c, 0, sizeof(c));
Vipul Pandya636f9d32012-09-26 02:39:39 +00002544 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2545 FW_CMD_WRITE |
2546 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302547 c.cycles_to_len16 = htonl(FW_LEN16(c));
2548 c.u.addrval.addr = htonl(addr);
2549 c.u.addrval.val = htonl(val);
2550
2551 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2552}
2553
Ben Hutchings49ce9c22012-07-10 10:56:00 +00002554/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002555 * t4_mdio_rd - read a PHY register through MDIO
2556 * @adap: the adapter
2557 * @mbox: mailbox to use for the FW command
2558 * @phy_addr: the PHY address
2559 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2560 * @reg: the register to read
2561 * @valp: where to store the value
2562 *
2563 * Issues a FW command through the given mailbox to read a PHY register.
2564 */
2565int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2566 unsigned int mmd, unsigned int reg, u16 *valp)
2567{
2568 int ret;
2569 struct fw_ldst_cmd c;
2570
2571 memset(&c, 0, sizeof(c));
2572 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2573 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2574 c.cycles_to_len16 = htonl(FW_LEN16(c));
2575 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2576 FW_LDST_CMD_MMD(mmd));
2577 c.u.mdio.raddr = htons(reg);
2578
2579 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2580 if (ret == 0)
2581 *valp = ntohs(c.u.mdio.rval);
2582 return ret;
2583}
2584
2585/**
2586 * t4_mdio_wr - write a PHY register through MDIO
2587 * @adap: the adapter
2588 * @mbox: mailbox to use for the FW command
2589 * @phy_addr: the PHY address
2590 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2591 * @reg: the register to write
2592 * @val: value to write
2593 *
2594 * Issues a FW command through the given mailbox to write a PHY register.
2595 */
2596int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2597 unsigned int mmd, unsigned int reg, u16 val)
2598{
2599 struct fw_ldst_cmd c;
2600
2601 memset(&c, 0, sizeof(c));
2602 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2603 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2604 c.cycles_to_len16 = htonl(FW_LEN16(c));
2605 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2606 FW_LDST_CMD_MMD(mmd));
2607 c.u.mdio.raddr = htons(reg);
2608 c.u.mdio.rval = htons(val);
2609
2610 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2611}
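/*
 * Illustrative sketch (not from the driver): a clause-45 read-modify-write
 * of a PHY register through the firmware MDIO interface.  The PHY address
 * (0), MMD (1, PMA/PMD) and register number are hypothetical values chosen
 * only to show the calling convention.
 */
#if 0
static int example_mdio_rmw(struct adapter *adap, unsigned int mbox)
{
	u16 val;
	int ret;

	ret = t4_mdio_rd(adap, mbox, 0, 1, 0xc400, &val);
	if (ret)
		return ret;
	return t4_mdio_wr(adap, mbox, 0, 1, 0xc400, val | 1);
}
#endif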
2612
2613/**
Kumar Sanghvi68bce1922014-03-13 20:50:47 +05302614 * t4_sge_decode_idma_state - decode the idma state
2615 * @adap: the adapter
2616 * @state: the state idma is stuck in
2617 */
2618void t4_sge_decode_idma_state(struct adapter *adapter, int state)
2619{
2620 static const char * const t4_decode[] = {
2621 "IDMA_IDLE",
2622 "IDMA_PUSH_MORE_CPL_FIFO",
2623 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2624 "Not used",
2625 "IDMA_PHYSADDR_SEND_PCIEHDR",
2626 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2627 "IDMA_PHYSADDR_SEND_PAYLOAD",
2628 "IDMA_SEND_FIFO_TO_IMSG",
2629 "IDMA_FL_REQ_DATA_FL_PREP",
2630 "IDMA_FL_REQ_DATA_FL",
2631 "IDMA_FL_DROP",
2632 "IDMA_FL_H_REQ_HEADER_FL",
2633 "IDMA_FL_H_SEND_PCIEHDR",
2634 "IDMA_FL_H_PUSH_CPL_FIFO",
2635 "IDMA_FL_H_SEND_CPL",
2636 "IDMA_FL_H_SEND_IP_HDR_FIRST",
2637 "IDMA_FL_H_SEND_IP_HDR",
2638 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
2639 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
2640 "IDMA_FL_H_SEND_IP_HDR_PADDING",
2641 "IDMA_FL_D_SEND_PCIEHDR",
2642 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2643 "IDMA_FL_D_REQ_NEXT_DATA_FL",
2644 "IDMA_FL_SEND_PCIEHDR",
2645 "IDMA_FL_PUSH_CPL_FIFO",
2646 "IDMA_FL_SEND_CPL",
2647 "IDMA_FL_SEND_PAYLOAD_FIRST",
2648 "IDMA_FL_SEND_PAYLOAD",
2649 "IDMA_FL_REQ_NEXT_DATA_FL",
2650 "IDMA_FL_SEND_NEXT_PCIEHDR",
2651 "IDMA_FL_SEND_PADDING",
2652 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2653 "IDMA_FL_SEND_FIFO_TO_IMSG",
2654 "IDMA_FL_REQ_DATAFL_DONE",
2655 "IDMA_FL_REQ_HEADERFL_DONE",
2656 };
2657 static const char * const t5_decode[] = {
2658 "IDMA_IDLE",
2659 "IDMA_ALMOST_IDLE",
2660 "IDMA_PUSH_MORE_CPL_FIFO",
2661 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2662 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
2663 "IDMA_PHYSADDR_SEND_PCIEHDR",
2664 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2665 "IDMA_PHYSADDR_SEND_PAYLOAD",
2666 "IDMA_SEND_FIFO_TO_IMSG",
2667 "IDMA_FL_REQ_DATA_FL",
2668 "IDMA_FL_DROP",
2669 "IDMA_FL_DROP_SEND_INC",
2670 "IDMA_FL_H_REQ_HEADER_FL",
2671 "IDMA_FL_H_SEND_PCIEHDR",
2672 "IDMA_FL_H_PUSH_CPL_FIFO",
2673 "IDMA_FL_H_SEND_CPL",
2674 "IDMA_FL_H_SEND_IP_HDR_FIRST",
2675 "IDMA_FL_H_SEND_IP_HDR",
2676 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
2677 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
2678 "IDMA_FL_H_SEND_IP_HDR_PADDING",
2679 "IDMA_FL_D_SEND_PCIEHDR",
2680 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2681 "IDMA_FL_D_REQ_NEXT_DATA_FL",
2682 "IDMA_FL_SEND_PCIEHDR",
2683 "IDMA_FL_PUSH_CPL_FIFO",
2684 "IDMA_FL_SEND_CPL",
2685 "IDMA_FL_SEND_PAYLOAD_FIRST",
2686 "IDMA_FL_SEND_PAYLOAD",
2687 "IDMA_FL_REQ_NEXT_DATA_FL",
2688 "IDMA_FL_SEND_NEXT_PCIEHDR",
2689 "IDMA_FL_SEND_PADDING",
2690 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2691 };
2692 static const u32 sge_regs[] = {
2693 SGE_DEBUG_DATA_LOW_INDEX_2,
2694 SGE_DEBUG_DATA_LOW_INDEX_3,
2695 SGE_DEBUG_DATA_HIGH_INDEX_10,
2696 };
2697 const char **sge_idma_decode;
2698 int sge_idma_decode_nstates;
2699 int i;
2700
2701 if (is_t4(adapter->params.chip)) {
2702 sge_idma_decode = (const char **)t4_decode;
2703 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
2704 } else {
2705 sge_idma_decode = (const char **)t5_decode;
2706 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
2707 }
2708
2709 if (state < sge_idma_decode_nstates)
2710 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
2711 else
2712 CH_WARN(adapter, "idma state %d unknown\n", state);
2713
2714 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
2715 CH_WARN(adapter, "SGE register %#x value %#x\n",
2716 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
2717}
2718
2719/**
Vipul Pandya636f9d32012-09-26 02:39:39 +00002720 * t4_fw_hello - establish communication with FW
2721 * @adap: the adapter
2722 * @mbox: mailbox to use for the FW command
2723 * @evt_mbox: mailbox to receive async FW events
2724 * @master: specifies the caller's willingness to be the device master
2725 * @state: returns the current device state (if non-NULL)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002726 *
Vipul Pandya636f9d32012-09-26 02:39:39 +00002727 * Issues a command to establish communication with FW. Returns either
2728 * an error (negative integer) or the mailbox of the Master PF.
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002729 */
2730int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2731 enum dev_master master, enum dev_state *state)
2732{
2733 int ret;
2734 struct fw_hello_cmd c;
Vipul Pandya636f9d32012-09-26 02:39:39 +00002735 u32 v;
2736 unsigned int master_mbox;
2737 int retries = FW_CMD_HELLO_RETRIES;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002738
Vipul Pandya636f9d32012-09-26 02:39:39 +00002739retry:
2740 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002741 INIT_CMD(c, HELLO, WRITE);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05302742 c.err_to_clearinit = htonl(
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002743 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2744 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
Vipul Pandya636f9d32012-09-26 02:39:39 +00002745 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
2746 FW_HELLO_CMD_MBMASTER_MASK) |
2747 FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
2748 FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
2749 FW_HELLO_CMD_CLEARINIT);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002750
Vipul Pandya636f9d32012-09-26 02:39:39 +00002751 /*
2752 * Issue the HELLO command to the firmware. If it's not successful
2753 * but indicates that we got a "busy" or "timeout" condition, retry
2754 * the HELLO until we exhaust our retry limit.
2755 */
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002756 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
Vipul Pandya636f9d32012-09-26 02:39:39 +00002757 if (ret < 0) {
2758 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2759 goto retry;
2760 return ret;
2761 }
2762
Naresh Kumar Innace91a922012-11-15 22:41:17 +05302763 v = ntohl(c.err_to_clearinit);
Vipul Pandya636f9d32012-09-26 02:39:39 +00002764 master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
2765 if (state) {
2766 if (v & FW_HELLO_CMD_ERR)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002767 *state = DEV_STATE_ERR;
Vipul Pandya636f9d32012-09-26 02:39:39 +00002768 else if (v & FW_HELLO_CMD_INIT)
2769 *state = DEV_STATE_INIT;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002770 else
2771 *state = DEV_STATE_UNINIT;
2772 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00002773
2774 /*
2775 * If we're not the Master PF then we need to wait around for the
2776 * Master PF Driver to finish setting up the adapter.
2777 *
2778 * Note that we also do this wait if we're a non-Master-capable PF and
2779 * there is no current Master PF; a Master PF may show up momentarily
2780 * and we wouldn't want to fail pointlessly. (This can happen when an
2781 * OS loads lots of different drivers rapidly at the same time). In
2782 * this case, the Master PF returned by the firmware will be
2783 * FW_PCIE_FW_MASTER_MASK so the test below will work ...
2784 */
2785 if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
2786 master_mbox != mbox) {
2787 int waiting = FW_CMD_HELLO_TIMEOUT;
2788
2789 /*
2790 * Wait for the firmware to either indicate an error or
2791 * initialized state. If we see either of these we bail out
2792 * and report the issue to the caller. If we exhaust the
2793 * "hello timeout" and we haven't exhausted our retries, try
2794 * again. Otherwise bail with a timeout error.
2795 */
2796 for (;;) {
2797 u32 pcie_fw;
2798
2799 msleep(50);
2800 waiting -= 50;
2801
2802 /*
2803			 * If neither Error nor Initialized is indicated
2804			 * by the firmware, keep waiting till we exhaust our
2805 * timeout ... and then retry if we haven't exhausted
2806 * our retries ...
2807 */
2808 pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
2809 if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
2810 if (waiting <= 0) {
2811 if (retries-- > 0)
2812 goto retry;
2813
2814 return -ETIMEDOUT;
2815 }
2816 continue;
2817 }
2818
2819 /*
2820 * We either have an Error or Initialized condition
2821 * report errors preferentially.
2822 */
2823 if (state) {
2824 if (pcie_fw & FW_PCIE_FW_ERR)
2825 *state = DEV_STATE_ERR;
2826 else if (pcie_fw & FW_PCIE_FW_INIT)
2827 *state = DEV_STATE_INIT;
2828 }
2829
2830 /*
2831 * If we arrived before a Master PF was selected and
2832 * there's not a valid Master PF, grab its identity
2833 * for our caller.
2834 */
2835 if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
2836 (pcie_fw & FW_PCIE_FW_MASTER_VLD))
2837 master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
2838 break;
2839 }
2840 }
2841
2842 return master_mbox;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002843}
2844
2845/**
2846 * t4_fw_bye - end communication with FW
2847 * @adap: the adapter
2848 * @mbox: mailbox to use for the FW command
2849 *
2850 * Issues a command to terminate communication with FW.
2851 */
2852int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2853{
2854 struct fw_bye_cmd c;
2855
Vipul Pandya0062b152012-11-06 03:37:09 +00002856 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002857 INIT_CMD(c, BYE, WRITE);
2858 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2859}
2860
2861/**
2862 * t4_early_init - ask FW to initialize the device
2863 * @adap: the adapter
2864 * @mbox: mailbox to use for the FW command
2865 *
2866 * Issues a command to FW to partially initialize the device. This
2867 * performs initialization that generally doesn't depend on user input.
2868 */
2869int t4_early_init(struct adapter *adap, unsigned int mbox)
2870{
2871 struct fw_initialize_cmd c;
2872
Vipul Pandya0062b152012-11-06 03:37:09 +00002873 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002874 INIT_CMD(c, INITIALIZE, WRITE);
2875 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2876}
2877
2878/**
2879 * t4_fw_reset - issue a reset to FW
2880 * @adap: the adapter
2881 * @mbox: mailbox to use for the FW command
2882 * @reset: specifies the type of reset to perform
2883 *
2884 * Issues a reset command of the specified type to FW.
2885 */
2886int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2887{
2888 struct fw_reset_cmd c;
2889
Vipul Pandya0062b152012-11-06 03:37:09 +00002890 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002891 INIT_CMD(c, RESET, WRITE);
2892 c.val = htonl(reset);
2893 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2894}
2895
2896/**
Vipul Pandya26f7cbc2012-09-26 02:39:42 +00002897 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
2898 * @adap: the adapter
2899 * @mbox: mailbox to use for the FW RESET command (if desired)
2900 * @force: force uP into RESET even if FW RESET command fails
2901 *
2902 * Issues a RESET command to firmware (if desired) with a HALT indication
2903 * and then puts the microprocessor into RESET state. The RESET command
2904 * will only be issued if a legitimate mailbox is provided (mbox <=
2905 * FW_PCIE_FW_MASTER_MASK).
2906 *
2907 * This is generally used in order for the host to safely manipulate the
2908 * adapter without fear of conflicting with whatever the firmware might
2909 * be doing. The only way out of this state is to RESTART the firmware
2910 * ...
2911 */
stephen hemmingerde5b8672013-12-18 14:16:47 -08002912static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
Vipul Pandya26f7cbc2012-09-26 02:39:42 +00002913{
2914 int ret = 0;
2915
2916 /*
2917 * If a legitimate mailbox is provided, issue a RESET command
2918 * with a HALT indication.
2919 */
2920 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2921 struct fw_reset_cmd c;
2922
2923 memset(&c, 0, sizeof(c));
2924 INIT_CMD(c, RESET, WRITE);
2925 c.val = htonl(PIORST | PIORSTMODE);
2926 c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
2927 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2928 }
2929
2930 /*
2931 * Normally we won't complete the operation if the firmware RESET
2932 * command fails but if our caller insists we'll go ahead and put the
2933 * uP into RESET. This can be useful if the firmware is hung or even
2934 * missing ... We'll have to take the risk of putting the uP into
2935 * RESET without the cooperation of firmware in that case.
2936 *
2937 * We also force the firmware's HALT flag to be on in case we bypassed
2938 * the firmware RESET command above or we're dealing with old firmware
2939 * which doesn't have the HALT capability. This will serve as a flag
2940 * for the incoming firmware to know that it's coming out of a HALT
2941 * rather than a RESET ... if it's new enough to understand that ...
2942 */
2943 if (ret == 0 || force) {
2944 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
2945 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
2946 FW_PCIE_FW_HALT);
2947 }
2948
2949 /*
2950 * And we always return the result of the firmware RESET command
2951 * even when we force the uP into RESET ...
2952 */
2953 return ret;
2954}
2955
2956/**
2957 * t4_fw_restart - restart the firmware by taking the uP out of RESET
2958 * @adap: the adapter
2959 * @reset: if we want to do a RESET to restart things
2960 *
2961 * Restart firmware previously halted by t4_fw_halt(). On successful
2962 * return the previous PF Master remains as the new PF Master and there
2963 * is no need to issue a new HELLO command, etc.
2964 *
2965 * We do this in two ways:
2966 *
2967 * 1. If we're dealing with newer firmware we'll simply want to take
2968 * the chip's microprocessor out of RESET. This will cause the
2969 * firmware to start up from its start vector. And then we'll loop
2970 * until the firmware indicates it's started again (PCIE_FW.HALT
2971 * reset to 0) or we timeout.
2972 *
2973 * 2. If we're dealing with older firmware then we'll need to RESET
2974 * the chip since older firmware won't recognize the PCIE_FW.HALT
2975 * flag and automatically RESET itself on startup.
2976 */
stephen hemmingerde5b8672013-12-18 14:16:47 -08002977static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
Vipul Pandya26f7cbc2012-09-26 02:39:42 +00002978{
2979 if (reset) {
2980 /*
2981 * Since we're directing the RESET instead of the firmware
2982 * doing it automatically, we need to clear the PCIE_FW.HALT
2983 * bit.
2984 */
2985 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
2986
2987 /*
2988 * If we've been given a valid mailbox, first try to get the
2989 * firmware to do the RESET. If that works, great and we can
2990 * return success. Otherwise, if we haven't been given a
2991 * valid mailbox or the RESET command failed, fall back to
2992 * hitting the chip with a hammer.
2993 */
2994 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2995 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
2996 msleep(100);
2997 if (t4_fw_reset(adap, mbox,
2998 PIORST | PIORSTMODE) == 0)
2999 return 0;
3000 }
3001
3002 t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
3003 msleep(2000);
3004 } else {
3005 int ms;
3006
3007 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
3008 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
3009 if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
3010 return 0;
3011 msleep(100);
3012 ms += 100;
3013 }
3014 return -ETIMEDOUT;
3015 }
3016 return 0;
3017}
3018
3019/**
3020 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
3021 * @adap: the adapter
3022 * @mbox: mailbox to use for the FW RESET command (if desired)
3023 * @fw_data: the firmware image to write
3024 * @size: image size
3025 * @force: force upgrade even if firmware doesn't cooperate
3026 *
3027 * Perform all of the steps necessary for upgrading an adapter's
3028 * firmware image. Normally this requires the cooperation of the
3029 * existing firmware in order to halt all existing activities
3030 * but if an invalid mailbox token is passed in we skip that step
3031 * (though we'll still put the adapter microprocessor into RESET in
3032 * that case).
3033 *
3034 * On successful return the new firmware will have been loaded and
3035 * the adapter will have been fully RESET losing all previous setup
3036 * state. On unsuccessful return the adapter may be completely hosed ...
3037 * positive errno indicates that the adapter is ~probably~ intact, a
3038 * negative errno indicates that things are looking bad ...
3039 */
stephen hemmingerde5b8672013-12-18 14:16:47 -08003040static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
3041 const u8 *fw_data, unsigned int size, int force)
Vipul Pandya26f7cbc2012-09-26 02:39:42 +00003042{
3043 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
3044 int reset, ret;
3045
3046 ret = t4_fw_halt(adap, mbox, force);
3047 if (ret < 0 && !force)
3048 return ret;
3049
3050 ret = t4_load_fw(adap, fw_data, size);
3051 if (ret < 0)
3052 return ret;
3053
3054 /*
3055 * Older versions of the firmware don't understand the new
3056 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
3057 * restart. So for newly loaded older firmware we'll have to do the
3058 * RESET for it so it starts up on a clean slate. We can tell if
3059 * the newly loaded firmware will handle this right by checking
3060 * its header flags to see if it advertises the capability.
3061 */
3062 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
3063 return t4_fw_restart(adap, mbox, reset);
3064}
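
/*
 * A minimal sketch of how the halt/load/restart sequence above is
 * typically driven; "adap", "mbox", "fw_data", "size" and "force" are
 * assumed to be supplied by the caller (e.g. fw_data/size obtained via
 * request_firmware()):
 *
 *	ret = t4_fw_upgrade(adap, mbox, fw_data, size, force);
 *	if (ret < 0)
 *		dev_err(adap->pdev_dev,
 *			"firmware upgrade failed, error %d\n", ret);
 *
 * On success the adapter has been fully RESET, so the caller must
 * re-contact the firmware (HELLO) and redo all configuration before
 * using the device again.
 */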
3065
Vipul Pandya636f9d32012-09-26 02:39:39 +00003066/**
3067 * t4_fixup_host_params - fix up host-dependent parameters
3068 * @adap: the adapter
3069 * @page_size: the host's Base Page Size
3070 * @cache_line_size: the host's Cache Line Size
3071 *
3072 * Various registers in T4 contain values which are dependent on the
3073 * host's Base Page and Cache Line Sizes. This function will fix all of
3074 * those registers with the appropriate values as passed in ...
3075 */
3076int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3077 unsigned int cache_line_size)
3078{
3079 unsigned int page_shift = fls(page_size) - 1;
3080 unsigned int sge_hps = page_shift - 10;
3081 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
3082 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
3083 unsigned int fl_align_log = fls(fl_align) - 1;
3084
3085 t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
3086 HOSTPAGESIZEPF0(sge_hps) |
3087 HOSTPAGESIZEPF1(sge_hps) |
3088 HOSTPAGESIZEPF2(sge_hps) |
3089 HOSTPAGESIZEPF3(sge_hps) |
3090 HOSTPAGESIZEPF4(sge_hps) |
3091 HOSTPAGESIZEPF5(sge_hps) |
3092 HOSTPAGESIZEPF6(sge_hps) |
3093 HOSTPAGESIZEPF7(sge_hps));
3094
3095 t4_set_reg_field(adap, SGE_CONTROL,
Vipul Pandya0dad9e92012-11-07 03:45:46 +00003096 INGPADBOUNDARY_MASK |
Vipul Pandya636f9d32012-09-26 02:39:39 +00003097 EGRSTATUSPAGESIZE_MASK,
3098 INGPADBOUNDARY(fl_align_log - 5) |
3099 EGRSTATUSPAGESIZE(stat_len != 64));
3100
3101 /*
3102 * Adjust various SGE Free List Host Buffer Sizes.
3103 *
3104 * This is something of a crock since we're using fixed indices into
3105 * the array which are also known by the sge.c code and the T4
3106 * Firmware Configuration File. We need to come up with a much better
3107 * approach to managing this array. For now, the first four entries
3108 * are:
3109 *
3110 * 0: Host Page Size
3111 * 1: 64KB
3112 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
3113 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
3114 *
3115 * For the single-MTU buffers in unpacked mode we need to include
3116 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
3117 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
3118 * Padding boundary. All of these are accommodated in the Factory
3119 * Default Firmware Configuration File but we need to adjust it for
3120 * this host's cache line size.
3121 */
3122 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
3123 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
3124 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
3125 & ~(fl_align-1));
3126 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
3127 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
3128 & ~(fl_align-1));
3129
3130 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
3131
3132 return 0;
3133}
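
/*
 * Worked example (a sketch, not driver code): for a host with a 4KB
 * Base Page Size and a 64-byte Cache Line Size the code above computes
 *
 *	page_shift = fls(4096) - 1 = 12, so sge_hps = 12 - 10 = 2
 *	stat_len   = 64 (cache line not larger than 64 bytes)
 *	fl_align   = 64, fl_align_log = 6, INGPADBOUNDARY field = 6 - 5 = 1
 *	             (i.e. a 64-byte Ingress Packet Padding boundary)
 *	HPZ0 field = page_shift - 12 = 0
 *
 * and SGE_FL_BUFFER_SIZE2/3 are simply rounded up to that 64-byte
 * alignment.
 */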
3134
3135/**
3136 * t4_fw_initialize - ask FW to initialize the device
3137 * @adap: the adapter
3138 * @mbox: mailbox to use for the FW command
3139 *
3140 * Issues a command to FW to partially initialize the device. This
3141 * performs initialization that generally doesn't depend on user input.
3142 */
3143int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
3144{
3145 struct fw_initialize_cmd c;
3146
3147 memset(&c, 0, sizeof(c));
3148 INIT_CMD(c, INITIALIZE, WRITE);
3149 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3150}
3151
3152/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003153 * t4_query_params - query FW or device parameters
3154 * @adap: the adapter
3155 * @mbox: mailbox to use for the FW command
3156 * @pf: the PF
3157 * @vf: the VF
3158 * @nparams: the number of parameters
3159 * @params: the parameter names
3160 * @val: the parameter values
3161 *
3162 * Reads the value of FW or device parameters. Up to 7 parameters can be
3163 * queried at once.
3164 */
3165int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3166 unsigned int vf, unsigned int nparams, const u32 *params,
3167 u32 *val)
3168{
3169 int i, ret;
3170 struct fw_params_cmd c;
3171 __be32 *p = &c.param[0].mnem;
3172
3173 if (nparams > 7)
3174 return -EINVAL;
3175
3176 memset(&c, 0, sizeof(c));
3177 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
3178 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
3179 FW_PARAMS_CMD_VFN(vf));
3180 c.retval_len16 = htonl(FW_LEN16(c));
3181 for (i = 0; i < nparams; i++, p += 2)
3182 *p = htonl(*params++);
3183
3184 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3185 if (ret == 0)
3186 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3187 *val++ = ntohl(*p);
3188 return ret;
3189}
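
/*
 * A minimal usage sketch (illustrative only): the 32-bit parameter
 * words are assumed to be encoded with the FW_PARAMS_* macros from
 * t4fw_api.h, and "adap", "mbox" and "pf" come from the caller.
 *
 *	u32 param = ...encoded parameter name... ;
 *	u32 val;
 *
 *	ret = t4_query_params(adap, mbox, pf, 0, 1, &param, &val);
 *	if (ret == 0)
 *		...use val...
 */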
3190
3191/**
Anish Bhatt688848b2014-06-19 21:37:13 -07003192 * t4_set_params_nosleep - sets FW or device parameters
3193 * @adap: the adapter
3194 * @mbox: mailbox to use for the FW command
3195 * @pf: the PF
3196 * @vf: the VF
3197 * @nparams: the number of parameters
3198 * @params: the parameter names
3199 * @val: the parameter values
3200 *
3201 * Does not ever sleep
3202 * Sets the value of FW or device parameters. Up to 7 parameters can be
3203 * specified at once.
3204 */
3205int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
3206 unsigned int pf, unsigned int vf,
3207 unsigned int nparams, const u32 *params,
3208 const u32 *val)
3209{
3210 struct fw_params_cmd c;
3211 __be32 *p = &c.param[0].mnem;
3212
3213 if (nparams > 7)
3214 return -EINVAL;
3215
3216 memset(&c, 0, sizeof(c));
3217 c.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
3218 FW_CMD_REQUEST | FW_CMD_WRITE |
3219 FW_PARAMS_CMD_PFN(pf) |
3220 FW_PARAMS_CMD_VFN(vf));
3221 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3222
3223 while (nparams--) {
3224 *p++ = cpu_to_be32(*params++);
3225 *p++ = cpu_to_be32(*val++);
3226 }
3227
3228 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3229}
3230
3231/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003232 * t4_set_params - sets FW or device parameters
3233 * @adap: the adapter
3234 * @mbox: mailbox to use for the FW command
3235 * @pf: the PF
3236 * @vf: the VF
3237 * @nparams: the number of parameters
3238 * @params: the parameter names
3239 * @val: the parameter values
3240 *
3241 * Sets the value of FW or device parameters. Up to 7 parameters can be
3242 * specified at once.
3243 */
3244int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3245 unsigned int vf, unsigned int nparams, const u32 *params,
3246 const u32 *val)
3247{
3248 struct fw_params_cmd c;
3249 __be32 *p = &c.param[0].mnem;
3250
3251 if (nparams > 7)
3252 return -EINVAL;
3253
3254 memset(&c, 0, sizeof(c));
3255 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
3256 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
3257 FW_PARAMS_CMD_VFN(vf));
3258 c.retval_len16 = htonl(FW_LEN16(c));
3259 while (nparams--) {
3260 *p++ = htonl(*params++);
3261 *p++ = htonl(*val++);
3262 }
3263
3264 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3265}
3266
3267/**
3268 * t4_cfg_pfvf - configure PF/VF resource limits
3269 * @adap: the adapter
3270 * @mbox: mailbox to use for the FW command
3271 * @pf: the PF being configured
3272 * @vf: the VF being configured
3273 * @txq: the max number of egress queues
3274 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
3275 * @rxqi: the max number of interrupt-capable ingress queues
3276 * @rxq: the max number of interruptless ingress queues
3277 * @tc: the PCI traffic class
3278 * @vi: the max number of virtual interfaces
3279 * @cmask: the channel access rights mask for the PF/VF
3280 * @pmask: the port access rights mask for the PF/VF
3281 * @nexact: the maximum number of exact MPS filters
3282 * @rcaps: read capabilities
3283 * @wxcaps: write/execute capabilities
3284 *
3285 * Configures resource limits and capabilities for a physical or virtual
3286 * function.
3287 */
3288int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
3289 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
3290 unsigned int rxqi, unsigned int rxq, unsigned int tc,
3291 unsigned int vi, unsigned int cmask, unsigned int pmask,
3292 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
3293{
3294 struct fw_pfvf_cmd c;
3295
3296 memset(&c, 0, sizeof(c));
3297 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
3298 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
3299 FW_PFVF_CMD_VFN(vf));
3300 c.retval_len16 = htonl(FW_LEN16(c));
3301 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
3302 FW_PFVF_CMD_NIQ(rxq));
Casey Leedom81323b72010-06-25 12:10:32 +00003303 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003304 FW_PFVF_CMD_PMASK(pmask) |
3305 FW_PFVF_CMD_NEQ(txq));
3306 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
3307 FW_PFVF_CMD_NEXACTF(nexact));
3308 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
3309 FW_PFVF_CMD_WX_CAPS(wxcaps) |
3310 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
3311 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3312}
3313
3314/**
3315 * t4_alloc_vi - allocate a virtual interface
3316 * @adap: the adapter
3317 * @mbox: mailbox to use for the FW command
3318 * @port: physical port associated with the VI
3319 * @pf: the PF owning the VI
3320 * @vf: the VF owning the VI
3321 * @nmac: number of MAC addresses needed (1 to 5)
3322 * @mac: the MAC addresses of the VI
3323 * @rss_size: size of RSS table slice associated with this VI
3324 *
3325 * Allocates a virtual interface for the given physical port. If @mac is
3326 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
3327 * @mac should be large enough to hold @nmac Ethernet addresses, they are
3328 * stored consecutively so the space needed is @nmac * 6 bytes.
3329 * Returns a negative error number or the non-negative VI id.
3330 */
3331int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
3332 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
3333 unsigned int *rss_size)
3334{
3335 int ret;
3336 struct fw_vi_cmd c;
3337
3338 memset(&c, 0, sizeof(c));
3339 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
3340 FW_CMD_WRITE | FW_CMD_EXEC |
3341 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
3342 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
3343 c.portid_pkd = FW_VI_CMD_PORTID(port);
3344 c.nmac = nmac - 1;
3345
3346 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3347 if (ret)
3348 return ret;
3349
3350 if (mac) {
3351 memcpy(mac, c.mac, sizeof(c.mac));
3352 switch (nmac) {
3353 case 5:
3354 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
3355 case 4:
3356 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
3357 case 3:
3358 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
3359 case 2:
3360 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
3361 }
3362 }
3363 if (rss_size)
3364 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00003365 return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003366}
3367
3368/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003369 * t4_set_rxmode - set Rx properties of a virtual interface
3370 * @adap: the adapter
3371 * @mbox: mailbox to use for the FW command
3372 * @viid: the VI id
3373 * @mtu: the new MTU or -1
3374 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
3375 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
3376 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00003377 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003378 * @sleep_ok: if true we may sleep while awaiting command completion
3379 *
3380 * Sets Rx properties of a virtual interface.
3381 */
3382int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00003383 int mtu, int promisc, int all_multi, int bcast, int vlanex,
3384 bool sleep_ok)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003385{
3386 struct fw_vi_rxmode_cmd c;
3387
3388 /* convert to FW values */
3389 if (mtu < 0)
3390 mtu = FW_RXMODE_MTU_NO_CHG;
3391 if (promisc < 0)
3392 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
3393 if (all_multi < 0)
3394 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
3395 if (bcast < 0)
3396 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00003397 if (vlanex < 0)
3398 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003399
3400 memset(&c, 0, sizeof(c));
3401 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
3402 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
3403 c.retval_len16 = htonl(FW_LEN16(c));
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00003404 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
3405 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
3406 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
3407 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
3408 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003409 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3410}
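
/*
 * Usage sketch: because each property accepts -1 for "no change", a
 * caller can, for example, turn on promiscuous mode while leaving MTU,
 * all-multi, broadcast and VLAN extraction untouched ("adap", "mbox"
 * and "viid" assumed from the caller):
 *
 *	ret = t4_set_rxmode(adap, mbox, viid, -1, 1, -1, -1, -1, true);
 */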
3411
3412/**
3413 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
3414 * @adap: the adapter
3415 * @mbox: mailbox to use for the FW command
3416 * @viid: the VI id
3417 * @free: if true any existing filters for this VI id are first removed
3418 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
3419 * @addr: the MAC address(es)
3420 * @idx: where to store the index of each allocated filter
3421 * @hash: pointer to hash address filter bitmap
3422 * @sleep_ok: call is allowed to sleep
3423 *
3424 * Allocates an exact-match filter for each of the supplied addresses and
3425 * sets it to the corresponding address. If @idx is not %NULL it should
3426 * have at least @naddr entries, each of which will be set to the index of
3427 * the filter allocated for the corresponding MAC address. If a filter
3428 * could not be allocated for an address its index is set to 0xffff.
3429 * If @hash is not %NULL addresses that fail to allocate an exact filter
3430 * are hashed and update the hash filter bitmap pointed at by @hash.
3431 *
3432 * Returns a negative error number or the number of filters allocated.
3433 */
3434int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
3435 unsigned int viid, bool free, unsigned int naddr,
3436 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
3437{
3438 int i, ret;
3439 struct fw_vi_mac_cmd c;
3440 struct fw_vi_mac_exact *p;
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303441 unsigned int max_naddr = is_t4(adap->params.chip) ?
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003442 NUM_MPS_CLS_SRAM_L_INSTANCES :
3443 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003444
3445 if (naddr > 7)
3446 return -EINVAL;
3447
3448 memset(&c, 0, sizeof(c));
3449 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3450 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
3451 FW_VI_MAC_CMD_VIID(viid));
3452 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
3453 FW_CMD_LEN16((naddr + 2) / 2));
3454
3455 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3456 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3457 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
3458 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
3459 }
3460
3461 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
3462 if (ret)
3463 return ret;
3464
3465 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3466 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3467
3468 if (idx)
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003469 idx[i] = index >= max_naddr ? 0xffff : index;
3470 if (index < max_naddr)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003471 ret++;
3472 else if (hash)
Dimitris Michailidisce9aeb52010-12-03 10:39:04 +00003473 *hash |= (1ULL << hash_mac_addr(addr[i]));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003474 }
3475 return ret;
3476}
3477
3478/**
3479 * t4_change_mac - modifies the exact-match filter for a MAC address
3480 * @adap: the adapter
3481 * @mbox: mailbox to use for the FW command
3482 * @viid: the VI id
3483 * @idx: index of existing filter for old value of MAC address, or -1
3484 * @addr: the new MAC address value
3485 * @persist: whether a new MAC allocation should be persistent
3486 * @add_smt: if true also add the address to the HW SMT
3487 *
3488 * Modifies an exact-match filter and sets it to the new MAC address.
3489 * Note that in general it is not possible to modify the value of a given
3490 * filter so the generic way to modify an address filter is to free the one
3491 * being used by the old address value and allocate a new filter for the
3492 * new address value. @idx can be -1 if the address is a new addition.
3493 *
3494 * Returns a negative error number or the index of the filter with the new
3495 * MAC value.
3496 */
3497int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
3498 int idx, const u8 *addr, bool persist, bool add_smt)
3499{
3500 int ret, mode;
3501 struct fw_vi_mac_cmd c;
3502 struct fw_vi_mac_exact *p = c.u.exact;
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303503 unsigned int max_mac_addr = is_t4(adap->params.chip) ?
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003504 NUM_MPS_CLS_SRAM_L_INSTANCES :
3505 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003506
3507 if (idx < 0) /* new allocation */
3508 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
3509 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
3510
3511 memset(&c, 0, sizeof(c));
3512 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3513 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
3514 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
3515 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3516 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
3517 FW_VI_MAC_CMD_IDX(idx));
3518 memcpy(p->macaddr, addr, sizeof(p->macaddr));
3519
3520 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3521 if (ret == 0) {
3522 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003523 if (ret >= max_mac_addr)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003524 ret = -ENOMEM;
3525 }
3526 return ret;
3527}
3528
3529/**
3530 * t4_set_addr_hash - program the MAC inexact-match hash filter
3531 * @adap: the adapter
3532 * @mbox: mailbox to use for the FW command
3533 * @viid: the VI id
3534 * @ucast: whether the hash filter should also match unicast addresses
3535 * @vec: the value to be written to the hash filter
3536 * @sleep_ok: call is allowed to sleep
3537 *
3538 * Sets the 64-bit inexact-match hash filter for a virtual interface.
3539 */
3540int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
3541 bool ucast, u64 vec, bool sleep_ok)
3542{
3543 struct fw_vi_mac_cmd c;
3544
3545 memset(&c, 0, sizeof(c));
3546 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3547 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
3548 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
3549 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
3550 FW_CMD_LEN16(1));
3551 c.u.hash.hashvec = cpu_to_be64(vec);
3552 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3553}
3554
3555/**
Anish Bhatt688848b2014-06-19 21:37:13 -07003556 * t4_enable_vi_params - enable/disable a virtual interface
3557 * @adap: the adapter
3558 * @mbox: mailbox to use for the FW command
3559 * @viid: the VI id
3560 * @rx_en: 1=enable Rx, 0=disable Rx
3561 * @tx_en: 1=enable Tx, 0=disable Tx
3562 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
3563 *
3564 * Enables/disables a virtual interface. Note that setting DCB Enable
3565 * only makes sense when enabling a Virtual Interface ...
3566 */
3567int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
3568 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
3569{
3570 struct fw_vi_enable_cmd c;
3571
3572 memset(&c, 0, sizeof(c));
3573 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3574 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3575
3576 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
3577 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c) |
3578 FW_VI_ENABLE_CMD_DCB_INFO(dcb_en));
Anish Bhatt30f00842014-08-05 16:05:23 -07003579 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
Anish Bhatt688848b2014-06-19 21:37:13 -07003580}
3581
3582/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003583 * t4_enable_vi - enable/disable a virtual interface
3584 * @adap: the adapter
3585 * @mbox: mailbox to use for the FW command
3586 * @viid: the VI id
3587 * @rx_en: 1=enable Rx, 0=disable Rx
3588 * @tx_en: 1=enable Tx, 0=disable Tx
3589 *
3590 * Enables/disables a virtual interface.
3591 */
3592int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
3593 bool rx_en, bool tx_en)
3594{
Anish Bhatt688848b2014-06-19 21:37:13 -07003595 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003596}
3597
3598/**
3599 * t4_identify_port - identify a VI's port by blinking its LED
3600 * @adap: the adapter
3601 * @mbox: mailbox to use for the FW command
3602 * @viid: the VI id
3603 * @nblinks: how many times to blink LED at 2.5 Hz
3604 *
3605 * Identifies a VI's port by blinking its LED.
3606 */
3607int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
3608 unsigned int nblinks)
3609{
3610 struct fw_vi_enable_cmd c;
3611
Vipul Pandya0062b152012-11-06 03:37:09 +00003612 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003613 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3614 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3615 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
3616 c.blinkdur = htons(nblinks);
3617 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3618}
3619
3620/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003621 * t4_iq_free - free an ingress queue and its FLs
3622 * @adap: the adapter
3623 * @mbox: mailbox to use for the FW command
3624 * @pf: the PF owning the queues
3625 * @vf: the VF owning the queues
3626 * @iqtype: the ingress queue type
3627 * @iqid: ingress queue id
3628 * @fl0id: FL0 queue id or 0xffff if no attached FL0
3629 * @fl1id: FL1 queue id or 0xffff if no attached FL1
3630 *
3631 * Frees an ingress queue and its associated FLs, if any.
3632 */
3633int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3634 unsigned int vf, unsigned int iqtype, unsigned int iqid,
3635 unsigned int fl0id, unsigned int fl1id)
3636{
3637 struct fw_iq_cmd c;
3638
3639 memset(&c, 0, sizeof(c));
3640 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
3641 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
3642 FW_IQ_CMD_VFN(vf));
3643 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
3644 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
3645 c.iqid = htons(iqid);
3646 c.fl0id = htons(fl0id);
3647 c.fl1id = htons(fl1id);
3648 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3649}
3650
3651/**
3652 * t4_eth_eq_free - free an Ethernet egress queue
3653 * @adap: the adapter
3654 * @mbox: mailbox to use for the FW command
3655 * @pf: the PF owning the queue
3656 * @vf: the VF owning the queue
3657 * @eqid: egress queue id
3658 *
3659 * Frees an Ethernet egress queue.
3660 */
3661int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3662 unsigned int vf, unsigned int eqid)
3663{
3664 struct fw_eq_eth_cmd c;
3665
3666 memset(&c, 0, sizeof(c));
3667 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
3668 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
3669 FW_EQ_ETH_CMD_VFN(vf));
3670 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
3671 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
3672 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3673}
3674
3675/**
3676 * t4_ctrl_eq_free - free a control egress queue
3677 * @adap: the adapter
3678 * @mbox: mailbox to use for the FW command
3679 * @pf: the PF owning the queue
3680 * @vf: the VF owning the queue
3681 * @eqid: egress queue id
3682 *
3683 * Frees a control egress queue.
3684 */
3685int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3686 unsigned int vf, unsigned int eqid)
3687{
3688 struct fw_eq_ctrl_cmd c;
3689
3690 memset(&c, 0, sizeof(c));
3691 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
3692 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
3693 FW_EQ_CTRL_CMD_VFN(vf));
3694 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
3695 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
3696 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3697}
3698
3699/**
3700 * t4_ofld_eq_free - free an offload egress queue
3701 * @adap: the adapter
3702 * @mbox: mailbox to use for the FW command
3703 * @pf: the PF owning the queue
3704 * @vf: the VF owning the queue
3705 * @eqid: egress queue id
3706 *
3707 * Frees an offload egress queue.
3708 */
3709int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3710 unsigned int vf, unsigned int eqid)
3711{
3712 struct fw_eq_ofld_cmd c;
3713
3714 memset(&c, 0, sizeof(c));
3715 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
3716 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
3717 FW_EQ_OFLD_CMD_VFN(vf));
3718 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
3719 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
3720 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3721}
3722
3723/**
3724 * t4_handle_fw_rpl - process a FW reply message
3725 * @adap: the adapter
3726 * @rpl: start of the FW message
3727 *
3728 * Processes a FW message, such as link state change messages.
3729 */
3730int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
3731{
3732 u8 opcode = *(const u8 *)rpl;
3733
3734 if (opcode == FW_PORT_CMD) { /* link/module state change message */
3735 int speed = 0, fc = 0;
3736 const struct fw_port_cmd *p = (void *)rpl;
3737 int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
3738 int port = adap->chan_map[chan];
3739 struct port_info *pi = adap2pinfo(adap, port);
3740 struct link_config *lc = &pi->link_cfg;
3741 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
3742 int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
3743 u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
3744
3745 if (stat & FW_PORT_CMD_RXPAUSE)
3746 fc |= PAUSE_RX;
3747 if (stat & FW_PORT_CMD_TXPAUSE)
3748 fc |= PAUSE_TX;
3749 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
Ben Hutchingse8b39012014-02-23 00:03:24 +00003750 speed = 100;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003751 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
Ben Hutchingse8b39012014-02-23 00:03:24 +00003752 speed = 1000;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003753 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
Ben Hutchingse8b39012014-02-23 00:03:24 +00003754 speed = 10000;
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05303755 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
Ben Hutchingse8b39012014-02-23 00:03:24 +00003756 speed = 40000;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003757
3758 if (link_ok != lc->link_ok || speed != lc->speed ||
3759 fc != lc->fc) { /* something changed */
3760 lc->link_ok = link_ok;
3761 lc->speed = speed;
3762 lc->fc = fc;
Hariprasad Shenai444018a2014-09-01 19:54:55 +05303763 lc->supported = be16_to_cpu(p->u.info.pcap);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003764 t4_os_link_changed(adap, port, link_ok);
3765 }
3766 if (mod != pi->mod_type) {
3767 pi->mod_type = mod;
3768 t4_os_portmod_changed(adap, port);
3769 }
3770 }
3771 return 0;
3772}
3773
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00003774static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003775{
3776 u16 val;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003777
Jiang Liue5c8ae52012-08-20 13:53:19 -06003778 if (pci_is_pcie(adapter->pdev)) {
3779 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003780 p->speed = val & PCI_EXP_LNKSTA_CLS;
3781 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3782 }
3783}
3784
3785/**
3786 * init_link_config - initialize a link's SW state
3787 * @lc: structure holding the link state
3788 * @caps: link capabilities
3789 *
3790 * Initializes the SW state maintained for each link, including the link's
3791 * capabilities and default speed/flow-control/autonegotiation settings.
3792 */
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00003793static void init_link_config(struct link_config *lc, unsigned int caps)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003794{
3795 lc->supported = caps;
3796 lc->requested_speed = 0;
3797 lc->speed = 0;
3798 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3799 if (lc->supported & FW_PORT_CAP_ANEG) {
3800 lc->advertising = lc->supported & ADVERT_MASK;
3801 lc->autoneg = AUTONEG_ENABLE;
3802 lc->requested_fc |= PAUSE_AUTONEG;
3803 } else {
3804 lc->advertising = 0;
3805 lc->autoneg = AUTONEG_DISABLE;
3806 }
3807}
3808
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00003809int t4_wait_dev_ready(struct adapter *adap)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003810{
3811 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
3812 return 0;
3813 msleep(500);
3814 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3815}
3816
Bill Pemberton91744942012-12-03 09:23:02 -05003817static int get_flash_params(struct adapter *adap)
Dimitris Michailidis900a6592010-06-18 10:05:27 +00003818{
3819 int ret;
3820 u32 info;
3821
3822 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
3823 if (!ret)
3824 ret = sf1_read(adap, 3, 0, 1, &info);
3825 t4_write_reg(adap, SF_OP, 0); /* unlock SF */
3826 if (ret)
3827 return ret;
3828
3829 if ((info & 0xff) != 0x20) /* not a Numonix flash */
3830 return -EINVAL;
3831 info >>= 16; /* log2 of size */
3832 if (info >= 0x14 && info < 0x18)
3833 adap->params.sf_nsec = 1 << (info - 16);
3834 else if (info == 0x18)
3835 adap->params.sf_nsec = 64;
3836 else
3837 return -EINVAL;
3838 adap->params.sf_size = 1 << info;
3839 adap->params.sf_fw_start =
3840 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
3841 return 0;
3842}
3843
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003844/**
3845 * t4_prep_adapter - prepare SW and HW for operation
3846 * @adapter: the adapter
3848 *
3849 * Initialize adapter SW state for the various HW modules, set initial
3850 * values for some adapter tunables, take PHYs out of reset, and
3851 * initialize the MDIO interface.
3852 */
Bill Pemberton91744942012-12-03 09:23:02 -05003853int t4_prep_adapter(struct adapter *adapter)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003854{
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003855 int ret, ver;
3856 uint16_t device_id;
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303857 u32 pl_rev;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003858
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00003859 ret = t4_wait_dev_ready(adapter);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003860 if (ret < 0)
3861 return ret;
3862
3863 get_pci_mode(adapter, &adapter->params.pci);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303864 pl_rev = G_REV(t4_read_reg(adapter, PL_REV));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003865
Dimitris Michailidis900a6592010-06-18 10:05:27 +00003866 ret = get_flash_params(adapter);
3867 if (ret < 0) {
3868 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
3869 return ret;
3870 }
3871
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003872 /* Retrieve adapter's device ID
3873 */
3874 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
3875 ver = device_id >> 12;
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303876 adapter->params.chip = 0;
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003877 switch (ver) {
3878 case CHELSIO_T4:
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303879 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003880 break;
3881 case CHELSIO_T5:
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303882 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003883 break;
3884 default:
3885 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3886 device_id);
3887 return -EINVAL;
3888 }
3889
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003890 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3891
3892 /*
3893 * Default port for debugging in case we can't reach FW.
3894 */
3895 adapter->params.nports = 1;
3896 adapter->params.portvec = 1;
Vipul Pandya636f9d32012-09-26 02:39:39 +00003897 adapter->params.vpd.cclk = 50000;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003898 return 0;
3899}
3900
Kumar Sanghvidcf7b6f2013-12-18 16:38:23 +05303901/**
3902 * t4_init_tp_params - initialize adap->params.tp
3903 * @adap: the adapter
3904 *
3905 * Initialize various fields of the adapter's TP Parameters structure.
3906 */
3907int t4_init_tp_params(struct adapter *adap)
3908{
3909 int chan;
3910 u32 v;
3911
3912 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3913 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
3914 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
3915
3916 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
3917 for (chan = 0; chan < NCHAN; chan++)
3918 adap->params.tp.tx_modq[chan] = chan;
3919
3920 /* Cache the adapter's Compressed Filter Mode and global Ingress
3921 * Configuration.
3922 */
3923 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3924 &adap->params.tp.vlan_pri_map, 1,
3925 TP_VLAN_PRI_MAP);
3926 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3927 &adap->params.tp.ingress_config, 1,
3928 TP_INGRESS_CONFIG);
3929
3930 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
3931 * shift positions of several elements of the Compressed Filter Tuple
3932 * for this adapter which we need frequently ...
3933 */
3934 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
3935 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
3936 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
3937 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
3938 F_PROTOCOL);
3939
3940 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
3941 * represents the presence of an Outer VLAN instead of a VNIC ID.
3942 */
3943 if ((adap->params.tp.ingress_config & F_VNIC) == 0)
3944 adap->params.tp.vnic_shift = -1;
3945
3946 return 0;
3947}
3948
3949/**
3950 * t4_filter_field_shift - calculate filter field shift
3951 * @adap: the adapter
3952 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
3953 *
3954 * Return the shift position of a filter field within the Compressed
3955 * Filter Tuple. The filter field is specified via its selection bit
3956 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
3957 */
3958int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
3959{
3960 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
3961 unsigned int sel;
3962 int field_shift;
3963
3964 if ((filter_mode & filter_sel) == 0)
3965 return -1;
3966
3967 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
3968 switch (filter_mode & sel) {
3969 case F_FCOE:
3970 field_shift += W_FT_FCOE;
3971 break;
3972 case F_PORT:
3973 field_shift += W_FT_PORT;
3974 break;
3975 case F_VNIC_ID:
3976 field_shift += W_FT_VNIC_ID;
3977 break;
3978 case F_VLAN:
3979 field_shift += W_FT_VLAN;
3980 break;
3981 case F_TOS:
3982 field_shift += W_FT_TOS;
3983 break;
3984 case F_PROTOCOL:
3985 field_shift += W_FT_PROTOCOL;
3986 break;
3987 case F_ETHERTYPE:
3988 field_shift += W_FT_ETHERTYPE;
3989 break;
3990 case F_MACMATCH:
3991 field_shift += W_FT_MACMATCH;
3992 break;
3993 case F_MPSHITTYPE:
3994 field_shift += W_FT_MPSHITTYPE;
3995 break;
3996 case F_FRAGMENTATION:
3997 field_shift += W_FT_FRAGMENTATION;
3998 break;
3999 }
4000 }
4001 return field_shift;
4002}
4003
Bill Pemberton91744942012-12-03 09:23:02 -05004004int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00004005{
4006 u8 addr[6];
4007 int ret, i, j = 0;
4008 struct fw_port_cmd c;
Dimitris Michailidisf7965642010-07-11 12:01:18 +00004009 struct fw_rss_vi_config_cmd rvc;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00004010
4011 memset(&c, 0, sizeof(c));
Dimitris Michailidisf7965642010-07-11 12:01:18 +00004012 memset(&rvc, 0, sizeof(rvc));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00004013
4014 for_each_port(adap, i) {
4015 unsigned int rss_size;
4016 struct port_info *p = adap2pinfo(adap, i);
4017
4018 while ((adap->params.portvec & (1 << j)) == 0)
4019 j++;
4020
4021 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
4022 FW_CMD_REQUEST | FW_CMD_READ |
4023 FW_PORT_CMD_PORTID(j));
4024 c.action_to_len16 = htonl(
4025 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
4026 FW_LEN16(c));
4027 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4028 if (ret)
4029 return ret;
4030
4031 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
4032 if (ret < 0)
4033 return ret;
4034
4035 p->viid = ret;
4036 p->tx_chan = j;
4037 p->lport = j;
4038 p->rss_size = rss_size;
4039 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
Thadeu Lima de Souza Cascardo40c9f8a2014-06-21 09:48:08 -03004040 adap->port[i]->dev_port = j;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00004041
4042 ret = ntohl(c.u.info.lstatus_to_modtype);
4043 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
4044 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
4045 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00004046 p->mod_type = FW_PORT_MOD_TYPE_NA;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00004047
Dimitris Michailidisf7965642010-07-11 12:01:18 +00004048 rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
4049 FW_CMD_REQUEST | FW_CMD_READ |
4050 FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
4051 rvc.retval_len16 = htonl(FW_LEN16(rvc));
4052 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
4053 if (ret)
4054 return ret;
4055 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
4056
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00004057 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
4058 j++;
4059 }
4060 return 0;
4061}