/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"

static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
			 const u8 *fw_data, unsigned int size, int force);

/**
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
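
/* Usage sketch (illustrative only): polling a hypothetical DONE bit of a
 * made-up EXAMPLE_CTRL_REG until it reads as 1, checking up to 100 times
 * with 5 us between checks and capturing the final register value:
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = t4_wait_op_done_val(adap, EXAMPLE_CTRL_REG, DONE_BIT, 1,
 *				  100, 5, &val);
 *	if (ret)	// -EAGAIN: the bit never reached the requested polarity
 *		return ret;
 */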

/**
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);	/* flush */
}
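
/* Usage sketch (illustrative only): setting a hypothetical 2-bit MODE field
 * occupying bits 5:4 of a made-up EXAMPLE_CFG_REG to the value 2 while
 * preserving every other bit.  Note that @val must already be shifted into
 * position within @mask; the helper ORs it in as-is:
 *
 *	t4_set_reg_field(adap, EXAMPLE_CFG_REG, 0x3U << 4, 2U << 4);
 */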

/**
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals,
		      unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect addresses
 * @data_reg: register holding the value for the indirect registers
 * @vals: values to write
 * @nregs: how many indirect registers to write
 * @start_idx: address of first indirect register to write
 *
 * Writes a sequential block of registers that are accessed indirectly
 * through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}
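
/* Usage sketch (illustrative only): dumping eight consecutive indirectly
 * addressed registers through a hypothetical EXAMPLE_ADDR_REG/EXAMPLE_DATA_REG
 * pair, starting at index 0.  The sequence is not atomic, so callers are
 * expected to serialize access to the address/data pair themselves:
 *
 *	u32 vals[8];
 *
 *	t4_read_indirect(adap, EXAMPLE_ADDR_REG, EXAMPLE_DATA_REG,
 *			 vals, ARRAY_SIZE(vals), 0);
 */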
145
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000146/*
Hariprasad Shenai0abfd152014-06-27 19:23:48 +0530147 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
148 * mechanism. This guarantees that we get the real value even if we're
149 * operating within a Virtual Machine and the Hypervisor is trapping our
150 * Configuration Space accesses.
151 */
152void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
153{
154 u32 req = ENABLE | FUNCTION(adap->fn) | reg;
155
156 if (is_t4(adap->params.chip))
157 req |= F_LOCALCFG;
158
159 t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req);
160 *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA);
161
162 /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
163 * Configuration Space read. (None of the other fields matter when
164 * ENABLE is 0 so a simple register write is easier than a
165 * read-modify-write via t4_set_reg_field().)
166 */
167 t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
168}
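
/* Usage sketch (illustrative only): reading the Vendor/Device ID dword
 * (config space offset 0, i.e. PCI_VENDOR_ID) via the backdoor instead of
 * pci_read_config_dword(), so a trapping hypervisor cannot rewrite it:
 *
 *	u32 id;
 *
 *	t4_hw_pci_read_cfg4(adap, PCI_VENDOR_ID, &id);
 *	dev_info(adap->pdev_dev, "vendor %#06x device %#06x\n",
 *		 id & 0xffff, id >> 16);
 */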

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
	dev_err(adap->pdev_dev,
		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		(unsigned long long)t4_read_reg64(adap, data_reg),
		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
}

/**
 * t4_wr_mbox_meat - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command.  If @rpl is not %NULL it is used to
 * store the FW's reply to the command.  The command and its optional
 * reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 * to respond.  @sleep_ok determines whether we may sleep while awaiting
 * the response.  If sleeping is allowed we use progressive backoff,
 * otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);	/* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];	/* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			if (!(v & MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);

			if (FW_CMD_RETVAL_GET((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_GET((int)res);
		}
	}

	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	return -ETIMEDOUT;
}
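
/* Usage sketch (illustrative only): most callers reach this function through
 * the t4_wr_mbox() wrapper declared in cxgb4.h.  A minimal command/reply
 * exchange, here a firmware RESET command built the same way other commands
 * in this file are built, could look like:
 *
 *	struct fw_reset_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) | FW_CMD_REQUEST |
 *			      FW_CMD_WRITE);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 *	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
 */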

/**
 * t4_mc_read - read from MC through backdoor accesses
 * @adap: the adapter
 * @idx: which MC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
	u32 mc_bist_status_rdata, mc_bist_data_pattern;

	if (is_t4(adap->params.chip)) {
		mc_bist_cmd = MC_BIST_CMD;
		mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
		mc_bist_cmd_len = MC_BIST_CMD_LEN;
		mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
		mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len, 64);
	t4_write_reg(adap, mc_bist_data_pattern, 0xc);
	t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
		     BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 * t4_edc_read - read from EDC through backdoor accesses
 * @adap: the adapter
 * @idx: which EDC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;

	if (is_t4(adap->params.chip)) {
		edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
		edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
						idx);
	} else {
		edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern =
			EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
		edc_bist_status_rdata =
			EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
	}

	if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd,
		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}
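
/* Usage sketch (illustrative only): fetching the 64-byte line of EDC 0 that
 * covers byte address 0x1000, together with its ECC word.  The buffer must
 * hold 16 words (64 bytes):
 *
 *	__be32 line[16];
 *	u64 ecc;
 *	int ret;
 *
 *	ret = t4_edc_read(adap, 0, 0x1000, line, &ecc);
 *	if (ret)	// -EBUSY if a BIST command is already running
 *		return ret;
 */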

/**
 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 * @adap: the adapter
 * @win: PCI-E Memory Window to use
 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 * @addr: address within indicated memory type
 * @len: amount of memory to transfer
 * @buf: host memory buffer
 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 * Reads/writes an [almost] arbitrary memory region in the firmware: the
 * firmware memory address and host buffer must be aligned on 32-bit
 * boundaries; the length may be arbitrary.  The memory is transferred as
 * a raw byte sequence from/to the firmware's memory.  If this memory
 * contains data structures which contain multi-byte integers, it's the
 * caller's responsibility to perform appropriate byte order conversions.
 */
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
		 u32 len, __be32 *buf, int dir)
{
	u32 pos, offset, resid, memoffset;
	u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;

	/* Argument sanity checks ...
	 */
	if (addr & 0x3)
		return -EINVAL;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware.  So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/* Offset into the region of memory which is being accessed
	 * MEM_EDC0 = 0
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- T4
	 * MEM_MC0  = 2 -- For T5
	 * MEM_MC1  = 3 -- For T5
	 */
	edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR));
	if (mtype != MEM_MC1)
		memoffset = (mtype * (edc_size * 1024 * 1024));
	else {
		mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap,
						       MA_EXT_MEMORY_BAR));
		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
	}

	/* Determine the PCIE_MEM_ACCESS_OFFSET */
	addr = addr + memoffset;

	/* Each PCI-E Memory Window is programmed with a window size -- or
	 * "aperture" -- which controls the granularity of its mapping onto
	 * adapter memory.  We need to grab that aperture in order to know
	 * how to use the specified window.  The window is also programmed
	 * with the base address of the Memory Window in BAR0's address
	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
	 * the address is relative to BAR0.
	 */
	mem_reg = t4_read_reg(adap,
			      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN,
						  win));
	mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10);
	mem_base = GET_PCIEOFST(mem_reg) << 10;
	if (is_t4(adap->params.chip))
		mem_base -= adap->t4_bar0;
	win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.
	 */
	pos = addr & ~(mem_aperture-1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.  (Read it back to ensure that changes propagate before we
	 * attempt to use the new value.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win),
		     pos | win_pf);
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = (__force __be32) t4_read_reg(adap,
							      mem_base + offset);
		else
			t4_write_reg(adap, mem_base + offset,
				     (__force u32) *buf++);
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.  Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_write_reg(adap,
				     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
							 win), pos | win_pf);
			t4_read_reg(adap,
				    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
							win));
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount.  The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid) {
		union {
			__be32 word;
			char byte[4];
		} last;
		unsigned char *bp;
		int i;

		if (dir == T4_MEMORY_READ) {
			last.word = (__force __be32) t4_read_reg(adap,
								 mem_base + offset);
			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
				bp[i] = last.byte[i];
		} else {
			last.word = *buf;
			for (i = resid; i < 4; i++)
				last.byte[i] = 0;
			t4_write_reg(adap, mem_base + offset,
				     (__force u32) last.word);
		}
	}

	return 0;
}
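
/* Usage sketch (illustrative only): reading the first 128 bytes of EDC0
 * through memory window 0.  The window name MEMWIN_NIC is an assumption
 * borrowed from cxgb4.h; substitute whatever window index the caller owns:
 *
 *	__be32 data[32];
 *	int ret;
 *
 *	ret = t4_memory_rw(adap, MEMWIN_NIC, MEM_EDC0, 0, sizeof(data),
 *			   data, T4_MEMORY_READ);
 *	// data[] now holds a raw big-endian byte image; convert as needed.
 */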

#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            1024

/**
 * t4_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: whether to enable or disable write protection
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
	unsigned int v = enable ? 0xc : 0;
	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
	return ret < 0 ? ret : 0;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	u32 cclk_param, cclk_val;
	int i, ret, addr;
	int ec, sn, pn;
	u8 *vpd, csum;
	unsigned int vpdr_len, kw_offset, id_len;

	vpd = vmalloc(VPD_LEN);
	if (!vpd)
		return -ENOMEM;

	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
	if (ret < 0)
		goto out;
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
	if (ret < 0)
		goto out;

	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
		ret = -EINVAL;
		goto out;
	}

	id_len = pci_vpd_lrdt_size(vpd);
	if (id_len > ID_LEN)
		id_len = ID_LEN;

	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
		ret = -EINVAL;
		goto out;
	}

	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
	if (vpdr_len + kw_offset > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		ret = -EINVAL;
		goto out;
	}

#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		ret = -EINVAL; \
		goto out; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		ret = -EINVAL;
		goto out;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
#undef FIND_VPD_KW

	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);
	i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strim(p->pn);

	/*
	 * Ask firmware for the Core Clock since it knows how to translate the
	 * Reference Clock ('V2') VPD field into a Core Clock value ...
	 */
	cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		      FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
	ret = t4_query_params(adapter, adapter->mbox, 0, 0,
			      1, &cclk_param, &cclk_val);

out:
	vfree(vpd);
	if (ret)
		return ret;
	p->cclk = cclk_val;

	return 0;
}
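
/* Usage sketch (illustrative only): the VPD parameters are typically read
 * once at probe time and then quoted in log messages and ethtool output:
 *
 *	struct vpd_params vpd;
 *	int ret;
 *
 *	ret = get_vpd_params(adap, &vpd);
 *	if (ret)
 *		return ret;
 *	dev_info(adap->pdev_dev, "S/N %s, P/N %s, core clock %u\n",
 *		 vpd.sn, vpd.pn, vpd.cclk);
 */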

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_DATA, val);
	t4_write_reg(adapter, SF_OP, lock |
		     cont | BYTECNT(byte_cnt - 1) | OP_WR);
	return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}
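
/* Usage sketch (illustrative only): the @cont/@lock arguments let several
 * 1-4 byte transactions be chained into one larger flash command.  Reading
 * the 3-byte JEDEC ID, for instance, could look like this (SF_RD_ID is
 * defined above but not used in the code shown here):
 *
 *	u32 info;
 *	int ret;
 *
 *	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);	// opcode, keep chaining
 *	if (!ret)
 *		ret = sf1_read(adapter, 3, 0, 1, &info);	// manufacturer/device
 *	t4_write_reg(adapter, SF_OP, 0);		// unlock SF
 */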

/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP, 0);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32) (htonl(*data));
	}
	return 0;
}

/**
 * t4_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.  All the data must be written to the same page.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 * t4_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 * t4_get_tp_version - read the TP microcode version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}
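
/* Usage sketch (illustrative only): the version word read above packs
 * major/minor/micro/build fields which are unpacked with the
 * FW_HDR_FW_VER_*_GET macros also used later in this file:
 *
 *	u32 vers;
 *
 *	if (!t4_get_fw_version(adap, &vers))
 *		dev_info(adap->pdev_dev, "firmware %u.%u.%u.%u\n",
 *			 FW_HDR_FW_VER_MAJOR_GET(vers),
 *			 FW_HDR_FW_VER_MINOR_GET(vers),
 *			 FW_HDR_FW_VER_MICRO_GET(vers),
 *			 FW_HDR_FW_VER_BUILD_GET(vers));
 */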

/* Is the given firmware API compatible with the one the driver was compiled
 * with?
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{
	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}

/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 */
static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
				int k, int c)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version supported with this driver";
		goto install;
	}

	return 0;

install:
	dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
		"installing firmware %u.%u.%u.%u on card.\n",
		FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
		FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason,
		FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
		FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));

	return 1;
}

int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state,
	       int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			     sizeof(*card_fw) / sizeof(uint32_t),
			     (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		dev_err(adap->pdev_dev,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver))) {
		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			dev_err(adap->pdev_dev,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
			"chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			state,
			FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d),
			FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d),
			FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
			FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c),
			FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
			FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}

/**
 * t4_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			dev_err(adapter->pdev_dev,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 * t4_flash_cfg_addr - return the address of the flash configuration file
 * @adapter: the adapter
 *
 * Return the address within the flash where the Firmware Configuration
 * File is stored.
 */
unsigned int t4_flash_cfg_addr(struct adapter *adapter)
{
	if (adapter->params.sf_size == 0x100000)
		return FLASH_FPGA_CFG_START;
	else
		return FLASH_CFG_START;
}

/**
 * t4_load_fw - download firmware
 * @adap: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	return ret;
}

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_ANEG)

/**
 * t4_link_start - apply link configuration to MAC/PHY
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_restart_aneg - restart autonegotiation
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 *
 * Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
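
/* Usage sketch (illustrative only), assuming a struct port_info *pi for the
 * port being configured, as the rest of the driver uses: replay the cached
 * link configuration and then restart autonegotiation through the PF's
 * mailbox:
 *
 *	pi->link_cfg.autoneg = AUTONEG_ENABLE;
 *	ret = t4_link_start(adap, adap->mbox, pi->tx_chan, &pi->link_cfg);
 *	if (!ret)
 *		ret = t4_restart_aneg(adap, adap->mbox, pi->tx_chan);
 */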
1246
Vipul Pandya8caa1e82012-05-18 15:29:25 +05301247typedef void (*int_handler_t)(struct adapter *adap);
1248
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001249struct intr_info {
1250 unsigned int mask; /* bits to check in interrupt status */
1251 const char *msg; /* message to print or NULL */
1252 short stat_idx; /* stat counter to increment or -1 */
1253 unsigned short fatal; /* whether the condition reported is fatal */
Vipul Pandya8caa1e82012-05-18 15:29:25 +05301254 int_handler_t int_handler; /* platform-specific int handler */
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001255};
1256
1257/**
1258 * t4_handle_intr_status - table driven interrupt handler
1259 * @adapter: the adapter that generated the interrupt
1260 * @reg: the interrupt status register to process
1261 * @acts: table of interrupt actions
1262 *
1263 * A table driven interrupt handler that applies a set of masks to an
1264 * interrupt status word and performs the corresponding actions if the
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001265 * interrupts described by the mask have occurred. The actions include
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001266 * optionally emitting a warning or alert message. The table is terminated
1267 * by an entry specifying mask 0. Returns the number of fatal interrupt
1268 * conditions.
1269 */
1270static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1271 const struct intr_info *acts)
1272{
1273 int fatal = 0;
1274 unsigned int mask = 0;
1275 unsigned int status = t4_read_reg(adapter, reg);
1276
1277 for ( ; acts->mask; ++acts) {
1278 if (!(status & acts->mask))
1279 continue;
1280 if (acts->fatal) {
1281 fatal++;
1282 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1283 status & acts->mask);
1284 } else if (acts->msg && printk_ratelimit())
1285 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1286 status & acts->mask);
Vipul Pandya8caa1e82012-05-18 15:29:25 +05301287 if (acts->int_handler)
1288 acts->int_handler(adapter);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001289 mask |= acts->mask;
1290 }
1291 status &= mask;
1292 if (status) /* clear processed interrupts */
1293 t4_write_reg(adapter, reg, status);
1294 return fatal;
1295}
1296
1297/*
1298 * Interrupt handler for the PCIE module.
1299 */
1300static void pcie_intr_handler(struct adapter *adapter)
1301{
Joe Perches005b5712010-12-14 21:36:53 +00001302 static const struct intr_info sysbus_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001303 { RNPP, "RXNP array parity error", -1, 1 },
1304 { RPCP, "RXPC array parity error", -1, 1 },
1305 { RCIP, "RXCIF array parity error", -1, 1 },
1306 { RCCP, "Rx completions control array parity error", -1, 1 },
1307 { RFTP, "RXFT array parity error", -1, 1 },
1308 { 0 }
1309 };
Joe Perches005b5712010-12-14 21:36:53 +00001310 static const struct intr_info pcie_port_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001311 { TPCP, "TXPC array parity error", -1, 1 },
1312 { TNPP, "TXNP array parity error", -1, 1 },
1313 { TFTP, "TXFT array parity error", -1, 1 },
1314 { TCAP, "TXCA array parity error", -1, 1 },
1315 { TCIP, "TXCIF array parity error", -1, 1 },
1316 { RCAP, "RXCA array parity error", -1, 1 },
1317 { OTDD, "outbound request TLP discarded", -1, 1 },
1318 { RDPE, "Rx data parity error", -1, 1 },
1319 { TDUE, "Tx uncorrectable data error", -1, 1 },
1320 { 0 }
1321 };
Joe Perches005b5712010-12-14 21:36:53 +00001322 static const struct intr_info pcie_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001323 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1324 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1325 { MSIDATAPERR, "MSI data parity error", -1, 1 },
1326 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1327 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1328 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1329 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1330 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1331 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1332 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1333 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1334 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1335 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1336 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1337 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1338 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1339 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1340 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1341 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1342 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1343 { FIDPERR, "PCI FID parity error", -1, 1 },
1344 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1345 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
1346 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1347 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1348 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
1349 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
1350 { PCIESINT, "PCI core secondary fault", -1, 1 },
1351 { PCIEPINT, "PCI core primary fault", -1, 1 },
1352 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
1353 { 0 }
1354 };
1355
Santosh Rastapur0a57a532013-03-14 05:08:49 +00001356 static struct intr_info t5_pcie_intr_info[] = {
1357 { MSTGRPPERR, "Master Response Read Queue parity error",
1358 -1, 1 },
1359 { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
1360 { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
1361 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1362 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1363 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1364 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1365 { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
1366 -1, 1 },
1367 { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
1368 -1, 1 },
1369 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1370 { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
1371 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1372 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1373 { DREQWRPERR, "PCI DMA channel write request parity error",
1374 -1, 1 },
1375 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1376 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
 1377		{ HREQWRPERR, "PCI HMA channel write request parity error", -1, 1 },
1378 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1379 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1380 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1381 { FIDPERR, "PCI FID parity error", -1, 1 },
 1382		{ VFIDPERR, "PCI VFID parity error", -1, 1 },
1383 { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
1384 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1385 { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
1386 -1, 1 },
1387 { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
1388 { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
1389 { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
1390 { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
1391 { READRSPERR, "Outbound read error", -1, 0 },
1392 { 0 }
1393 };
1394
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001395 int fat;
1396
1397 fat = t4_handle_intr_status(adapter,
1398 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1399 sysbus_intr_info) +
1400 t4_handle_intr_status(adapter,
1401 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1402 pcie_port_intr_info) +
Santosh Rastapur0a57a532013-03-14 05:08:49 +00001403 t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05301404 is_t4(adapter->params.chip) ?
Santosh Rastapur0a57a532013-03-14 05:08:49 +00001405 pcie_intr_info : t5_pcie_intr_info);
1406
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001407 if (fat)
1408 t4_fatal_err(adapter);
1409}
1410
1411/*
1412 * TP interrupt handler.
1413 */
1414static void tp_intr_handler(struct adapter *adapter)
1415{
Joe Perches005b5712010-12-14 21:36:53 +00001416 static const struct intr_info tp_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001417 { 0x3fffffff, "TP parity error", -1, 1 },
1418 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1419 { 0 }
1420 };
1421
1422 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1423 t4_fatal_err(adapter);
1424}
1425
1426/*
1427 * SGE interrupt handler.
1428 */
1429static void sge_intr_handler(struct adapter *adapter)
1430{
1431 u64 v;
1432
Joe Perches005b5712010-12-14 21:36:53 +00001433 static const struct intr_info sge_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001434 { ERR_CPL_EXCEED_IQE_SIZE,
1435 "SGE received CPL exceeding IQE size", -1, 1 },
1436 { ERR_INVALID_CIDX_INC,
1437 "SGE GTS CIDX increment too large", -1, 0 },
1438 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
Vipul Pandya840f3002012-09-05 02:01:55 +00001439 { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
1440 { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
1441 { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001442 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1443 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1444 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1445 0 },
1446 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1447 0 },
1448 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1449 0 },
1450 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1451 0 },
1452 { ERR_ING_CTXT_PRIO,
1453 "SGE too many priority ingress contexts", -1, 0 },
1454 { ERR_EGR_CTXT_PRIO,
1455 "SGE too many priority egress contexts", -1, 0 },
1456 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1457 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1458 { 0 }
1459 };
1460
1461 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
Vipul Pandya8caa1e82012-05-18 15:29:25 +05301462 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001463 if (v) {
1464 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
Vipul Pandya8caa1e82012-05-18 15:29:25 +05301465 (unsigned long long)v);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001466 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1467 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1468 }
1469
1470 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1471 v != 0)
1472 t4_fatal_err(adapter);
1473}
1474
1475/*
1476 * CIM interrupt handler.
1477 */
1478static void cim_intr_handler(struct adapter *adapter)
1479{
Joe Perches005b5712010-12-14 21:36:53 +00001480 static const struct intr_info cim_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001481 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1482 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1483 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1484 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1485 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1486 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1487 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1488 { 0 }
1489 };
Joe Perches005b5712010-12-14 21:36:53 +00001490 static const struct intr_info cim_upintr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001491 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1492 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1493 { ILLWRINT, "CIM illegal write", -1, 1 },
1494 { ILLRDINT, "CIM illegal read", -1, 1 },
1495 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1496 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1497 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1498 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1499 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1500 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1501 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1502 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1503 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1504 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1505 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1506 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1507 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1508 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1509 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1510 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1511 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1512 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1513 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1514 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1515 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1516 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1517 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1518 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1519 { 0 }
1520 };
1521
1522 int fat;
1523
1524 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1525 cim_intr_info) +
1526 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1527 cim_upintr_info);
1528 if (fat)
1529 t4_fatal_err(adapter);
1530}
1531
1532/*
1533 * ULP RX interrupt handler.
1534 */
1535static void ulprx_intr_handler(struct adapter *adapter)
1536{
Joe Perches005b5712010-12-14 21:36:53 +00001537 static const struct intr_info ulprx_intr_info[] = {
Dimitris Michailidis91e9a1e2010-06-18 10:05:33 +00001538 { 0x1800000, "ULPRX context error", -1, 1 },
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001539 { 0x7fffff, "ULPRX parity error", -1, 1 },
1540 { 0 }
1541 };
1542
1543 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1544 t4_fatal_err(adapter);
1545}
1546
1547/*
1548 * ULP TX interrupt handler.
1549 */
1550static void ulptx_intr_handler(struct adapter *adapter)
1551{
Joe Perches005b5712010-12-14 21:36:53 +00001552 static const struct intr_info ulptx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001553 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1554 0 },
1555 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1556 0 },
1557 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1558 0 },
1559 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1560 0 },
1561 { 0xfffffff, "ULPTX parity error", -1, 1 },
1562 { 0 }
1563 };
1564
1565 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1566 t4_fatal_err(adapter);
1567}
1568
1569/*
1570 * PM TX interrupt handler.
1571 */
1572static void pmtx_intr_handler(struct adapter *adapter)
1573{
Joe Perches005b5712010-12-14 21:36:53 +00001574 static const struct intr_info pmtx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001575 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1576 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1577 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1578 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1579 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1580 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1581 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1582 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1583 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1584 { 0 }
1585 };
1586
1587 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1588 t4_fatal_err(adapter);
1589}
1590
1591/*
1592 * PM RX interrupt handler.
1593 */
1594static void pmrx_intr_handler(struct adapter *adapter)
1595{
Joe Perches005b5712010-12-14 21:36:53 +00001596 static const struct intr_info pmrx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001597 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1598 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1599 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1600 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1601 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1602 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1603 { 0 }
1604 };
1605
1606 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1607 t4_fatal_err(adapter);
1608}
1609
1610/*
1611 * CPL switch interrupt handler.
1612 */
1613static void cplsw_intr_handler(struct adapter *adapter)
1614{
Joe Perches005b5712010-12-14 21:36:53 +00001615 static const struct intr_info cplsw_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001616 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1617 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1618 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1619 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1620 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1621 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1622 { 0 }
1623 };
1624
1625 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1626 t4_fatal_err(adapter);
1627}
1628
1629/*
1630 * LE interrupt handler.
1631 */
1632static void le_intr_handler(struct adapter *adap)
1633{
Joe Perches005b5712010-12-14 21:36:53 +00001634 static const struct intr_info le_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001635 { LIPMISS, "LE LIP miss", -1, 0 },
1636 { LIP0, "LE 0 LIP error", -1, 0 },
1637 { PARITYERR, "LE parity error", -1, 1 },
1638 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1639 { REQQPARERR, "LE request queue parity error", -1, 1 },
1640 { 0 }
1641 };
1642
1643 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1644 t4_fatal_err(adap);
1645}
1646
1647/*
1648 * MPS interrupt handler.
1649 */
1650static void mps_intr_handler(struct adapter *adapter)
1651{
Joe Perches005b5712010-12-14 21:36:53 +00001652 static const struct intr_info mps_rx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001653 { 0xffffff, "MPS Rx parity error", -1, 1 },
1654 { 0 }
1655 };
Joe Perches005b5712010-12-14 21:36:53 +00001656 static const struct intr_info mps_tx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001657 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1658 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1659 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1660 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1661 { BUBBLE, "MPS Tx underflow", -1, 1 },
1662 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1663 { FRMERR, "MPS Tx framing error", -1, 1 },
1664 { 0 }
1665 };
Joe Perches005b5712010-12-14 21:36:53 +00001666 static const struct intr_info mps_trc_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001667 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1668 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1669 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1670 { 0 }
1671 };
Joe Perches005b5712010-12-14 21:36:53 +00001672 static const struct intr_info mps_stat_sram_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001673 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1674 { 0 }
1675 };
Joe Perches005b5712010-12-14 21:36:53 +00001676 static const struct intr_info mps_stat_tx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001677 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1678 { 0 }
1679 };
Joe Perches005b5712010-12-14 21:36:53 +00001680 static const struct intr_info mps_stat_rx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001681 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1682 { 0 }
1683 };
Joe Perches005b5712010-12-14 21:36:53 +00001684 static const struct intr_info mps_cls_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001685 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1686 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1687 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1688 { 0 }
1689 };
1690
1691 int fat;
1692
1693 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1694 mps_rx_intr_info) +
1695 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1696 mps_tx_intr_info) +
1697 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1698 mps_trc_intr_info) +
1699 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1700 mps_stat_sram_intr_info) +
1701 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1702 mps_stat_tx_intr_info) +
1703 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1704 mps_stat_rx_intr_info) +
1705 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1706 mps_cls_intr_info);
1707
1708 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1709 RXINT | TXINT | STATINT);
1710 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1711 if (fat)
1712 t4_fatal_err(adapter);
1713}
1714
1715#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1716
1717/*
1718 * EDC/MC interrupt handler.
1719 */
1720static void mem_intr_handler(struct adapter *adapter, int idx)
1721{
1722 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
1723
1724 unsigned int addr, cnt_addr, v;
1725
1726 if (idx <= MEM_EDC1) {
1727 addr = EDC_REG(EDC_INT_CAUSE, idx);
1728 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1729 } else {
1730 addr = MC_INT_CAUSE;
1731 cnt_addr = MC_ECC_STATUS;
1732 }
1733
1734 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1735 if (v & PERR_INT_CAUSE)
1736 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1737 name[idx]);
1738 if (v & ECC_CE_INT_CAUSE) {
1739 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1740
1741 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1742 if (printk_ratelimit())
1743 dev_warn(adapter->pdev_dev,
1744 "%u %s correctable ECC data error%s\n",
1745 cnt, name[idx], cnt > 1 ? "s" : "");
1746 }
1747 if (v & ECC_UE_INT_CAUSE)
1748 dev_alert(adapter->pdev_dev,
1749 "%s uncorrectable ECC data error\n", name[idx]);
1750
1751 t4_write_reg(adapter, addr, v);
1752 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1753 t4_fatal_err(adapter);
1754}
1755
1756/*
1757 * MA interrupt handler.
1758 */
1759static void ma_intr_handler(struct adapter *adap)
1760{
1761 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1762
1763 if (status & MEM_PERR_INT_CAUSE)
1764 dev_alert(adap->pdev_dev,
1765 "MA parity error, parity status %#x\n",
1766 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1767 if (status & MEM_WRAP_INT_CAUSE) {
1768 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1769 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1770 "client %u to address %#x\n",
1771 MEM_WRAP_CLIENT_NUM_GET(v),
1772 MEM_WRAP_ADDRESS_GET(v) << 4);
1773 }
1774 t4_write_reg(adap, MA_INT_CAUSE, status);
1775 t4_fatal_err(adap);
1776}
1777
1778/*
1779 * SMB interrupt handler.
1780 */
1781static void smb_intr_handler(struct adapter *adap)
1782{
Joe Perches005b5712010-12-14 21:36:53 +00001783 static const struct intr_info smb_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001784 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1785 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1786 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1787 { 0 }
1788 };
1789
1790 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1791 t4_fatal_err(adap);
1792}
1793
1794/*
1795 * NC-SI interrupt handler.
1796 */
1797static void ncsi_intr_handler(struct adapter *adap)
1798{
Joe Perches005b5712010-12-14 21:36:53 +00001799 static const struct intr_info ncsi_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001800 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1801 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1802 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1803 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1804 { 0 }
1805 };
1806
1807 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1808 t4_fatal_err(adap);
1809}
1810
1811/*
1812 * XGMAC interrupt handler.
1813 */
1814static void xgmac_intr_handler(struct adapter *adap, int port)
1815{
Santosh Rastapur0a57a532013-03-14 05:08:49 +00001816 u32 v, int_cause_reg;
1817
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05301818 if (is_t4(adap->params.chip))
Santosh Rastapur0a57a532013-03-14 05:08:49 +00001819 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
1820 else
1821 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
1822
1823 v = t4_read_reg(adap, int_cause_reg);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001824
1825 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1826 if (!v)
1827 return;
1828
1829 if (v & TXFIFO_PRTY_ERR)
1830 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1831 port);
1832 if (v & RXFIFO_PRTY_ERR)
1833 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1834 port);
 1835	t4_write_reg(adap, int_cause_reg, v);
1836 t4_fatal_err(adap);
1837}
1838
1839/*
1840 * PL interrupt handler.
1841 */
1842static void pl_intr_handler(struct adapter *adap)
1843{
Joe Perches005b5712010-12-14 21:36:53 +00001844 static const struct intr_info pl_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001845 { FATALPERR, "T4 fatal parity error", -1, 1 },
1846 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1847 { 0 }
1848 };
1849
1850 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1851 t4_fatal_err(adap);
1852}
1853
Dimitris Michailidis63bccee2010-08-02 13:19:16 +00001854#define PF_INTR_MASK (PFSW)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001855#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1856 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1857 CPL_SWITCH | SGE | ULP_TX)
1858
1859/**
1860 * t4_slow_intr_handler - control path interrupt handler
1861 * @adapter: the adapter
1862 *
1863 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1864 * The designation 'slow' is because it involves register reads, while
1865 * data interrupts typically don't involve any MMIOs.
1866 */
1867int t4_slow_intr_handler(struct adapter *adapter)
1868{
1869 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1870
1871 if (!(cause & GLBL_INTR_MASK))
1872 return 0;
1873 if (cause & CIM)
1874 cim_intr_handler(adapter);
1875 if (cause & MPS)
1876 mps_intr_handler(adapter);
1877 if (cause & NCSI)
1878 ncsi_intr_handler(adapter);
1879 if (cause & PL)
1880 pl_intr_handler(adapter);
1881 if (cause & SMB)
1882 smb_intr_handler(adapter);
1883 if (cause & XGMAC0)
1884 xgmac_intr_handler(adapter, 0);
1885 if (cause & XGMAC1)
1886 xgmac_intr_handler(adapter, 1);
1887 if (cause & XGMAC_KR0)
1888 xgmac_intr_handler(adapter, 2);
1889 if (cause & XGMAC_KR1)
1890 xgmac_intr_handler(adapter, 3);
1891 if (cause & PCIE)
1892 pcie_intr_handler(adapter);
1893 if (cause & MC)
1894 mem_intr_handler(adapter, MEM_MC);
1895 if (cause & EDC0)
1896 mem_intr_handler(adapter, MEM_EDC0);
1897 if (cause & EDC1)
1898 mem_intr_handler(adapter, MEM_EDC1);
1899 if (cause & LE)
1900 le_intr_handler(adapter);
1901 if (cause & TP)
1902 tp_intr_handler(adapter);
1903 if (cause & MA)
1904 ma_intr_handler(adapter);
1905 if (cause & PM_TX)
1906 pmtx_intr_handler(adapter);
1907 if (cause & PM_RX)
1908 pmrx_intr_handler(adapter);
1909 if (cause & ULP_RX)
1910 ulprx_intr_handler(adapter);
1911 if (cause & CPL_SWITCH)
1912 cplsw_intr_handler(adapter);
1913 if (cause & SGE)
1914 sge_intr_handler(adapter);
1915 if (cause & ULP_TX)
1916 ulptx_intr_handler(adapter);
1917
1918 /* Clear the interrupts just processed for which we are the master. */
1919 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1920 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1921 return 1;
1922}
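
/*
 * Illustrative sketch, not part of the original driver: the general shape
 * of a top-level non-data ISR layered on t4_slow_intr_handler().  The
 * irq/cookie plumbing is an assumption for the example, as is the
 * availability of <linux/interrupt.h> via the driver headers.
 */
static irqreturn_t example_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	return t4_slow_intr_handler(adap) ? IRQ_HANDLED : IRQ_NONE;
}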
1923
1924/**
1925 * t4_intr_enable - enable interrupts
1926 * @adapter: the adapter whose interrupts should be enabled
1927 *
1928 * Enable PF-specific interrupts for the calling function and the top-level
1929 * interrupt concentrator for global interrupts. Interrupts are already
1930 * enabled at each module, here we just enable the roots of the interrupt
1931 * hierarchies.
1932 *
1933 * Note: this function should be called only when the driver manages
1934 * non PF-specific interrupts from the various HW modules. Only one PCI
1935 * function at a time should be doing this.
1936 */
1937void t4_intr_enable(struct adapter *adapter)
1938{
1939 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1940
1941 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1942 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1943 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1944 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1945 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1946 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1947 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
Vipul Pandya840f3002012-09-05 02:01:55 +00001948 DBFIFO_HP_INT | DBFIFO_LP_INT |
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001949 EGRESS_SIZE_ERR);
1950 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1951 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1952}
1953
1954/**
1955 * t4_intr_disable - disable interrupts
1956 * @adapter: the adapter whose interrupts should be disabled
1957 *
1958 * Disable interrupts. We only disable the top-level interrupt
1959 * concentrators. The caller must be a PCI function managing global
1960 * interrupts.
1961 */
1962void t4_intr_disable(struct adapter *adapter)
1963{
1964 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1965
1966 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1967 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1968}
1969
1970/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001971 * hash_mac_addr - return the hash value of a MAC address
1972 * @addr: the 48-bit Ethernet MAC address
1973 *
1974 * Hashes a MAC address according to the hash function used by HW inexact
1975 * (hash) address matching.
1976 */
1977static int hash_mac_addr(const u8 *addr)
1978{
1979 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1980 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1981 a ^= b;
1982 a ^= (a >> 12);
1983 a ^= (a >> 6);
1984 return a & 0x3f;
1985}
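
/*
 * Illustrative sketch, not part of the original driver: folding a small
 * list of MAC addresses into the 64-bit inexact (hash) filter vector with
 * hash_mac_addr() above.  The helper name and the OR-into-a-vector usage
 * are assumptions made for the example only.
 */
static u64 example_hash_vec(const u8 (*macs)[ETH_ALEN], unsigned int nmacs)
{
	u64 vec = 0;
	unsigned int i;

	for (i = 0; i < nmacs; i++)
		vec |= 1ULL << hash_mac_addr(macs[i]);	/* bit index 0..63 */
	return vec;
}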
1986
1987/**
1988 * t4_config_rss_range - configure a portion of the RSS mapping table
1989 * @adapter: the adapter
1990 * @mbox: mbox to use for the FW command
1991 * @viid: virtual interface whose RSS subtable is to be written
1992 * @start: start entry in the table to write
1993 * @n: how many table entries to write
1994 * @rspq: values for the response queue lookup table
1995 * @nrspq: number of values in @rspq
1996 *
1997 * Programs the selected part of the VI's RSS mapping table with the
1998 * provided values. If @nrspq < @n the supplied values are used repeatedly
1999 * until the full table range is populated.
2000 *
2001 * The caller must ensure the values in @rspq are in the range allowed for
2002 * @viid.
2003 */
2004int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2005 int start, int n, const u16 *rspq, unsigned int nrspq)
2006{
2007 int ret;
2008 const u16 *rsp = rspq;
2009 const u16 *rsp_end = rspq + nrspq;
2010 struct fw_rss_ind_tbl_cmd cmd;
2011
2012 memset(&cmd, 0, sizeof(cmd));
2013 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2014 FW_CMD_REQUEST | FW_CMD_WRITE |
2015 FW_RSS_IND_TBL_CMD_VIID(viid));
2016 cmd.retval_len16 = htonl(FW_LEN16(cmd));
2017
2018 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
2019 while (n > 0) {
2020 int nq = min(n, 32);
2021 __be32 *qp = &cmd.iq0_to_iq2;
2022
2023 cmd.niqid = htons(nq);
2024 cmd.startidx = htons(start);
2025
2026 start += nq;
2027 n -= nq;
2028
2029 while (nq > 0) {
2030 unsigned int v;
2031
2032 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
2033 if (++rsp >= rsp_end)
2034 rsp = rspq;
2035 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
2036 if (++rsp >= rsp_end)
2037 rsp = rspq;
2038 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
2039 if (++rsp >= rsp_end)
2040 rsp = rspq;
2041
2042 *qp++ = htonl(v);
2043 nq -= 3;
2044 }
2045
2046 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2047 if (ret)
2048 return ret;
2049 }
2050 return 0;
2051}
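
/*
 * Illustrative sketch, not part of the original driver: programming a
 * 128-entry RSS slice for a VI so that it cycles through the caller's
 * ingress queue IDs.  The slice length and the mbox/viid/iqids arguments
 * are placeholders for the example only.
 */
static int example_setup_rss(struct adapter *adap, unsigned int mbox,
			     unsigned int viid, const u16 *iqids,
			     unsigned int niqids)
{
	/* The 128 slice entries are filled by cycling through iqids[] */
	return t4_config_rss_range(adap, mbox, viid, 0, 128, iqids, niqids);
}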
2052
2053/**
2054 * t4_config_glbl_rss - configure the global RSS mode
2055 * @adapter: the adapter
2056 * @mbox: mbox to use for the FW command
2057 * @mode: global RSS mode
2058 * @flags: mode-specific flags
2059 *
2060 * Sets the global RSS mode.
2061 */
2062int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2063 unsigned int flags)
2064{
2065 struct fw_rss_glb_config_cmd c;
2066
2067 memset(&c, 0, sizeof(c));
2068 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2069 FW_CMD_REQUEST | FW_CMD_WRITE);
2070 c.retval_len16 = htonl(FW_LEN16(c));
2071 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2072 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2073 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2074 c.u.basicvirtual.mode_pkd =
2075 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2076 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2077 } else
2078 return -EINVAL;
2079 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2080}
2081
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002082/**
2083 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
2084 * @adap: the adapter
2085 * @v4: holds the TCP/IP counter values
2086 * @v6: holds the TCP/IPv6 counter values
2087 *
2088 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2089 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2090 */
2091void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2092 struct tp_tcp_stats *v6)
2093{
2094 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
2095
2096#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
2097#define STAT(x) val[STAT_IDX(x)]
2098#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2099
2100 if (v4) {
2101 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2102 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
2103 v4->tcpOutRsts = STAT(OUT_RST);
2104 v4->tcpInSegs = STAT64(IN_SEG);
2105 v4->tcpOutSegs = STAT64(OUT_SEG);
2106 v4->tcpRetransSegs = STAT64(RXT_SEG);
2107 }
2108 if (v6) {
2109 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2110 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
2111 v6->tcpOutRsts = STAT(OUT_RST);
2112 v6->tcpInSegs = STAT64(IN_SEG);
2113 v6->tcpOutSegs = STAT64(OUT_SEG);
2114 v6->tcpRetransSegs = STAT64(RXT_SEG);
2115 }
2116#undef STAT64
2117#undef STAT
2118#undef STAT_IDX
2119}
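
/*
 * Illustrative sketch, not part of the original driver: sampling only the
 * IPv4 TCP retransmission counter, passing NULL to skip the IPv6 block as
 * the comment above permits.
 */
static u64 example_tcp_retrans(struct adapter *adap)
{
	struct tp_tcp_stats v4;

	t4_tp_get_tcp_stats(adap, &v4, NULL);
	return v4.tcpRetransSegs;
}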
2120
2121/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002122 * t4_read_mtu_tbl - returns the values in the HW path MTU table
2123 * @adap: the adapter
2124 * @mtus: where to store the MTU values
2125 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
2126 *
2127 * Reads the HW path MTU table.
2128 */
2129void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2130{
2131 u32 v;
2132 int i;
2133
2134 for (i = 0; i < NMTUS; ++i) {
2135 t4_write_reg(adap, TP_MTU_TABLE,
2136 MTUINDEX(0xff) | MTUVALUE(i));
2137 v = t4_read_reg(adap, TP_MTU_TABLE);
2138 mtus[i] = MTUVALUE_GET(v);
2139 if (mtu_log)
2140 mtu_log[i] = MTUWIDTH_GET(v);
2141 }
2142}
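
/*
 * Illustrative sketch, not part of the original driver: choosing the
 * largest entry of the HW path-MTU table that does not exceed a requested
 * MTU, as a caller of t4_read_mtu_tbl() might do.
 */
static unsigned int example_best_mtu(struct adapter *adap, unsigned int mtu)
{
	u16 mtus[NMTUS];
	int i;

	t4_read_mtu_tbl(adap, mtus, NULL);
	for (i = 0; i < NMTUS - 1 && mtus[i + 1] <= mtu; i++)
		;
	return mtus[i];
}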
2143
2144/**
Vipul Pandya636f9d32012-09-26 02:39:39 +00002145 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
2146 * @adap: the adapter
2147 * @addr: the indirect TP register address
2148 * @mask: specifies the field within the register to modify
2149 * @val: new value for the field
2150 *
2151 * Sets a field of an indirect TP register to the given value.
2152 */
2153void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
2154 unsigned int mask, unsigned int val)
2155{
2156 t4_write_reg(adap, TP_PIO_ADDR, addr);
2157 val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
2158 t4_write_reg(adap, TP_PIO_DATA, val);
2159}
2160
2161/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002162 * init_cong_ctrl - initialize congestion control parameters
2163 * @a: the alpha values for congestion control
2164 * @b: the beta values for congestion control
2165 *
2166 * Initialize the congestion control parameters.
2167 */
Bill Pemberton91744942012-12-03 09:23:02 -05002168static void init_cong_ctrl(unsigned short *a, unsigned short *b)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002169{
2170 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2171 a[9] = 2;
2172 a[10] = 3;
2173 a[11] = 4;
2174 a[12] = 5;
2175 a[13] = 6;
2176 a[14] = 7;
2177 a[15] = 8;
2178 a[16] = 9;
2179 a[17] = 10;
2180 a[18] = 14;
2181 a[19] = 17;
2182 a[20] = 21;
2183 a[21] = 25;
2184 a[22] = 30;
2185 a[23] = 35;
2186 a[24] = 45;
2187 a[25] = 60;
2188 a[26] = 80;
2189 a[27] = 100;
2190 a[28] = 200;
2191 a[29] = 300;
2192 a[30] = 400;
2193 a[31] = 500;
2194
2195 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2196 b[9] = b[10] = 1;
2197 b[11] = b[12] = 2;
2198 b[13] = b[14] = b[15] = b[16] = 3;
2199 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2200 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2201 b[28] = b[29] = 6;
2202 b[30] = b[31] = 7;
2203}
2204
2205/* The minimum additive increment value for the congestion control table */
2206#define CC_MIN_INCR 2U
2207
2208/**
2209 * t4_load_mtus - write the MTU and congestion control HW tables
2210 * @adap: the adapter
2211 * @mtus: the values for the MTU table
2212 * @alpha: the values for the congestion control alpha parameter
2213 * @beta: the values for the congestion control beta parameter
2214 *
2215 * Write the HW MTU table with the supplied MTUs and the high-speed
2216 * congestion control table with the supplied alpha, beta, and MTUs.
2217 * We write the two tables together because the additive increments
2218 * depend on the MTUs.
2219 */
2220void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2221 const unsigned short *alpha, const unsigned short *beta)
2222{
2223 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2224 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2225 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2226 28672, 40960, 57344, 81920, 114688, 163840, 229376
2227 };
2228
2229 unsigned int i, w;
2230
2231 for (i = 0; i < NMTUS; ++i) {
2232 unsigned int mtu = mtus[i];
2233 unsigned int log2 = fls(mtu);
2234
2235 if (!(mtu & ((1 << log2) >> 2))) /* round */
2236 log2--;
2237 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
2238 MTUWIDTH(log2) | MTUVALUE(mtu));
2239
2240 for (w = 0; w < NCCTRL_WIN; ++w) {
2241 unsigned int inc;
2242
2243 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2244 CC_MIN_INCR);
2245
2246 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
2247 (w << 16) | (beta[w] << 13) | inc);
2248 }
2249 }
2250}
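
/*
 * Illustrative sketch, not part of the original driver: pairing
 * init_cong_ctrl() with t4_load_mtus().  The MTU values below are
 * placeholders picked for the example; a real caller derives its table
 * from driver configuration.
 */
static void example_program_mtus(struct adapter *adap)
{
	static const unsigned short mtus[NMTUS] = {
		88, 256, 512, 576, 808, 1024, 1280, 1488,
		1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
	};
	unsigned short alpha[NCCTRL_WIN], beta[NCCTRL_WIN];

	init_cong_ctrl(alpha, beta);
	t4_load_mtus(adap, mtus, alpha, beta);
}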
2251
2252/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002253 * get_mps_bg_map - return the buffer groups associated with a port
2254 * @adap: the adapter
2255 * @idx: the port index
2256 *
2257 * Returns a bitmap indicating which MPS buffer groups are associated
2258 * with the given port. Bit i is set if buffer group i is used by the
2259 * port.
2260 */
2261static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2262{
2263 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2264
2265 if (n == 0)
2266 return idx == 0 ? 0xf : 0;
2267 if (n == 1)
2268 return idx < 2 ? (3 << (2 * idx)) : 0;
2269 return 1 << idx;
2270}
2271
2272/**
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05302273 * t4_get_port_type_description - return Port Type string description
2274 * @port_type: firmware Port Type enumeration
2275 */
2276const char *t4_get_port_type_description(enum fw_port_type port_type)
2277{
2278 static const char *const port_type_description[] = {
2279 "R XFI",
2280 "R XAUI",
2281 "T SGMII",
2282 "T XFI",
2283 "T XAUI",
2284 "KX4",
2285 "CX4",
2286 "KX",
2287 "KR",
2288 "R SFP+",
2289 "KR/KX",
2290 "KR/KX/KX4",
2291 "R QSFP_10G",
2292 "",
2293 "R QSFP",
2294 "R BP40_BA",
2295 };
2296
2297 if (port_type < ARRAY_SIZE(port_type_description))
2298 return port_type_description[port_type];
2299 return "UNKNOWN";
2300}
2301
2302/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002303 * t4_get_port_stats - collect port statistics
2304 * @adap: the adapter
2305 * @idx: the port index
2306 * @p: the stats structure to fill
2307 *
2308 * Collect statistics related to the given port from HW.
2309 */
2310void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2311{
2312 u32 bgmap = get_mps_bg_map(adap, idx);
2313
2314#define GET_STAT(name) \
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002315 t4_read_reg64(adap, \
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05302316 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002317 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002318#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2319
2320 p->tx_octets = GET_STAT(TX_PORT_BYTES);
2321 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
2322 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
2323 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
2324 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
2325 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
2326 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
2327 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
2328 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
2329 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
2330 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
2331 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2332 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
2333 p->tx_drop = GET_STAT(TX_PORT_DROP);
2334 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
2335 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
2336 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
2337 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
2338 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
2339 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
2340 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
2341 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
2342 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
2343
2344 p->rx_octets = GET_STAT(RX_PORT_BYTES);
2345 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
2346 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
2347 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
2348 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
2349 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
2350 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2351 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
2352 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
2353 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
2354 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
2355 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
2356 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
2357 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
2358 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
2359 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
2360 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2361 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
2362 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
2363 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
2364 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
2365 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
2366 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
2367 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
2368 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
2369 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
2370 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
2371
2372 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2373 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2374 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2375 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2376 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2377 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2378 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2379 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2380
2381#undef GET_STAT
2382#undef GET_STAT_COM
2383}
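
/*
 * Illustrative sketch, not part of the original driver: deriving a simple
 * aggregate (total Rx frames dropped for buffer-group overflow) from the
 * counters that t4_get_port_stats() fills in.
 */
static u64 example_port_rx_drops(struct adapter *adap, int idx)
{
	struct port_stats s;

	t4_get_port_stats(adap, idx, &s);
	return s.rx_ovflow0 + s.rx_ovflow1 + s.rx_ovflow2 + s.rx_ovflow3;
}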
2384
2385/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002386 * t4_wol_magic_enable - enable/disable magic packet WoL
2387 * @adap: the adapter
2388 * @port: the physical port index
2389 * @addr: MAC address expected in magic packets, %NULL to disable
2390 *
2391 * Enables/disables magic packet wake-on-LAN for the selected port.
2392 */
2393void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2394 const u8 *addr)
2395{
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002396 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
2397
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05302398 if (is_t4(adap->params.chip)) {
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002399 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
2400 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
2401 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2402 } else {
2403 mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
2404 mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
2405 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
2406 }
2407
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002408 if (addr) {
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002409 t4_write_reg(adap, mag_id_reg_l,
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002410 (addr[2] << 24) | (addr[3] << 16) |
2411 (addr[4] << 8) | addr[5]);
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002412 t4_write_reg(adap, mag_id_reg_h,
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002413 (addr[0] << 8) | addr[1]);
2414 }
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002415 t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002416 addr ? MAGICEN : 0);
2417}
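
/*
 * Illustrative sketch, not part of the original driver: arming and
 * disarming magic-packet wake-on-LAN for a port.  The MAC address is a
 * placeholder chosen for the example.
 */
static void example_wol_magic(struct adapter *adap, unsigned int port, bool on)
{
	static const u8 mac[ETH_ALEN] = { 0x00, 0x07, 0x43, 0x00, 0x00, 0x01 };

	t4_wol_magic_enable(adap, port, on ? mac : NULL);
}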
2418
2419/**
2420 * t4_wol_pat_enable - enable/disable pattern-based WoL
2421 * @adap: the adapter
2422 * @port: the physical port index
2423 * @map: bitmap of which HW pattern filters to set
2424 * @mask0: byte mask for bytes 0-63 of a packet
2425 * @mask1: byte mask for bytes 64-127 of a packet
2426 * @crc: Ethernet CRC for selected bytes
2427 * @enable: enable/disable switch
2428 *
2429 * Sets the pattern filters indicated in @map to mask out the bytes
2430 * specified in @mask0/@mask1 in received packets and compare the CRC of
2431 * the resulting packet against @crc. If @enable is %true pattern-based
2432 * WoL is enabled, otherwise disabled.
2433 */
2434int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2435 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2436{
2437 int i;
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002438 u32 port_cfg_reg;
2439
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05302440 if (is_t4(adap->params.chip))
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002441 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2442 else
2443 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002444
2445 if (!enable) {
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002446 t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002447 return 0;
2448 }
2449 if (map > 0xff)
2450 return -EINVAL;
2451
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002452#define EPIO_REG(name) \
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05302453 (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
Santosh Rastapur0a57a532013-03-14 05:08:49 +00002454 T5_PORT_REG(port, MAC_PORT_EPIO_##name))
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002455
2456 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2457 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2458 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2459
2460 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2461 if (!(map & 1))
2462 continue;
2463
2464 /* write byte masks */
2465 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2466 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2467 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
Naresh Kumar Innace91a922012-11-15 22:41:17 +05302468 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002469 return -ETIMEDOUT;
2470
2471 /* write CRC */
2472 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2473 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2474 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
Naresh Kumar Innace91a922012-11-15 22:41:17 +05302475 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002476 return -ETIMEDOUT;
2477 }
2478#undef EPIO_REG
2479
 2480	t4_set_reg_field(adap, port_cfg_reg, 0, PATEN);
2481 return 0;
2482}
2483
Vipul Pandyaf2b7e782012-12-10 09:30:52 +00002484/**
 2484 * t4_mk_filtdelwr - create a delete filter WR
2485 * @ftid: the filter ID
2486 * @wr: the filter work request to populate
2487 * @qid: ingress queue to receive the delete notification
2488 *
2489 * Creates a filter work request to delete the supplied filter. If @qid is
2490 * negative the delete notification is suppressed.
2491 */
2492void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
2493{
2494 memset(wr, 0, sizeof(*wr));
2495 wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
2496 wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
2497 wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
2498 V_FW_FILTER_WR_NOREPLY(qid < 0));
2499 wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
2500 if (qid >= 0)
2501 wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
2502}
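
/*
 * Illustrative sketch, not part of the original driver: building a
 * delete-filter work request in a freshly allocated skb.  How the skb is
 * then handed to the firmware (e.g. over a control/offload queue) is
 * outside this file and not shown; the skb helpers are assumed to be
 * reachable through the driver headers.
 */
static struct sk_buff *example_build_delfilt(unsigned int ftid, int qid)
{
	struct sk_buff *skb;
	struct fw_filter_wr *wr;

	skb = alloc_skb(sizeof(*wr), GFP_KERNEL);
	if (!skb)
		return NULL;
	wr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*wr));
	t4_mk_filtdelwr(ftid, wr, qid);
	return skb;
}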
2503
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002504#define INIT_CMD(var, cmd, rd_wr) do { \
2505 (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
2506 FW_CMD_REQUEST | FW_CMD_##rd_wr); \
2507 (var).retval_len16 = htonl(FW_LEN16(var)); \
2508} while (0)
2509
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302510int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2511 u32 addr, u32 val)
2512{
2513 struct fw_ldst_cmd c;
2514
2515 memset(&c, 0, sizeof(c));
Vipul Pandya636f9d32012-09-26 02:39:39 +00002516 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2517 FW_CMD_WRITE |
2518 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302519 c.cycles_to_len16 = htonl(FW_LEN16(c));
2520 c.u.addrval.addr = htonl(addr);
2521 c.u.addrval.val = htonl(val);
2522
2523 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2524}
2525
Ben Hutchings49ce9c22012-07-10 10:56:00 +00002526/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002527 * t4_mdio_rd - read a PHY register through MDIO
2528 * @adap: the adapter
2529 * @mbox: mailbox to use for the FW command
2530 * @phy_addr: the PHY address
2531 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2532 * @reg: the register to read
2533 * @valp: where to store the value
2534 *
2535 * Issues a FW command through the given mailbox to read a PHY register.
2536 */
2537int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2538 unsigned int mmd, unsigned int reg, u16 *valp)
2539{
2540 int ret;
2541 struct fw_ldst_cmd c;
2542
2543 memset(&c, 0, sizeof(c));
2544 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2545 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2546 c.cycles_to_len16 = htonl(FW_LEN16(c));
2547 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2548 FW_LDST_CMD_MMD(mmd));
2549 c.u.mdio.raddr = htons(reg);
2550
2551 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2552 if (ret == 0)
2553 *valp = ntohs(c.u.mdio.rval);
2554 return ret;
2555}
2556
2557/**
2558 * t4_mdio_wr - write a PHY register through MDIO
2559 * @adap: the adapter
2560 * @mbox: mailbox to use for the FW command
2561 * @phy_addr: the PHY address
2562 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2563 * @reg: the register to write
 2564 * @val: value to write
2565 *
2566 * Issues a FW command through the given mailbox to write a PHY register.
2567 */
2568int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2569 unsigned int mmd, unsigned int reg, u16 val)
2570{
2571 struct fw_ldst_cmd c;
2572
2573 memset(&c, 0, sizeof(c));
2574 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2575 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2576 c.cycles_to_len16 = htonl(FW_LEN16(c));
2577 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2578 FW_LDST_CMD_MMD(mmd));
2579 c.u.mdio.raddr = htons(reg);
2580 c.u.mdio.rval = htons(val);
2581
2582 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2583}
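
/*
 * Illustrative sketch, not part of the original driver: a read-modify-write
 * of a clause-45 PHY register built from t4_mdio_rd() and t4_mdio_wr().
 * All arguments are supplied by the (hypothetical) caller.
 */
static int example_mdio_rmw(struct adapter *adap, unsigned int mbox,
			    unsigned int phy_addr, unsigned int mmd,
			    unsigned int reg, u16 clr, u16 set)
{
	u16 v;
	int ret;

	ret = t4_mdio_rd(adap, mbox, phy_addr, mmd, reg, &v);
	if (ret)
		return ret;
	return t4_mdio_wr(adap, mbox, phy_addr, mmd, reg, (v & ~clr) | set);
}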
2584
2585/**
Kumar Sanghvi68bce1922014-03-13 20:50:47 +05302586 * t4_sge_decode_idma_state - decode the idma state
 2587 * @adapter: the adapter
2588 * @state: the state idma is stuck in
2589 */
2590void t4_sge_decode_idma_state(struct adapter *adapter, int state)
2591{
2592 static const char * const t4_decode[] = {
2593 "IDMA_IDLE",
2594 "IDMA_PUSH_MORE_CPL_FIFO",
2595 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2596 "Not used",
2597 "IDMA_PHYSADDR_SEND_PCIEHDR",
2598 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2599 "IDMA_PHYSADDR_SEND_PAYLOAD",
2600 "IDMA_SEND_FIFO_TO_IMSG",
2601 "IDMA_FL_REQ_DATA_FL_PREP",
2602 "IDMA_FL_REQ_DATA_FL",
2603 "IDMA_FL_DROP",
2604 "IDMA_FL_H_REQ_HEADER_FL",
2605 "IDMA_FL_H_SEND_PCIEHDR",
2606 "IDMA_FL_H_PUSH_CPL_FIFO",
2607 "IDMA_FL_H_SEND_CPL",
2608 "IDMA_FL_H_SEND_IP_HDR_FIRST",
2609 "IDMA_FL_H_SEND_IP_HDR",
2610 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
2611 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
2612 "IDMA_FL_H_SEND_IP_HDR_PADDING",
2613 "IDMA_FL_D_SEND_PCIEHDR",
2614 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2615 "IDMA_FL_D_REQ_NEXT_DATA_FL",
2616 "IDMA_FL_SEND_PCIEHDR",
2617 "IDMA_FL_PUSH_CPL_FIFO",
2618 "IDMA_FL_SEND_CPL",
2619 "IDMA_FL_SEND_PAYLOAD_FIRST",
2620 "IDMA_FL_SEND_PAYLOAD",
2621 "IDMA_FL_REQ_NEXT_DATA_FL",
2622 "IDMA_FL_SEND_NEXT_PCIEHDR",
2623 "IDMA_FL_SEND_PADDING",
2624 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2625 "IDMA_FL_SEND_FIFO_TO_IMSG",
2626 "IDMA_FL_REQ_DATAFL_DONE",
2627 "IDMA_FL_REQ_HEADERFL_DONE",
2628 };
2629 static const char * const t5_decode[] = {
2630 "IDMA_IDLE",
2631 "IDMA_ALMOST_IDLE",
2632 "IDMA_PUSH_MORE_CPL_FIFO",
2633 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2634 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
2635 "IDMA_PHYSADDR_SEND_PCIEHDR",
2636 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2637 "IDMA_PHYSADDR_SEND_PAYLOAD",
2638 "IDMA_SEND_FIFO_TO_IMSG",
2639 "IDMA_FL_REQ_DATA_FL",
2640 "IDMA_FL_DROP",
2641 "IDMA_FL_DROP_SEND_INC",
2642 "IDMA_FL_H_REQ_HEADER_FL",
2643 "IDMA_FL_H_SEND_PCIEHDR",
2644 "IDMA_FL_H_PUSH_CPL_FIFO",
2645 "IDMA_FL_H_SEND_CPL",
2646 "IDMA_FL_H_SEND_IP_HDR_FIRST",
2647 "IDMA_FL_H_SEND_IP_HDR",
2648 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
2649 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
2650 "IDMA_FL_H_SEND_IP_HDR_PADDING",
2651 "IDMA_FL_D_SEND_PCIEHDR",
2652 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2653 "IDMA_FL_D_REQ_NEXT_DATA_FL",
2654 "IDMA_FL_SEND_PCIEHDR",
2655 "IDMA_FL_PUSH_CPL_FIFO",
2656 "IDMA_FL_SEND_CPL",
2657 "IDMA_FL_SEND_PAYLOAD_FIRST",
2658 "IDMA_FL_SEND_PAYLOAD",
2659 "IDMA_FL_REQ_NEXT_DATA_FL",
2660 "IDMA_FL_SEND_NEXT_PCIEHDR",
2661 "IDMA_FL_SEND_PADDING",
2662 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2663 };
2664 static const u32 sge_regs[] = {
2665 SGE_DEBUG_DATA_LOW_INDEX_2,
2666 SGE_DEBUG_DATA_LOW_INDEX_3,
2667 SGE_DEBUG_DATA_HIGH_INDEX_10,
2668 };
2669 const char **sge_idma_decode;
2670 int sge_idma_decode_nstates;
2671 int i;
2672
2673 if (is_t4(adapter->params.chip)) {
2674 sge_idma_decode = (const char **)t4_decode;
2675 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
2676 } else {
2677 sge_idma_decode = (const char **)t5_decode;
2678 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
2679 }
2680
2681 if (state < sge_idma_decode_nstates)
2682 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
2683 else
2684 CH_WARN(adapter, "idma state %d unknown\n", state);
2685
2686 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
2687 CH_WARN(adapter, "SGE register %#x value %#x\n",
2688 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
2689}
2690
2691/**
Vipul Pandya636f9d32012-09-26 02:39:39 +00002692 * t4_fw_hello - establish communication with FW
2693 * @adap: the adapter
2694 * @mbox: mailbox to use for the FW command
2695 * @evt_mbox: mailbox to receive async FW events
2696 * @master: specifies the caller's willingness to be the device master
2697 * @state: returns the current device state (if non-NULL)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002698 *
Vipul Pandya636f9d32012-09-26 02:39:39 +00002699 * Issues a command to establish communication with FW. Returns either
2700 * an error (negative integer) or the mailbox of the Master PF.
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002701 */
2702int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2703 enum dev_master master, enum dev_state *state)
2704{
2705 int ret;
2706 struct fw_hello_cmd c;
Vipul Pandya636f9d32012-09-26 02:39:39 +00002707 u32 v;
2708 unsigned int master_mbox;
2709 int retries = FW_CMD_HELLO_RETRIES;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002710
Vipul Pandya636f9d32012-09-26 02:39:39 +00002711retry:
2712 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002713 INIT_CMD(c, HELLO, WRITE);
Naresh Kumar Innace91a922012-11-15 22:41:17 +05302714 c.err_to_clearinit = htonl(
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002715 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2716 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
Vipul Pandya636f9d32012-09-26 02:39:39 +00002717 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
2718 FW_HELLO_CMD_MBMASTER_MASK) |
2719 FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
2720 FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
2721 FW_HELLO_CMD_CLEARINIT);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002722
Vipul Pandya636f9d32012-09-26 02:39:39 +00002723 /*
2724 * Issue the HELLO command to the firmware. If it's not successful
2725 * but indicates that we got a "busy" or "timeout" condition, retry
2726 * the HELLO until we exhaust our retry limit.
2727 */
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002728 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
Vipul Pandya636f9d32012-09-26 02:39:39 +00002729 if (ret < 0) {
2730 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2731 goto retry;
2732 return ret;
2733 }
2734
Naresh Kumar Innace91a922012-11-15 22:41:17 +05302735 v = ntohl(c.err_to_clearinit);
Vipul Pandya636f9d32012-09-26 02:39:39 +00002736 master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
2737 if (state) {
2738 if (v & FW_HELLO_CMD_ERR)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002739 *state = DEV_STATE_ERR;
Vipul Pandya636f9d32012-09-26 02:39:39 +00002740 else if (v & FW_HELLO_CMD_INIT)
2741 *state = DEV_STATE_INIT;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002742 else
2743 *state = DEV_STATE_UNINIT;
2744 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00002745
2746 /*
2747 * If we're not the Master PF then we need to wait around for the
2748 * Master PF Driver to finish setting up the adapter.
2749 *
2750 * Note that we also do this wait if we're a non-Master-capable PF and
2751 * there is no current Master PF; a Master PF may show up momentarily
2752 * and we wouldn't want to fail pointlessly. (This can happen when an
2753 * OS loads lots of different drivers rapidly at the same time). In
2754 * this case, the Master PF returned by the firmware will be
2755 * FW_PCIE_FW_MASTER_MASK so the test below will work ...
2756 */
2757 if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
2758 master_mbox != mbox) {
2759 int waiting = FW_CMD_HELLO_TIMEOUT;
2760
2761 /*
2762 * Wait for the firmware to either indicate an error or
2763 * initialized state. If we see either of these we bail out
2764 * and report the issue to the caller. If we exhaust the
2765 * "hello timeout" and we haven't exhausted our retries, try
2766 * again. Otherwise bail with a timeout error.
2767 */
2768 for (;;) {
2769 u32 pcie_fw;
2770
2771 msleep(50);
2772 waiting -= 50;
2773
2774 /*
 2775 * If neither Error nor Initialized is indicated
 2776 * by the firmware, keep waiting till we exhaust our
 2777 * timeout ... and then retry if we haven't exhausted
2778 * our retries ...
2779 */
2780 pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
2781 if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
2782 if (waiting <= 0) {
2783 if (retries-- > 0)
2784 goto retry;
2785
2786 return -ETIMEDOUT;
2787 }
2788 continue;
2789 }
2790
2791 /*
2792 * We either have an Error or Initialized condition
2793 * report errors preferentially.
2794 */
2795 if (state) {
2796 if (pcie_fw & FW_PCIE_FW_ERR)
2797 *state = DEV_STATE_ERR;
2798 else if (pcie_fw & FW_PCIE_FW_INIT)
2799 *state = DEV_STATE_INIT;
2800 }
2801
2802 /*
2803 * If we arrived before a Master PF was selected and
2804 * there's not a valid Master PF, grab its identity
2805 * for our caller.
2806 */
2807 if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
2808 (pcie_fw & FW_PCIE_FW_MASTER_VLD))
2809 master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
2810 break;
2811 }
2812 }
2813
2814 return master_mbox;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002815}
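
/*
 * Illustrative sketch, not part of the original driver: a typical
 * t4_fw_hello() call in which the PF offers to be the master and then
 * checks whether it won that role.  Returns 1 if this PF is the master,
 * 0 if another PF is, or a negative error.  The mailbox numbering is an
 * assumption for the example.
 */
static int example_hello(struct adapter *adap, unsigned int mbox)
{
	enum dev_state state;
	int master;

	master = t4_fw_hello(adap, mbox, mbox, MASTER_MAY, &state);
	if (master < 0)
		return master;
	if (state == DEV_STATE_ERR)
		return -EIO;
	return master == mbox;
}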
2816
2817/**
2818 * t4_fw_bye - end communication with FW
2819 * @adap: the adapter
2820 * @mbox: mailbox to use for the FW command
2821 *
2822 * Issues a command to terminate communication with FW.
2823 */
2824int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2825{
2826 struct fw_bye_cmd c;
2827
Vipul Pandya0062b152012-11-06 03:37:09 +00002828 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002829 INIT_CMD(c, BYE, WRITE);
2830 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2831}
2832
2833/**
2834 * t4_init_cmd - ask FW to initialize the device
2835 * @adap: the adapter
2836 * @mbox: mailbox to use for the FW command
2837 *
2838 * Issues a command to FW to partially initialize the device. This
2839 * performs initialization that generally doesn't depend on user input.
2840 */
2841int t4_early_init(struct adapter *adap, unsigned int mbox)
2842{
2843 struct fw_initialize_cmd c;
2844
Vipul Pandya0062b152012-11-06 03:37:09 +00002845 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002846 INIT_CMD(c, INITIALIZE, WRITE);
2847 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2848}
2849
2850/**
2851 * t4_fw_reset - issue a reset to FW
2852 * @adap: the adapter
2853 * @mbox: mailbox to use for the FW command
2854 * @reset: specifies the type of reset to perform
2855 *
2856 * Issues a reset command of the specified type to FW.
2857 */
2858int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2859{
2860 struct fw_reset_cmd c;
2861
Vipul Pandya0062b152012-11-06 03:37:09 +00002862 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002863 INIT_CMD(c, RESET, WRITE);
2864 c.val = htonl(reset);
2865 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2866}
2867
2868/**
Vipul Pandya26f7cbc2012-09-26 02:39:42 +00002869 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
2870 * @adap: the adapter
2871 * @mbox: mailbox to use for the FW RESET command (if desired)
2872 * @force: force uP into RESET even if FW RESET command fails
2873 *
2874 * Issues a RESET command to firmware (if desired) with a HALT indication
2875 * and then puts the microprocessor into RESET state. The RESET command
2876 * will only be issued if a legitimate mailbox is provided (mbox <=
2877 * FW_PCIE_FW_MASTER_MASK).
2878 *
2879 * This is generally used in order for the host to safely manipulate the
2880 * adapter without fear of conflicting with whatever the firmware might
2881 * be doing. The only way out of this state is to RESTART the firmware
2882 * ...
2883 */
stephen hemmingerde5b8672013-12-18 14:16:47 -08002884static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
Vipul Pandya26f7cbc2012-09-26 02:39:42 +00002885{
2886 int ret = 0;
2887
2888 /*
2889 * If a legitimate mailbox is provided, issue a RESET command
2890 * with a HALT indication.
2891 */
2892 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2893 struct fw_reset_cmd c;
2894
2895 memset(&c, 0, sizeof(c));
2896 INIT_CMD(c, RESET, WRITE);
2897 c.val = htonl(PIORST | PIORSTMODE);
2898 c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
2899 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2900 }
2901
2902 /*
2903 * Normally we won't complete the operation if the firmware RESET
2904 * command fails but if our caller insists we'll go ahead and put the
2905 * uP into RESET. This can be useful if the firmware is hung or even
2906 * missing ... We'll have to take the risk of putting the uP into
2907 * RESET without the cooperation of firmware in that case.
2908 *
2909 * We also force the firmware's HALT flag to be on in case we bypassed
2910 * the firmware RESET command above or we're dealing with old firmware
2911 * which doesn't have the HALT capability. This will serve as a flag
2912 * for the incoming firmware to know that it's coming out of a HALT
2913 * rather than a RESET ... if it's new enough to understand that ...
2914 */
2915 if (ret == 0 || force) {
2916 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
2917 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
2918 FW_PCIE_FW_HALT);
2919 }
2920
2921 /*
2922 * And we always return the result of the firmware RESET command
2923 * even when we force the uP into RESET ...
2924 */
2925 return ret;
2926}
2927
2928/**
2929 * t4_fw_restart - restart the firmware by taking the uP out of RESET
2930 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
2931 * @reset: if we want to do a RESET to restart things
2932 *
2933 * Restart firmware previously halted by t4_fw_halt(). On successful
2934 * return the previous PF Master remains as the new PF Master and there
2935 * is no need to issue a new HELLO command, etc.
2936 *
2937 * We do this in two ways:
2938 *
2939 * 1. If we're dealing with newer firmware we'll simply want to take
2940 * the chip's microprocessor out of RESET. This will cause the
2941 * firmware to start up from its start vector. And then we'll loop
2942 * until the firmware indicates it's started again (PCIE_FW.HALT
2943 * reset to 0) or we timeout.
2944 * reset to 0) or we time out.
2945 * 2. If we're dealing with older firmware then we'll need to RESET
2946 * the chip since older firmware won't recognize the PCIE_FW.HALT
2947 * flag and automatically RESET itself on startup.
2948 */
stephen hemmingerde5b8672013-12-18 14:16:47 -08002949static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
Vipul Pandya26f7cbc2012-09-26 02:39:42 +00002950{
2951 if (reset) {
2952 /*
2953 * Since we're directing the RESET instead of the firmware
2954 * doing it automatically, we need to clear the PCIE_FW.HALT
2955 * bit.
2956 */
2957 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
2958
2959 /*
2960 * If we've been given a valid mailbox, first try to get the
2961 * firmware to do the RESET. If that works, great and we can
2962 * return success. Otherwise, if we haven't been given a
2963 * valid mailbox or the RESET command failed, fall back to
2964 * hitting the chip with a hammer.
2965 */
2966 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2967 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
2968 msleep(100);
2969 if (t4_fw_reset(adap, mbox,
2970 PIORST | PIORSTMODE) == 0)
2971 return 0;
2972 }
2973
2974 t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
2975 msleep(2000);
2976 } else {
2977 int ms;
2978
2979 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
2980 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
2981 if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
2982 return 0;
2983 msleep(100);
2984 ms += 100;
2985 }
2986 return -ETIMEDOUT;
2987 }
2988 return 0;
2989}
2990
2991/**
2992 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
2993 * @adap: the adapter
2994 * @mbox: mailbox to use for the FW RESET command (if desired)
2995 * @fw_data: the firmware image to write
2996 * @size: image size
2997 * @force: force upgrade even if firmware doesn't cooperate
2998 *
2999 * Perform all of the steps necessary for upgrading an adapter's
3000 * firmware image. Normally this requires the cooperation of the
3001 * existing firmware in order to halt all existing activities
3002 * but if an invalid mailbox token is passed in we skip that step
3003 * (though we'll still put the adapter microprocessor into RESET in
3004 * that case).
3005 *
3006 * On successful return the new firmware will have been loaded and
3007 * the adapter will have been fully RESET losing all previous setup
3008 * state. On unsuccessful return the adapter may be completely hosed ...
3009 * positive errno indicates that the adapter is ~probably~ intact, a
3010 * negative errno indicates that things are looking bad ...
3011 */
stephen hemmingerde5b8672013-12-18 14:16:47 -08003012static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
3013 const u8 *fw_data, unsigned int size, int force)
Vipul Pandya26f7cbc2012-09-26 02:39:42 +00003014{
3015 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
3016 int reset, ret;
3017
3018 ret = t4_fw_halt(adap, mbox, force);
3019 if (ret < 0 && !force)
3020 return ret;
3021
3022 ret = t4_load_fw(adap, fw_data, size);
3023 if (ret < 0)
3024 return ret;
3025
3026 /*
3027 * Older versions of the firmware don't understand the new
3028 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
3029 * restart. So for newly loaded older firmware we'll have to do the
3030 * RESET for it so it starts up on a clean slate. We can tell if
3031 * the newly loaded firmware will handle this right by checking
3032 * its header flags to see if it advertises the capability.
3033 */
3034 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
3035 return t4_fw_restart(adap, mbox, reset);
3036}
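/*
 * Usage sketch (editorial, illustrative only): a caller that has obtained
 * a firmware image -- for instance via request_firmware() -- might drive
 * an upgrade roughly as follows.  The image name and the adap->mbox field
 * are assumptions borrowed from the cxgb4 main driver, not part of this
 * file:
 *
 *	const struct firmware *fw;
 *	int ret;
 *
 *	if (request_firmware(&fw, "cxgb4/t4fw.bin", adap->pdev_dev) == 0) {
 *		ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size, 0);
 *		if (ret < 0)
 *			dev_err(adap->pdev_dev,
 *				"firmware upgrade failed: %d\n", ret);
 *		release_firmware(fw);
 *	}
 */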
3037
Vipul Pandya636f9d32012-09-26 02:39:39 +00003038/**
3039 * t4_fixup_host_params - fix up host-dependent parameters
3040 * @adap: the adapter
3041 * @page_size: the host's Base Page Size
3042 * @cache_line_size: the host's Cache Line Size
3043 *
3044 * Various registers in T4 contain values which are dependent on the
3045 * host's Base Page and Cache Line Sizes. This function will fix all of
3046 * those registers with the appropriate values as passed in ...
3047 */
3048int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3049 unsigned int cache_line_size)
3050{
3051 unsigned int page_shift = fls(page_size) - 1;
3052 unsigned int sge_hps = page_shift - 10;
3053 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
3054 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
3055 unsigned int fl_align_log = fls(fl_align) - 1;
3056
3057 t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
3058 HOSTPAGESIZEPF0(sge_hps) |
3059 HOSTPAGESIZEPF1(sge_hps) |
3060 HOSTPAGESIZEPF2(sge_hps) |
3061 HOSTPAGESIZEPF3(sge_hps) |
3062 HOSTPAGESIZEPF4(sge_hps) |
3063 HOSTPAGESIZEPF5(sge_hps) |
3064 HOSTPAGESIZEPF6(sge_hps) |
3065 HOSTPAGESIZEPF7(sge_hps));
3066
3067 t4_set_reg_field(adap, SGE_CONTROL,
Vipul Pandya0dad9e92012-11-07 03:45:46 +00003068 INGPADBOUNDARY_MASK |
Vipul Pandya636f9d32012-09-26 02:39:39 +00003069 EGRSTATUSPAGESIZE_MASK,
3070 INGPADBOUNDARY(fl_align_log - 5) |
3071 EGRSTATUSPAGESIZE(stat_len != 64));
3072
3073 /*
3074 * Adjust various SGE Free List Host Buffer Sizes.
3075 *
3076 * This is something of a crock since we're using fixed indices into
3077 * the array which are also known by the sge.c code and the T4
3078 * Firmware Configuration File. We need to come up with a much better
3079 * approach to managing this array. For now, the first four entries
3080 * are:
3081 *
3082 * 0: Host Page Size
3083 * 1: 64KB
3084 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
3085 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
3086 *
3087 * For the single-MTU buffers in unpacked mode we need to include
3088 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
3089 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
3090 * Padding boundary. All of these are accommodated in the Factory
3091 * Default Firmware Configuration File but we need to adjust it for
3092 * this host's cache line size.
3093 */
3094 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
3095 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
3096 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
3097 & ~(fl_align-1));
3098 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
3099 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
3100 & ~(fl_align-1));
3101
3102 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
3103
3104 return 0;
3105}
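/*
 * Worked example (editorial): on a typical host with a 4KB Base Page Size
 * and 64-byte cache lines the code above computes page_shift = fls(4096)
 * - 1 = 12, sge_hps = 12 - 10 = 2, stat_len = 64, fl_align = 64 and
 * fl_align_log = 6, so the SGE ends up programmed with HOSTPAGESIZEPF0
 * through HOSTPAGESIZEPF7 set to 2, INGPADBOUNDARY(1) (a 64-byte ingress
 * padding boundary) and EGRSTATUSPAGESIZE(0) (64-byte egress status
 * pages).
 */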
3106
3107/**
3108 * t4_fw_initialize - ask FW to initialize the device
3109 * @adap: the adapter
3110 * @mbox: mailbox to use for the FW command
3111 *
3112 * Issues a command to FW to partially initialize the device. This
3113 * performs initialization that generally doesn't depend on user input.
3114 */
3115int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
3116{
3117 struct fw_initialize_cmd c;
3118
3119 memset(&c, 0, sizeof(c));
3120 INIT_CMD(c, INITIALIZE, WRITE);
3121 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3122}
3123
3124/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003125 * t4_query_params - query FW or device parameters
3126 * @adap: the adapter
3127 * @mbox: mailbox to use for the FW command
3128 * @pf: the PF
3129 * @vf: the VF
3130 * @nparams: the number of parameters
3131 * @params: the parameter names
3132 * @val: the parameter values
3133 *
3134 * Reads the value of FW or device parameters. Up to 7 parameters can be
3135 * queried at once.
3136 */
3137int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3138 unsigned int vf, unsigned int nparams, const u32 *params,
3139 u32 *val)
3140{
3141 int i, ret;
3142 struct fw_params_cmd c;
3143 __be32 *p = &c.param[0].mnem;
3144
3145 if (nparams > 7)
3146 return -EINVAL;
3147
3148 memset(&c, 0, sizeof(c));
3149 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
3150 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
3151 FW_PARAMS_CMD_VFN(vf));
3152 c.retval_len16 = htonl(FW_LEN16(c));
3153 for (i = 0; i < nparams; i++, p += 2)
3154 *p = htonl(*params++);
3155
3156 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3157 if (ret == 0)
3158 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3159 *val++ = ntohl(*p);
3160 return ret;
3161}
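/*
 * Usage sketch (editorial): a caller fills an array with up to 7 encoded
 * parameter mnemonics and reads the values back.  The FW_PARAMS_MNEM()/
 * FW_PARAMS_PARAM_X() encoding and the adap->mbox/adap->fn fields shown
 * here are assumptions taken from t4fw_api.h and the cxgb4 main driver:
 *
 *	u32 param, val;
 *	int ret;
 *
 *	param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *		FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
 *	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &param, &val);
 *	if (ret == 0)
 *		adap->params.portvec = val;
 */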
3162
3163/**
Anish Bhatt688848b2014-06-19 21:37:13 -07003164 * t4_set_params_nosleep - sets FW or device parameters
3165 * @adap: the adapter
3166 * @mbox: mailbox to use for the FW command
3167 * @pf: the PF
3168 * @vf: the VF
3169 * @nparams: the number of parameters
3170 * @params: the parameter names
3171 * @val: the parameter values
3172 *
3173 * Does not sleep.
3174 * Sets the value of FW or device parameters. Up to 7 parameters can be
3175 * specified at once.
3176 */
3177int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
3178 unsigned int pf, unsigned int vf,
3179 unsigned int nparams, const u32 *params,
3180 const u32 *val)
3181{
3182 struct fw_params_cmd c;
3183 __be32 *p = &c.param[0].mnem;
3184
3185 if (nparams > 7)
3186 return -EINVAL;
3187
3188 memset(&c, 0, sizeof(c));
3189 c.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
3190 FW_CMD_REQUEST | FW_CMD_WRITE |
3191 FW_PARAMS_CMD_PFN(pf) |
3192 FW_PARAMS_CMD_VFN(vf));
3193 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3194
3195 while (nparams--) {
3196 *p++ = cpu_to_be32(*params++);
3197 *p++ = cpu_to_be32(*val++);
3198 }
3199
3200 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3201}
3202
3203/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003204 * t4_set_params - sets FW or device parameters
3205 * @adap: the adapter
3206 * @mbox: mailbox to use for the FW command
3207 * @pf: the PF
3208 * @vf: the VF
3209 * @nparams: the number of parameters
3210 * @params: the parameter names
3211 * @val: the parameter values
3212 *
3213 * Sets the value of FW or device parameters. Up to 7 parameters can be
3214 * specified at once.
3215 */
3216int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3217 unsigned int vf, unsigned int nparams, const u32 *params,
3218 const u32 *val)
3219{
3220 struct fw_params_cmd c;
3221 __be32 *p = &c.param[0].mnem;
3222
3223 if (nparams > 7)
3224 return -EINVAL;
3225
3226 memset(&c, 0, sizeof(c));
3227 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
3228 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
3229 FW_PARAMS_CMD_VFN(vf));
3230 c.retval_len16 = htonl(FW_LEN16(c));
3231 while (nparams--) {
3232 *p++ = htonl(*params++);
3233 *p++ = htonl(*val++);
3234 }
3235
3236 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3237}
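/*
 * Usage sketch (editorial): setting parameters mirrors the query path but
 * the command carries (mnemonic, value) pairs, so writing a single
 * parameter looks like this (the mnemonic encoding is elided on purpose;
 * adap->mbox and adap->fn are again assumptions borrowed from the main
 * driver):
 *
 *	u32 param = <encoded parameter mnemonic>, val = <new value>;
 *	int ret = t4_set_params(adap, adap->mbox, adap->fn, 0, 1,
 *				&param, &val);
 */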
3238
3239/**
3240 * t4_cfg_pfvf - configure PF/VF resource limits
3241 * @adap: the adapter
3242 * @mbox: mailbox to use for the FW command
3243 * @pf: the PF being configured
3244 * @vf: the VF being configured
3245 * @txq: the max number of egress queues
3246 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
3247 * @rxqi: the max number of interrupt-capable ingress queues
3248 * @rxq: the max number of interruptless ingress queues
3249 * @tc: the PCI traffic class
3250 * @vi: the max number of virtual interfaces
3251 * @cmask: the channel access rights mask for the PF/VF
3252 * @pmask: the port access rights mask for the PF/VF
3253 * @nexact: the maximum number of exact MPS filters
3254 * @rcaps: read capabilities
3255 * @wxcaps: write/execute capabilities
3256 *
3257 * Configures resource limits and capabilities for a physical or virtual
3258 * function.
3259 */
3260int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
3261 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
3262 unsigned int rxqi, unsigned int rxq, unsigned int tc,
3263 unsigned int vi, unsigned int cmask, unsigned int pmask,
3264 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
3265{
3266 struct fw_pfvf_cmd c;
3267
3268 memset(&c, 0, sizeof(c));
3269 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
3270 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
3271 FW_PFVF_CMD_VFN(vf));
3272 c.retval_len16 = htonl(FW_LEN16(c));
3273 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
3274 FW_PFVF_CMD_NIQ(rxq));
Casey Leedom81323b72010-06-25 12:10:32 +00003275 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003276 FW_PFVF_CMD_PMASK(pmask) |
3277 FW_PFVF_CMD_NEQ(txq));
3278 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
3279 FW_PFVF_CMD_NEXACTF(nexact));
3280 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
3281 FW_PFVF_CMD_WX_CAPS(wxcaps) |
3282 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
3283 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3284}
3285
3286/**
3287 * t4_alloc_vi - allocate a virtual interface
3288 * @adap: the adapter
3289 * @mbox: mailbox to use for the FW command
3290 * @port: physical port associated with the VI
3291 * @pf: the PF owning the VI
3292 * @vf: the VF owning the VI
3293 * @nmac: number of MAC addresses needed (1 to 5)
3294 * @mac: the MAC addresses of the VI
3295 * @rss_size: size of RSS table slice associated with this VI
3296 *
3297 * Allocates a virtual interface for the given physical port. If @mac is
3298 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
3299 * @mac should be large enough to hold @nmac Ethernet addresses; they are
3300 * stored consecutively so the space needed is @nmac * 6 bytes.
3301 * Returns a negative error number or the non-negative VI id.
3302 */
3303int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
3304 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
3305 unsigned int *rss_size)
3306{
3307 int ret;
3308 struct fw_vi_cmd c;
3309
3310 memset(&c, 0, sizeof(c));
3311 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
3312 FW_CMD_WRITE | FW_CMD_EXEC |
3313 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
3314 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
3315 c.portid_pkd = FW_VI_CMD_PORTID(port);
3316 c.nmac = nmac - 1;
3317
3318 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3319 if (ret)
3320 return ret;
3321
3322 if (mac) {
3323 memcpy(mac, c.mac, sizeof(c.mac));
3324 switch (nmac) {
3325 case 5:
3326 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); /* fall through */
3327 case 4:
3328 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); /* fall through */
3329 case 3:
3330 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); /* fall through */
3331 case 2:
3332 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
3333 }
3334 }
3335 if (rss_size)
3336 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00003337 return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003338}
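/*
 * Sizing note with a small sketch (editorial): the MAC addresses are
 * packed back to back, so asking for nmac = 3 requires an 18-byte buffer;
 * the extra addresses land at offsets 6 and 12 via the fall-through
 * copies above.  Variable names below are illustrative:
 *
 *	u8 mac[3 * ETH_ALEN];
 *	unsigned int rss_size;
 *	int viid = t4_alloc_vi(adap, adap->mbox, port, pf, 0, 3, mac,
 *			       &rss_size);
 */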
3339
3340/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003341 * t4_set_rxmode - set Rx properties of a virtual interface
3342 * @adap: the adapter
3343 * @mbox: mailbox to use for the FW command
3344 * @viid: the VI id
3345 * @mtu: the new MTU or -1
3346 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
3347 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
3348 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00003349 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003350 * @sleep_ok: if true we may sleep while awaiting command completion
3351 *
3352 * Sets Rx properties of a virtual interface.
3353 */
3354int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00003355 int mtu, int promisc, int all_multi, int bcast, int vlanex,
3356 bool sleep_ok)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003357{
3358 struct fw_vi_rxmode_cmd c;
3359
3360 /* convert to FW values */
3361 if (mtu < 0)
3362 mtu = FW_RXMODE_MTU_NO_CHG;
3363 if (promisc < 0)
3364 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
3365 if (all_multi < 0)
3366 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
3367 if (bcast < 0)
3368 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00003369 if (vlanex < 0)
3370 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003371
3372 memset(&c, 0, sizeof(c));
3373 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
3374 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
3375 c.retval_len16 = htonl(FW_LEN16(c));
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00003376 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
3377 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
3378 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
3379 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
3380 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003381 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3382}
3383
3384/**
3385 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
3386 * @adap: the adapter
3387 * @mbox: mailbox to use for the FW command
3388 * @viid: the VI id
3389 * @free: if true any existing filters for this VI id are first removed
3390 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
3391 * @addr: the MAC address(es)
3392 * @idx: where to store the index of each allocated filter
3393 * @hash: pointer to hash address filter bitmap
3394 * @sleep_ok: call is allowed to sleep
3395 *
3396 * Allocates an exact-match filter for each of the supplied addresses and
3397 * sets it to the corresponding address. If @idx is not %NULL it should
3398 * have at least @naddr entries, each of which will be set to the index of
3399 * the filter allocated for the corresponding MAC address. If a filter
3400 * could not be allocated for an address its index is set to 0xffff.
3401 * If @hash is not %NULL addresses that fail to allocate an exact filter
3402 * are hashed and update the hash filter bitmap pointed at by @hash.
3403 *
3404 * Returns a negative error number or the number of filters allocated.
3405 */
3406int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
3407 unsigned int viid, bool free, unsigned int naddr,
3408 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
3409{
3410 int i, ret;
3411 struct fw_vi_mac_cmd c;
3412 struct fw_vi_mac_exact *p;
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303413 unsigned int max_naddr = is_t4(adap->params.chip) ?
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003414 NUM_MPS_CLS_SRAM_L_INSTANCES :
3415 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003416
3417 if (naddr > 7)
3418 return -EINVAL;
3419
3420 memset(&c, 0, sizeof(c));
3421 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3422 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
3423 FW_VI_MAC_CMD_VIID(viid));
3424 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
3425 FW_CMD_LEN16((naddr + 2) / 2));
3426
3427 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3428 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3429 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
3430 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
3431 }
3432
3433 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
3434 if (ret)
3435 return ret;
3436
3437 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3438 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3439
3440 if (idx)
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003441 idx[i] = index >= max_naddr ? 0xffff : index;
3442 if (index < max_naddr)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003443 ret++;
3444 else if (hash)
Dimitris Michailidisce9aeb52010-12-03 10:39:04 +00003445 *hash |= (1ULL << hash_mac_addr(addr[i]));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003446 }
3447 return ret;
3448}
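/*
 * Usage sketch (editorial): programming a couple of unicast addresses and
 * letting any overflow fall back to the hash filter.  dev, secondary_mac
 * and pi are illustrative names:
 *
 *	const u8 *addr[2] = { dev->dev_addr, secondary_mac };
 *	u16 idx[2];
 *	u64 hash = 0;
 *	int nfilt = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, true, 2,
 *				      addr, idx, &hash, true);
 *
 * A non-negative return gives the number of exact-match filters actually
 * allocated; entries that didn't fit report idx[i] == 0xffff and have
 * their bit set in hash, which can then be handed to t4_set_addr_hash().
 */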
3449
3450/**
3451 * t4_change_mac - modifies the exact-match filter for a MAC address
3452 * @adap: the adapter
3453 * @mbox: mailbox to use for the FW command
3454 * @viid: the VI id
3455 * @idx: index of existing filter for old value of MAC address, or -1
3456 * @addr: the new MAC address value
3457 * @persist: whether a new MAC allocation should be persistent
3458 * @add_smt: if true also add the address to the HW SMT
3459 *
3460 * Modifies an exact-match filter and sets it to the new MAC address.
3461 * Note that in general it is not possible to modify the value of a given
3462 * filter so the generic way to modify an address filter is to free the one
3463 * being used by the old address value and allocate a new filter for the
3464 * new address value. @idx can be -1 if the address is a new addition.
3465 *
3466 * Returns a negative error number or the index of the filter with the new
3467 * MAC value.
3468 */
3469int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
3470 int idx, const u8 *addr, bool persist, bool add_smt)
3471{
3472 int ret, mode;
3473 struct fw_vi_mac_cmd c;
3474 struct fw_vi_mac_exact *p = c.u.exact;
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303475 unsigned int max_mac_addr = is_t4(adap->params.chip) ?
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003476 NUM_MPS_CLS_SRAM_L_INSTANCES :
3477 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003478
3479 if (idx < 0) /* new allocation */
3480 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
3481 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
3482
3483 memset(&c, 0, sizeof(c));
3484 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3485 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
3486 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
3487 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3488 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
3489 FW_VI_MAC_CMD_IDX(idx));
3490 memcpy(p->macaddr, addr, sizeof(p->macaddr));
3491
3492 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3493 if (ret == 0) {
3494 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003495 if (ret >= max_mac_addr)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003496 ret = -ENOMEM;
3497 }
3498 return ret;
3499}
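/*
 * Usage sketch (editorial): the first time a VI's address is programmed
 * the caller passes idx = -1 and remembers the returned filter index;
 * later changes pass that index back so the old filter is replaced in
 * place.  pi->xact_addr_filt as the place to remember it is an assumption
 * borrowed from the cxgb4 main driver:
 *
 *	ret = t4_change_mac(adap, adap->mbox, pi->viid, pi->xact_addr_filt,
 *			    newaddr, true, true);
 *	if (ret >= 0)
 *		pi->xact_addr_filt = ret;
 */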
3500
3501/**
3502 * t4_set_addr_hash - program the MAC inexact-match hash filter
3503 * @adap: the adapter
3504 * @mbox: mailbox to use for the FW command
3505 * @viid: the VI id
3506 * @ucast: whether the hash filter should also match unicast addresses
3507 * @vec: the value to be written to the hash filter
3508 * @sleep_ok: call is allowed to sleep
3509 *
3510 * Sets the 64-bit inexact-match hash filter for a virtual interface.
3511 */
3512int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
3513 bool ucast, u64 vec, bool sleep_ok)
3514{
3515 struct fw_vi_mac_cmd c;
3516
3517 memset(&c, 0, sizeof(c));
3518 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3519 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
3520 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
3521 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
3522 FW_CMD_LEN16(1));
3523 c.u.hash.hashvec = cpu_to_be64(vec);
3524 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3525}
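/*
 * Usage sketch (editorial): the 64-bit vector is typically built from the
 * same hash_mac_addr() used for overflow addresses above, one bit per
 * hash bucket.  The netdev multicast walk below is illustrative:
 *
 *	struct netdev_hw_addr *ha;
 *	u64 vec = 0;
 *
 *	netdev_for_each_mc_addr(ha, dev)
 *		vec |= 1ULL << hash_mac_addr(ha->addr);
 *	t4_set_addr_hash(adap, adap->mbox, pi->viid, false, vec, true);
 */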
3526
3527/**
Anish Bhatt688848b2014-06-19 21:37:13 -07003528 * t4_enable_vi_params - enable/disable a virtual interface
3529 * @adap: the adapter
3530 * @mbox: mailbox to use for the FW command
3531 * @viid: the VI id
3532 * @rx_en: 1=enable Rx, 0=disable Rx
3533 * @tx_en: 1=enable Tx, 0=disable Tx
3534 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
3535 *
3536 * Enables/disables a virtual interface. Note that setting DCB Enable
3537 * only makes sense when enabling a Virtual Interface ...
3538 */
3539int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
3540 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
3541{
3542 struct fw_vi_enable_cmd c;
3543
3544 memset(&c, 0, sizeof(c));
3545 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3546 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3547
3548 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
3549 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c) |
3550 FW_VI_ENABLE_CMD_DCB_INFO(dcb_en));
3551 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3552}
3553
3554/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003555 * t4_enable_vi - enable/disable a virtual interface
3556 * @adap: the adapter
3557 * @mbox: mailbox to use for the FW command
3558 * @viid: the VI id
3559 * @rx_en: 1=enable Rx, 0=disable Rx
3560 * @tx_en: 1=enable Tx, 0=disable Tx
3561 *
3562 * Enables/disables a virtual interface.
3563 */
3564int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
3565 bool rx_en, bool tx_en)
3566{
Anish Bhatt688848b2014-06-19 21:37:13 -07003567 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003568}
3569
3570/**
3571 * t4_identify_port - identify a VI's port by blinking its LED
3572 * @adap: the adapter
3573 * @mbox: mailbox to use for the FW command
3574 * @viid: the VI id
3575 * @nblinks: how many times to blink LED at 2.5 Hz
3576 *
3577 * Identifies a VI's port by blinking its LED.
3578 */
3579int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
3580 unsigned int nblinks)
3581{
3582 struct fw_vi_enable_cmd c;
3583
Vipul Pandya0062b152012-11-06 03:37:09 +00003584 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003585 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3586 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3587 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
3588 c.blinkdur = htons(nblinks);
3589 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3590}
3591
3592/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003593 * t4_iq_free - free an ingress queue and its FLs
3594 * @adap: the adapter
3595 * @mbox: mailbox to use for the FW command
3596 * @pf: the PF owning the queues
3597 * @vf: the VF owning the queues
3598 * @iqtype: the ingress queue type
3599 * @iqid: ingress queue id
3600 * @fl0id: FL0 queue id or 0xffff if no attached FL0
3601 * @fl1id: FL1 queue id or 0xffff if no attached FL1
3602 *
3603 * Frees an ingress queue and its associated FLs, if any.
3604 */
3605int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3606 unsigned int vf, unsigned int iqtype, unsigned int iqid,
3607 unsigned int fl0id, unsigned int fl1id)
3608{
3609 struct fw_iq_cmd c;
3610
3611 memset(&c, 0, sizeof(c));
3612 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
3613 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
3614 FW_IQ_CMD_VFN(vf));
3615 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
3616 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
3617 c.iqid = htons(iqid);
3618 c.fl0id = htons(fl0id);
3619 c.fl1id = htons(fl1id);
3620 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3621}
3622
3623/**
3624 * t4_eth_eq_free - free an Ethernet egress queue
3625 * @adap: the adapter
3626 * @mbox: mailbox to use for the FW command
3627 * @pf: the PF owning the queue
3628 * @vf: the VF owning the queue
3629 * @eqid: egress queue id
3630 *
3631 * Frees an Ethernet egress queue.
3632 */
3633int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3634 unsigned int vf, unsigned int eqid)
3635{
3636 struct fw_eq_eth_cmd c;
3637
3638 memset(&c, 0, sizeof(c));
3639 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
3640 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
3641 FW_EQ_ETH_CMD_VFN(vf));
3642 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
3643 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
3644 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3645}
3646
3647/**
3648 * t4_ctrl_eq_free - free a control egress queue
3649 * @adap: the adapter
3650 * @mbox: mailbox to use for the FW command
3651 * @pf: the PF owning the queue
3652 * @vf: the VF owning the queue
3653 * @eqid: egress queue id
3654 *
3655 * Frees a control egress queue.
3656 */
3657int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3658 unsigned int vf, unsigned int eqid)
3659{
3660 struct fw_eq_ctrl_cmd c;
3661
3662 memset(&c, 0, sizeof(c));
3663 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
3664 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
3665 FW_EQ_CTRL_CMD_VFN(vf));
3666 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
3667 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
3668 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3669}
3670
3671/**
3672 * t4_ofld_eq_free - free an offload egress queue
3673 * @adap: the adapter
3674 * @mbox: mailbox to use for the FW command
3675 * @pf: the PF owning the queue
3676 * @vf: the VF owning the queue
3677 * @eqid: egress queue id
3678 *
3679 * Frees an offload egress queue.
3680 */
3681int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3682 unsigned int vf, unsigned int eqid)
3683{
3684 struct fw_eq_ofld_cmd c;
3685
3686 memset(&c, 0, sizeof(c));
3687 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
3688 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
3689 FW_EQ_OFLD_CMD_VFN(vf));
3690 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
3691 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
3692 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3693}
3694
3695/**
3696 * t4_handle_fw_rpl - process a FW reply message
3697 * @adap: the adapter
3698 * @rpl: start of the FW message
3699 *
3700 * Processes a FW message, such as link state change messages.
3701 */
3702int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
3703{
3704 u8 opcode = *(const u8 *)rpl;
3705
3706 if (opcode == FW_PORT_CMD) { /* link/module state change message */
3707 int speed = 0, fc = 0;
3708 const struct fw_port_cmd *p = (void *)rpl;
3709 int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
3710 int port = adap->chan_map[chan];
3711 struct port_info *pi = adap2pinfo(adap, port);
3712 struct link_config *lc = &pi->link_cfg;
3713 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
3714 int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
3715 u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
3716
3717 if (stat & FW_PORT_CMD_RXPAUSE)
3718 fc |= PAUSE_RX;
3719 if (stat & FW_PORT_CMD_TXPAUSE)
3720 fc |= PAUSE_TX;
3721 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
Ben Hutchingse8b39012014-02-23 00:03:24 +00003722 speed = 100;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003723 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
Ben Hutchingse8b39012014-02-23 00:03:24 +00003724 speed = 1000;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003725 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
Ben Hutchingse8b39012014-02-23 00:03:24 +00003726 speed = 10000;
Kumar Sanghvi72aca4b2014-02-18 17:56:08 +05303727 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
Ben Hutchingse8b39012014-02-23 00:03:24 +00003728 speed = 40000;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003729
3730 if (link_ok != lc->link_ok || speed != lc->speed ||
3731 fc != lc->fc) { /* something changed */
3732 lc->link_ok = link_ok;
3733 lc->speed = speed;
3734 lc->fc = fc;
3735 t4_os_link_changed(adap, port, link_ok);
3736 }
3737 if (mod != pi->mod_type) {
3738 pi->mod_type = mod;
3739 t4_os_portmod_changed(adap, port);
3740 }
3741 }
3742 return 0;
3743}
3744
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00003745static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003746{
3747 u16 val;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003748
Jiang Liue5c8ae52012-08-20 13:53:19 -06003749 if (pci_is_pcie(adapter->pdev)) {
3750 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003751 p->speed = val & PCI_EXP_LNKSTA_CLS;
3752 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3753 }
3754}
3755
3756/**
3757 * init_link_config - initialize a link's SW state
3758 * @lc: structure holding the link state
3759 * @caps: link capabilities
3760 *
3761 * Initializes the SW state maintained for each link, including the link's
3762 * capabilities and default speed/flow-control/autonegotiation settings.
3763 */
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00003764static void init_link_config(struct link_config *lc, unsigned int caps)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003765{
3766 lc->supported = caps;
3767 lc->requested_speed = 0;
3768 lc->speed = 0;
3769 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3770 if (lc->supported & FW_PORT_CAP_ANEG) {
3771 lc->advertising = lc->supported & ADVERT_MASK;
3772 lc->autoneg = AUTONEG_ENABLE;
3773 lc->requested_fc |= PAUSE_AUTONEG;
3774 } else {
3775 lc->advertising = 0;
3776 lc->autoneg = AUTONEG_DISABLE;
3777 }
3778}
3779
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00003780int t4_wait_dev_ready(struct adapter *adap)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003781{
3782 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
3783 return 0;
3784 msleep(500);
3785 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3786}
3787
Bill Pemberton91744942012-12-03 09:23:02 -05003788static int get_flash_params(struct adapter *adap)
Dimitris Michailidis900a6592010-06-18 10:05:27 +00003789{
3790 int ret;
3791 u32 info;
3792
3793 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
3794 if (!ret)
3795 ret = sf1_read(adap, 3, 0, 1, &info);
3796 t4_write_reg(adap, SF_OP, 0); /* unlock SF */
3797 if (ret)
3798 return ret;
3799
3800 if ((info & 0xff) != 0x20) /* not a Numonix flash */
3801 return -EINVAL;
3802 info >>= 16; /* log2 of size */
3803 if (info >= 0x14 && info < 0x18)
3804 adap->params.sf_nsec = 1 << (info - 16);
3805 else if (info == 0x18)
3806 adap->params.sf_nsec = 64;
3807 else
3808 return -EINVAL;
3809 adap->params.sf_size = 1 << info;
3810 adap->params.sf_fw_start =
3811 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
3812 return 0;
3813}
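/*
 * Worked example (editorial): a part whose ID reads back with a low byte
 * of 0x20 (Numonix) and a log2-size field of 0x16 decodes to sf_size =
 * 1 << 0x16 = 4 MB and sf_nsec = 1 << (0x16 - 16) = 64, i.e. 64 sectors
 * of 64KB each.
 */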
3814
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003815/**
3816 * t4_prep_adapter - prepare SW and HW for operation
3817 * @adapter: the adapter
3819 *
3820 * Initialize adapter SW state for the various HW modules, set initial
3821 * values for some adapter tunables, take PHYs out of reset, and
3822 * initialize the MDIO interface.
3823 */
Bill Pemberton91744942012-12-03 09:23:02 -05003824int t4_prep_adapter(struct adapter *adapter)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003825{
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003826 int ret, ver;
3827 uint16_t device_id;
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303828 u32 pl_rev;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003829
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00003830 ret = t4_wait_dev_ready(adapter);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003831 if (ret < 0)
3832 return ret;
3833
3834 get_pci_mode(adapter, &adapter->params.pci);
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303835 pl_rev = G_REV(t4_read_reg(adapter, PL_REV));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003836
Dimitris Michailidis900a6592010-06-18 10:05:27 +00003837 ret = get_flash_params(adapter);
3838 if (ret < 0) {
3839 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
3840 return ret;
3841 }
3842
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003843 /* Retrieve adapter's device ID
3844 */
3845 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
3846 ver = device_id >> 12;
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303847 adapter->params.chip = 0;
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003848 switch (ver) {
3849 case CHELSIO_T4:
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303850 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003851 break;
3852 case CHELSIO_T5:
Hariprasad Shenaid14807d2013-12-03 17:05:56 +05303853 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
Santosh Rastapur0a57a532013-03-14 05:08:49 +00003854 break;
3855 default:
3856 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3857 device_id);
3858 return -EINVAL;
3859 }
3860
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003861 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3862
3863 /*
3864 * Default port for debugging in case we can't reach FW.
3865 */
3866 adapter->params.nports = 1;
3867 adapter->params.portvec = 1;
Vipul Pandya636f9d32012-09-26 02:39:39 +00003868 adapter->params.vpd.cclk = 50000;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003869 return 0;
3870}
3871
Kumar Sanghvidcf7b6f2013-12-18 16:38:23 +05303872/**
3873 * t4_init_tp_params - initialize adap->params.tp
3874 * @adap: the adapter
3875 *
3876 * Initialize various fields of the adapter's TP Parameters structure.
3877 */
3878int t4_init_tp_params(struct adapter *adap)
3879{
3880 int chan;
3881 u32 v;
3882
3883 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3884 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
3885 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
3886
3887 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
3888 for (chan = 0; chan < NCHAN; chan++)
3889 adap->params.tp.tx_modq[chan] = chan;
3890
3891 /* Cache the adapter's Compressed Filter Mode and global Ingress
3892 * Configuration.
3893 */
3894 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3895 &adap->params.tp.vlan_pri_map, 1,
3896 TP_VLAN_PRI_MAP);
3897 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3898 &adap->params.tp.ingress_config, 1,
3899 TP_INGRESS_CONFIG);
3900
3901 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
3902 * shift positions of several elements of the Compressed Filter Tuple
3903 * for this adapter which we need frequently ...
3904 */
3905 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
3906 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
3907 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
3908 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
3909 F_PROTOCOL);
3910
3911 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
3912 * represents the presence of an Outer VLAN instead of a VNIC ID.
3913 */
3914 if ((adap->params.tp.ingress_config & F_VNIC) == 0)
3915 adap->params.tp.vnic_shift = -1;
3916
3917 return 0;
3918}
3919
3920/**
3921 * t4_filter_field_shift - calculate filter field shift
3922 * @adap: the adapter
3923 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
3924 *
3925 * Return the shift position of a filter field within the Compressed
3926 * Filter Tuple. The filter field is specified via its selection bit
3927 * within TP_VLAN_PRI_MAP (filter mode), e.g. F_VLAN.
3928 */
3929int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
3930{
3931 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
3932 unsigned int sel;
3933 int field_shift;
3934
3935 if ((filter_mode & filter_sel) == 0)
3936 return -1;
3937
3938 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
3939 switch (filter_mode & sel) {
3940 case F_FCOE:
3941 field_shift += W_FT_FCOE;
3942 break;
3943 case F_PORT:
3944 field_shift += W_FT_PORT;
3945 break;
3946 case F_VNIC_ID:
3947 field_shift += W_FT_VNIC_ID;
3948 break;
3949 case F_VLAN:
3950 field_shift += W_FT_VLAN;
3951 break;
3952 case F_TOS:
3953 field_shift += W_FT_TOS;
3954 break;
3955 case F_PROTOCOL:
3956 field_shift += W_FT_PROTOCOL;
3957 break;
3958 case F_ETHERTYPE:
3959 field_shift += W_FT_ETHERTYPE;
3960 break;
3961 case F_MACMATCH:
3962 field_shift += W_FT_MACMATCH;
3963 break;
3964 case F_MPSHITTYPE:
3965 field_shift += W_FT_MPSHITTYPE;
3966 break;
3967 case F_FRAGMENTATION:
3968 field_shift += W_FT_FRAGMENTATION;
3969 break;
3970 }
3971 }
3972 return field_shift;
3973}
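/*
 * Worked example (editorial): if TP_VLAN_PRI_MAP enables only the Port
 * and VLAN fields, then t4_filter_field_shift(adap, F_PORT) returns 0,
 * t4_filter_field_shift(adap, F_VLAN) returns W_FT_PORT (the width of the
 * only enabled field below it), and asking for a disabled field such as
 * F_PROTOCOL returns -1.
 */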
3974
Bill Pemberton91744942012-12-03 09:23:02 -05003975int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003976{
3977 u8 addr[6];
3978 int ret, i, j = 0;
3979 struct fw_port_cmd c;
Dimitris Michailidisf7965642010-07-11 12:01:18 +00003980 struct fw_rss_vi_config_cmd rvc;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003981
3982 memset(&c, 0, sizeof(c));
Dimitris Michailidisf7965642010-07-11 12:01:18 +00003983 memset(&rvc, 0, sizeof(rvc));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003984
3985 for_each_port(adap, i) {
3986 unsigned int rss_size;
3987 struct port_info *p = adap2pinfo(adap, i);
3988
3989 while ((adap->params.portvec & (1 << j)) == 0)
3990 j++;
3991
3992 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
3993 FW_CMD_REQUEST | FW_CMD_READ |
3994 FW_PORT_CMD_PORTID(j));
3995 c.action_to_len16 = htonl(
3996 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
3997 FW_LEN16(c));
3998 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3999 if (ret)
4000 return ret;
4001
4002 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
4003 if (ret < 0)
4004 return ret;
4005
4006 p->viid = ret;
4007 p->tx_chan = j;
4008 p->lport = j;
4009 p->rss_size = rss_size;
4010 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
Thadeu Lima de Souza Cascardo40c9f8a2014-06-21 09:48:08 -03004011 adap->port[i]->dev_port = j;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00004012
4013 ret = ntohl(c.u.info.lstatus_to_modtype);
4014 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
4015 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
4016 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00004017 p->mod_type = FW_PORT_MOD_TYPE_NA;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00004018
Dimitris Michailidisf7965642010-07-11 12:01:18 +00004019 rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
4020 FW_CMD_REQUEST | FW_CMD_READ |
4021 FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
4022 rvc.retval_len16 = htonl(FW_LEN16(rvc));
4023 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
4024 if (ret)
4025 return ret;
4026 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
4027
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00004028 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
4029 j++;
4030 }
4031 return 0;
4032}