blob: 137a24438d9c76532d691158913a9066d38146f0 [file] [log] [blame]
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/init.h>
36#include <linux/delay.h>
37#include "cxgb4.h"
38#include "t4_regs.h"
39#include "t4fw_api.h"
40
41/**
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
50 *
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
55 */
Roland Dreierde498c82010-04-21 08:59:17 +000056static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +000058{
59 while (1) {
60 u32 val = t4_read_reg(adapter, reg);
61
62 if (!!(val & mask) == polarity) {
63 if (valp)
64 *valp = val;
65 return 0;
66 }
67 if (--attempts == 0)
68 return -EAGAIN;
69 if (delay)
70 udelay(delay);
71 }
72}
73
74static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
75 int polarity, int attempts, int delay)
76{
77 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
78 delay, NULL);
79}
80
81/**
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
87 *
88 * Sets a register field specified by the supplied mask to the
89 * given value.
90 */
91void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
92 u32 val)
93{
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
95
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush */
98}
99
100/**
101 * t4_read_indirect - read indirectly addressed registers
102 * @adap: the adapter
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
108 *
109 * Reads registers that are accessed indirectly through an address/data
110 * register pair.
111 */
Roland Dreierde498c82010-04-21 08:59:17 +0000112static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals,
114 unsigned int nregs, unsigned int start_idx)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000115{
116 while (nregs--) {
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
119 start_idx++;
120 }
121}
122
Vipul Pandya13ee15d2012-09-26 02:39:40 +0000123/**
124 * t4_write_indirect - write indirectly addressed registers
125 * @adap: the adapter
126 * @addr_reg: register holding the indirect addresses
127 * @data_reg: register holding the value for the indirect registers
128 * @vals: values to write
129 * @nregs: how many indirect registers to write
130 * @start_idx: address of first indirect register to write
131 *
132 * Writes a sequential block of registers that are accessed indirectly
133 * through an address/data register pair.
134 */
135void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
136 unsigned int data_reg, const u32 *vals,
137 unsigned int nregs, unsigned int start_idx)
138{
139 while (nregs--) {
140 t4_write_reg(adap, addr_reg, start_idx++);
141 t4_write_reg(adap, data_reg, *vals++);
142 }
143}
144
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000145/*
146 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
147 */
148static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
149 u32 mbox_addr)
150{
151 for ( ; nflit; nflit--, mbox_addr += 8)
152 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
153}
154
155/*
156 * Handle a FW assertion reported in a mailbox.
157 */
158static void fw_asrt(struct adapter *adap, u32 mbox_addr)
159{
160 struct fw_debug_cmd asrt;
161
162 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
163 dev_alert(adap->pdev_dev,
164 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
165 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
166 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
167}
168
169static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
170{
171 dev_err(adap->pdev_dev,
172 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
173 (unsigned long long)t4_read_reg64(adap, data_reg),
174 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
175 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
176 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
177 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
178 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
179 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
180 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
181}
182
183/**
184 * t4_wr_mbox_meat - send a command to FW through the given mailbox
185 * @adap: the adapter
186 * @mbox: index of the mailbox to use
187 * @cmd: the command to write
188 * @size: command length in bytes
189 * @rpl: where to optionally store the reply
190 * @sleep_ok: if true we may sleep while awaiting command completion
191 *
192 * Sends the given command to FW through the selected mailbox and waits
193 * for the FW to execute the command. If @rpl is not %NULL it is used to
194 * store the FW's reply to the command. The command and its optional
195 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
196 * to respond. @sleep_ok determines whether we may sleep while awaiting
197 * the response. If sleeping is allowed we use progressive backoff
198 * otherwise we spin.
199 *
200 * The return value is 0 on success or a negative errno on failure. A
201 * failure can happen either because we are not able to execute the
202 * command or FW executes it but signals an error. In the latter case
203 * the return value is the error code indicated by FW (negated).
204 */
205int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
206 void *rpl, bool sleep_ok)
207{
Joe Perches005b5712010-12-14 21:36:53 +0000208 static const int delay[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000209 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
210 };
211
212 u32 v;
213 u64 res;
214 int i, ms, delay_idx;
215 const __be64 *p = cmd;
216 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
217 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
218
219 if ((size & 15) || size > MBOX_LEN)
220 return -EINVAL;
221
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +0000222 /*
223 * If the device is off-line, as in EEH, commands will time out.
224 * Fail them early so we don't waste time waiting.
225 */
226 if (adap->pdev->error_state != pci_channel_io_normal)
227 return -EIO;
228
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000229 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
230 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
231 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
232
233 if (v != MBOX_OWNER_DRV)
234 return v ? -EBUSY : -ETIMEDOUT;
235
236 for (i = 0; i < size; i += 8)
237 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
238
239 t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
240 t4_read_reg(adap, ctl_reg); /* flush write */
241
242 delay_idx = 0;
243 ms = delay[0];
244
245 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
246 if (sleep_ok) {
247 ms = delay[delay_idx]; /* last element may repeat */
248 if (delay_idx < ARRAY_SIZE(delay) - 1)
249 delay_idx++;
250 msleep(ms);
251 } else
252 mdelay(ms);
253
254 v = t4_read_reg(adap, ctl_reg);
255 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
256 if (!(v & MBMSGVALID)) {
257 t4_write_reg(adap, ctl_reg, 0);
258 continue;
259 }
260
261 res = t4_read_reg64(adap, data_reg);
262 if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
263 fw_asrt(adap, data_reg);
264 res = FW_CMD_RETVAL(EIO);
265 } else if (rpl)
266 get_mbox_rpl(adap, rpl, size / 8, data_reg);
267
268 if (FW_CMD_RETVAL_GET((int)res))
269 dump_mbox(adap, mbox, data_reg);
270 t4_write_reg(adap, ctl_reg, 0);
271 return -FW_CMD_RETVAL_GET((int)res);
272 }
273 }
274
275 dump_mbox(adap, mbox, data_reg);
276 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
277 *(const u8 *)cmd, mbox);
278 return -ETIMEDOUT;
279}
280
281/**
282 * t4_mc_read - read from MC through backdoor accesses
283 * @adap: the adapter
284 * @addr: address of first byte requested
285 * @data: 64 bytes of data containing the requested address
286 * @ecc: where to store the corresponding 64-bit ECC word
287 *
288 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
289 * that covers the requested address @addr. If @parity is not %NULL it
290 * is assigned the 64-bit ECC word for the read data.
291 */
292int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
293{
294 int i;
295
296 if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
297 return -EBUSY;
298 t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
299 t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
300 t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
301 t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
302 BIST_CMD_GAP(1));
303 i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
304 if (i)
305 return i;
306
307#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
308
309 for (i = 15; i >= 0; i--)
310 *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
311 if (ecc)
312 *ecc = t4_read_reg64(adap, MC_DATA(16));
313#undef MC_DATA
314 return 0;
315}
316
317/**
318 * t4_edc_read - read from EDC through backdoor accesses
319 * @adap: the adapter
320 * @idx: which EDC to access
321 * @addr: address of first byte requested
322 * @data: 64 bytes of data containing the requested address
323 * @ecc: where to store the corresponding 64-bit ECC word
324 *
325 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
326 * that covers the requested address @addr. If @parity is not %NULL it
327 * is assigned the 64-bit ECC word for the read data.
328 */
329int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
330{
331 int i;
332
333 idx *= EDC_STRIDE;
334 if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
335 return -EBUSY;
336 t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
337 t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
338 t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
339 t4_write_reg(adap, EDC_BIST_CMD + idx,
340 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
341 i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
342 if (i)
343 return i;
344
345#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
346
347 for (i = 15; i >= 0; i--)
348 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
349 if (ecc)
350 *ecc = t4_read_reg64(adap, EDC_DATA(16));
351#undef EDC_DATA
352 return 0;
353}
354
Vipul Pandya5afc8b82012-09-26 02:39:37 +0000355/*
356 * t4_mem_win_rw - read/write memory through PCIE memory window
357 * @adap: the adapter
358 * @addr: address of first byte requested
359 * @data: MEMWIN0_APERTURE bytes of data containing the requested address
360 * @dir: direction of transfer 1 => read, 0 => write
361 *
362 * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
363 * MEMWIN0_APERTURE-byte-aligned address that covers the requested
364 * address @addr.
365 */
366static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
367{
368 int i;
369
370 /*
371 * Setup offset into PCIE memory window. Address must be a
372 * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to
373 * ensure that changes propagate before we attempt to use the new
374 * values.)
375 */
376 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
377 addr & ~(MEMWIN0_APERTURE - 1));
378 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
379
380 /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */
381 for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) {
382 if (dir)
383 *data++ = t4_read_reg(adap, (MEMWIN0_BASE + i));
384 else
385 t4_write_reg(adap, (MEMWIN0_BASE + i), *data++);
386 }
387
388 return 0;
389}
390
391/**
392 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
393 * @adap: the adapter
394 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
395 * @addr: address within indicated memory type
396 * @len: amount of memory to transfer
397 * @buf: host memory buffer
398 * @dir: direction of transfer 1 => read, 0 => write
399 *
400 * Reads/writes an [almost] arbitrary memory region in the firmware: the
401 * firmware memory address, length and host buffer must be aligned on
402 * 32-bit boudaries. The memory is transferred as a raw byte sequence
403 * from/to the firmware's memory. If this memory contains data
404 * structures which contain multi-byte integers, it's the callers
405 * responsibility to perform appropriate byte order conversions.
406 */
407static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
408 __be32 *buf, int dir)
409{
410 u32 pos, start, end, offset, memoffset;
Vipul Pandya8c357eb2012-10-03 03:22:32 +0000411 int ret = 0;
412 __be32 *data;
Vipul Pandya5afc8b82012-09-26 02:39:37 +0000413
414 /*
415 * Argument sanity checks ...
416 */
417 if ((addr & 0x3) || (len & 0x3))
418 return -EINVAL;
419
Vipul Pandya8c357eb2012-10-03 03:22:32 +0000420 data = vmalloc(MEMWIN0_APERTURE/sizeof(__be32));
421 if (!data)
422 return -ENOMEM;
423
Vipul Pandya5afc8b82012-09-26 02:39:37 +0000424 /*
425 * Offset into the region of memory which is being accessed
426 * MEM_EDC0 = 0
427 * MEM_EDC1 = 1
428 * MEM_MC = 2
429 */
430 memoffset = (mtype * (5 * 1024 * 1024));
431
432 /* Determine the PCIE_MEM_ACCESS_OFFSET */
433 addr = addr + memoffset;
434
435 /*
436 * The underlaying EDC/MC read routines read MEMWIN0_APERTURE bytes
437 * at a time so we need to round down the start and round up the end.
438 * We'll start copying out of the first line at (addr - start) a word
439 * at a time.
440 */
441 start = addr & ~(MEMWIN0_APERTURE-1);
442 end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
443 offset = (addr - start)/sizeof(__be32);
444
445 for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
Vipul Pandya5afc8b82012-09-26 02:39:37 +0000446
447 /*
448 * If we're writing, copy the data from the caller's memory
449 * buffer
450 */
451 if (!dir) {
452 /*
453 * If we're doing a partial write, then we need to do
454 * a read-modify-write ...
455 */
456 if (offset || len < MEMWIN0_APERTURE) {
457 ret = t4_mem_win_rw(adap, pos, data, 1);
458 if (ret)
Vipul Pandya8c357eb2012-10-03 03:22:32 +0000459 break;
Vipul Pandya5afc8b82012-09-26 02:39:37 +0000460 }
461 while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
462 len > 0) {
463 data[offset++] = *buf++;
464 len -= sizeof(__be32);
465 }
466 }
467
468 /*
469 * Transfer a block of memory and bail if there's an error.
470 */
471 ret = t4_mem_win_rw(adap, pos, data, dir);
472 if (ret)
Vipul Pandya8c357eb2012-10-03 03:22:32 +0000473 break;
Vipul Pandya5afc8b82012-09-26 02:39:37 +0000474
475 /*
476 * If we're reading, copy the data into the caller's memory
477 * buffer.
478 */
479 if (dir)
480 while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
481 len > 0) {
482 *buf++ = data[offset++];
483 len -= sizeof(__be32);
484 }
485 }
486
Vipul Pandya8c357eb2012-10-03 03:22:32 +0000487 vfree(data);
488 return ret;
Vipul Pandya5afc8b82012-09-26 02:39:37 +0000489}
490
491int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
492 __be32 *buf)
493{
494 return t4_memory_rw(adap, mtype, addr, len, buf, 0);
495}
496
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000497#define EEPROM_STAT_ADDR 0x7bfc
498#define VPD_BASE 0
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000499#define VPD_LEN 512
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000500
501/**
502 * t4_seeprom_wp - enable/disable EEPROM write protection
503 * @adapter: the adapter
504 * @enable: whether to enable or disable write protection
505 *
506 * Enables or disables write protection on the serial EEPROM.
507 */
508int t4_seeprom_wp(struct adapter *adapter, bool enable)
509{
510 unsigned int v = enable ? 0xc : 0;
511 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
512 return ret < 0 ? ret : 0;
513}
514
515/**
516 * get_vpd_params - read VPD parameters from VPD EEPROM
517 * @adapter: adapter to read
518 * @p: where to store the parameters
519 *
520 * Reads card parameters stored in VPD EEPROM.
521 */
Vipul Pandya636f9d32012-09-26 02:39:39 +0000522int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000523{
Vipul Pandya636f9d32012-09-26 02:39:39 +0000524 u32 cclk_param, cclk_val;
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000525 int i, ret;
Dimitris Michailidisec164002010-12-14 21:36:45 +0000526 int ec, sn;
Vipul Pandya8c357eb2012-10-03 03:22:32 +0000527 u8 *vpd, csum;
Dimitris Michailidis23d88e12010-12-14 21:36:54 +0000528 unsigned int vpdr_len, kw_offset, id_len;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000529
Vipul Pandya8c357eb2012-10-03 03:22:32 +0000530 vpd = vmalloc(VPD_LEN);
531 if (!vpd)
532 return -ENOMEM;
533
534 ret = pci_read_vpd(adapter->pdev, VPD_BASE, VPD_LEN, vpd);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000535 if (ret < 0)
Vipul Pandya8c357eb2012-10-03 03:22:32 +0000536 goto out;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000537
Dimitris Michailidis23d88e12010-12-14 21:36:54 +0000538 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
539 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
Vipul Pandya8c357eb2012-10-03 03:22:32 +0000540 ret = -EINVAL;
541 goto out;
Dimitris Michailidis23d88e12010-12-14 21:36:54 +0000542 }
543
544 id_len = pci_vpd_lrdt_size(vpd);
545 if (id_len > ID_LEN)
546 id_len = ID_LEN;
547
548 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
549 if (i < 0) {
550 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
Vipul Pandya8c357eb2012-10-03 03:22:32 +0000551 ret = -EINVAL;
552 goto out;
Dimitris Michailidis23d88e12010-12-14 21:36:54 +0000553 }
554
555 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
556 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
557 if (vpdr_len + kw_offset > VPD_LEN) {
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000558 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
Vipul Pandya8c357eb2012-10-03 03:22:32 +0000559 ret = -EINVAL;
560 goto out;
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000561 }
562
563#define FIND_VPD_KW(var, name) do { \
Dimitris Michailidis23d88e12010-12-14 21:36:54 +0000564 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000565 if (var < 0) { \
566 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
Vipul Pandya8c357eb2012-10-03 03:22:32 +0000567 ret = -EINVAL; \
568 goto out; \
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000569 } \
570 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
571} while (0)
572
573 FIND_VPD_KW(i, "RV");
574 for (csum = 0; i >= 0; i--)
575 csum += vpd[i];
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000576
577 if (csum) {
578 dev_err(adapter->pdev_dev,
579 "corrupted VPD EEPROM, actual csum %u\n", csum);
Vipul Pandya8c357eb2012-10-03 03:22:32 +0000580 ret = -EINVAL;
581 goto out;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000582 }
583
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000584 FIND_VPD_KW(ec, "EC");
585 FIND_VPD_KW(sn, "SN");
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000586#undef FIND_VPD_KW
587
Dimitris Michailidis23d88e12010-12-14 21:36:54 +0000588 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000589 strim(p->id);
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000590 memcpy(p->ec, vpd + ec, EC_LEN);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000591 strim(p->ec);
Dimitris Michailidis226ec5f2010-04-27 12:24:15 +0000592 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
593 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000594 strim(p->sn);
Vipul Pandya636f9d32012-09-26 02:39:39 +0000595
596 /*
597 * Ask firmware for the Core Clock since it knows how to translate the
598 * Reference Clock ('V2') VPD field into a Core Clock value ...
599 */
600 cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
601 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
602 ret = t4_query_params(adapter, adapter->mbox, 0, 0,
603 1, &cclk_param, &cclk_val);
Vipul Pandya8c357eb2012-10-03 03:22:32 +0000604
605out:
606 vfree(vpd);
Vipul Pandya636f9d32012-09-26 02:39:39 +0000607 if (ret)
608 return ret;
609 p->cclk = cclk_val;
610
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000611 return 0;
612}
613
614/* serial flash and firmware constants */
615enum {
616 SF_ATTEMPTS = 10, /* max retries for SF operations */
617
618 /* flash command opcodes */
619 SF_PROG_PAGE = 2, /* program page */
620 SF_WR_DISABLE = 4, /* disable writes */
621 SF_RD_STATUS = 5, /* read status register */
622 SF_WR_ENABLE = 6, /* enable writes */
623 SF_RD_DATA_FAST = 0xb, /* read flash */
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000624 SF_RD_ID = 0x9f, /* read ID */
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000625 SF_ERASE_SECTOR = 0xd8, /* erase sector */
626
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000627 FW_MAX_SIZE = 512 * 1024,
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000628};
629
630/**
631 * sf1_read - read data from the serial flash
632 * @adapter: the adapter
633 * @byte_cnt: number of bytes to read
634 * @cont: whether another operation will be chained
635 * @lock: whether to lock SF for PL access only
636 * @valp: where to store the read data
637 *
638 * Reads up to 4 bytes of data from the serial flash. The location of
639 * the read needs to be specified prior to calling this by issuing the
640 * appropriate commands to the serial flash.
641 */
642static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
643 int lock, u32 *valp)
644{
645 int ret;
646
647 if (!byte_cnt || byte_cnt > 4)
648 return -EINVAL;
649 if (t4_read_reg(adapter, SF_OP) & BUSY)
650 return -EBUSY;
651 cont = cont ? SF_CONT : 0;
652 lock = lock ? SF_LOCK : 0;
653 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
654 ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
655 if (!ret)
656 *valp = t4_read_reg(adapter, SF_DATA);
657 return ret;
658}
659
660/**
661 * sf1_write - write data to the serial flash
662 * @adapter: the adapter
663 * @byte_cnt: number of bytes to write
664 * @cont: whether another operation will be chained
665 * @lock: whether to lock SF for PL access only
666 * @val: value to write
667 *
668 * Writes up to 4 bytes of data to the serial flash. The location of
669 * the write needs to be specified prior to calling this by issuing the
670 * appropriate commands to the serial flash.
671 */
672static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
673 int lock, u32 val)
674{
675 if (!byte_cnt || byte_cnt > 4)
676 return -EINVAL;
677 if (t4_read_reg(adapter, SF_OP) & BUSY)
678 return -EBUSY;
679 cont = cont ? SF_CONT : 0;
680 lock = lock ? SF_LOCK : 0;
681 t4_write_reg(adapter, SF_DATA, val);
682 t4_write_reg(adapter, SF_OP, lock |
683 cont | BYTECNT(byte_cnt - 1) | OP_WR);
684 return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
685}
686
687/**
688 * flash_wait_op - wait for a flash operation to complete
689 * @adapter: the adapter
690 * @attempts: max number of polls of the status register
691 * @delay: delay between polls in ms
692 *
693 * Wait for a flash operation to complete by polling the status register.
694 */
695static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
696{
697 int ret;
698 u32 status;
699
700 while (1) {
701 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
702 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
703 return ret;
704 if (!(status & 1))
705 return 0;
706 if (--attempts == 0)
707 return -EAGAIN;
708 if (delay)
709 msleep(delay);
710 }
711}
712
713/**
714 * t4_read_flash - read words from serial flash
715 * @adapter: the adapter
716 * @addr: the start address for the read
717 * @nwords: how many 32-bit words to read
718 * @data: where to store the read data
719 * @byte_oriented: whether to store data as bytes or as words
720 *
721 * Read the specified number of 32-bit words from the serial flash.
722 * If @byte_oriented is set the read data is stored as a byte array
723 * (i.e., big-endian), otherwise as 32-bit words in the platform's
724 * natural endianess.
725 */
Roland Dreierde498c82010-04-21 08:59:17 +0000726static int t4_read_flash(struct adapter *adapter, unsigned int addr,
727 unsigned int nwords, u32 *data, int byte_oriented)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000728{
729 int ret;
730
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000731 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000732 return -EINVAL;
733
734 addr = swab32(addr) | SF_RD_DATA_FAST;
735
736 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
737 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
738 return ret;
739
740 for ( ; nwords; nwords--, data++) {
741 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
742 if (nwords == 1)
743 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
744 if (ret)
745 return ret;
746 if (byte_oriented)
747 *data = htonl(*data);
748 }
749 return 0;
750}
751
752/**
753 * t4_write_flash - write up to a page of data to the serial flash
754 * @adapter: the adapter
755 * @addr: the start address to write
756 * @n: length of data to write in bytes
757 * @data: the data to write
758 *
759 * Writes up to a page of data (256 bytes) to the serial flash starting
760 * at the given address. All the data must be written to the same page.
761 */
762static int t4_write_flash(struct adapter *adapter, unsigned int addr,
763 unsigned int n, const u8 *data)
764{
765 int ret;
766 u32 buf[64];
767 unsigned int i, c, left, val, offset = addr & 0xff;
768
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000769 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000770 return -EINVAL;
771
772 val = swab32(addr) | SF_PROG_PAGE;
773
774 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
775 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
776 goto unlock;
777
778 for (left = n; left; left -= c) {
779 c = min(left, 4U);
780 for (val = 0, i = 0; i < c; ++i)
781 val = (val << 8) + *data++;
782
783 ret = sf1_write(adapter, c, c != left, 1, val);
784 if (ret)
785 goto unlock;
786 }
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000787 ret = flash_wait_op(adapter, 8, 1);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000788 if (ret)
789 goto unlock;
790
791 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
792
793 /* Read the page to verify the write succeeded */
794 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
795 if (ret)
796 return ret;
797
798 if (memcmp(data - n, (u8 *)buf + offset, n)) {
799 dev_err(adapter->pdev_dev,
800 "failed to correctly write the flash page at %#x\n",
801 addr);
802 return -EIO;
803 }
804 return 0;
805
806unlock:
807 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
808 return ret;
809}
810
811/**
812 * get_fw_version - read the firmware version
813 * @adapter: the adapter
814 * @vers: where to place the version
815 *
816 * Reads the FW version from flash.
817 */
818static int get_fw_version(struct adapter *adapter, u32 *vers)
819{
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000820 return t4_read_flash(adapter, adapter->params.sf_fw_start +
821 offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000822}
823
824/**
825 * get_tp_version - read the TP microcode version
826 * @adapter: the adapter
827 * @vers: where to place the version
828 *
829 * Reads the TP microcode version from flash.
830 */
831static int get_tp_version(struct adapter *adapter, u32 *vers)
832{
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000833 return t4_read_flash(adapter, adapter->params.sf_fw_start +
834 offsetof(struct fw_hdr, tp_microcode_ver),
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000835 1, vers, 0);
836}
837
838/**
839 * t4_check_fw_version - check if the FW is compatible with this driver
840 * @adapter: the adapter
841 *
842 * Checks if an adapter's FW is compatible with the driver. Returns 0
843 * if there's exact match, a negative error if the version could not be
844 * read or there's a major version mismatch, and a positive value if the
845 * expected major version is found but there's a minor version mismatch.
846 */
847int t4_check_fw_version(struct adapter *adapter)
848{
849 u32 api_vers[2];
850 int ret, major, minor, micro;
851
852 ret = get_fw_version(adapter, &adapter->params.fw_vers);
853 if (!ret)
854 ret = get_tp_version(adapter, &adapter->params.tp_vers);
855 if (!ret)
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000856 ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
857 offsetof(struct fw_hdr, intfver_nic),
858 2, api_vers, 1);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000859 if (ret)
860 return ret;
861
862 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
863 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
864 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
865 memcpy(adapter->params.api_vers, api_vers,
866 sizeof(adapter->params.api_vers));
867
868 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
869 dev_err(adapter->pdev_dev,
870 "card FW has major version %u, driver wants %u\n",
871 major, FW_VERSION_MAJOR);
872 return -EINVAL;
873 }
874
875 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
876 return 0; /* perfect match */
877
878 /* Minor/micro version mismatch. Report it but often it's OK. */
879 return 1;
880}
881
882/**
883 * t4_flash_erase_sectors - erase a range of flash sectors
884 * @adapter: the adapter
885 * @start: the first sector to erase
886 * @end: the last sector to erase
887 *
888 * Erases the sectors in the given inclusive range.
889 */
890static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
891{
892 int ret = 0;
893
894 while (start <= end) {
895 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
896 (ret = sf1_write(adapter, 4, 0, 1,
897 SF_ERASE_SECTOR | (start << 8))) != 0 ||
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000898 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000899 dev_err(adapter->pdev_dev,
900 "erase of flash sector %d failed, error %d\n",
901 start, ret);
902 break;
903 }
904 start++;
905 }
906 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
907 return ret;
908}
909
910/**
Vipul Pandya636f9d32012-09-26 02:39:39 +0000911 * t4_flash_cfg_addr - return the address of the flash configuration file
912 * @adapter: the adapter
913 *
914 * Return the address within the flash where the Firmware Configuration
915 * File is stored.
916 */
917unsigned int t4_flash_cfg_addr(struct adapter *adapter)
918{
919 if (adapter->params.sf_size == 0x100000)
920 return FLASH_FPGA_CFG_START;
921 else
922 return FLASH_CFG_START;
923}
924
925/**
926 * t4_load_cfg - download config file
927 * @adap: the adapter
928 * @cfg_data: the cfg text file to write
929 * @size: text file size
930 *
931 * Write the supplied config text file to the card's serial flash.
932 */
933int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
934{
935 int ret, i, n;
936 unsigned int addr;
937 unsigned int flash_cfg_start_sec;
938 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
939
940 addr = t4_flash_cfg_addr(adap);
941 flash_cfg_start_sec = addr / SF_SEC_SIZE;
942
943 if (size > FLASH_CFG_MAX_SIZE) {
944 dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
945 FLASH_CFG_MAX_SIZE);
946 return -EFBIG;
947 }
948
949 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
950 sf_sec_size);
951 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
952 flash_cfg_start_sec + i - 1);
953 /*
954 * If size == 0 then we're simply erasing the FLASH sectors associated
955 * with the on-adapter Firmware Configuration File.
956 */
957 if (ret || size == 0)
958 goto out;
959
960 /* this will write to the flash up to SF_PAGE_SIZE at a time */
961 for (i = 0; i < size; i += SF_PAGE_SIZE) {
962 if ((size - i) < SF_PAGE_SIZE)
963 n = size - i;
964 else
965 n = SF_PAGE_SIZE;
966 ret = t4_write_flash(adap, addr, n, cfg_data);
967 if (ret)
968 goto out;
969
970 addr += SF_PAGE_SIZE;
971 cfg_data += SF_PAGE_SIZE;
972 }
973
974out:
975 if (ret)
976 dev_err(adap->pdev_dev, "config file %s failed %d\n",
977 (size == 0 ? "clear" : "download"), ret);
978 return ret;
979}
980
981/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +0000982 * t4_load_fw - download firmware
983 * @adap: the adapter
984 * @fw_data: the firmware image to write
985 * @size: image size
986 *
987 * Write the supplied firmware image to the card's serial flash.
988 */
989int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
990{
991 u32 csum;
992 int ret, addr;
993 unsigned int i;
994 u8 first_page[SF_PAGE_SIZE];
995 const u32 *p = (const u32 *)fw_data;
996 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
Dimitris Michailidis900a6592010-06-18 10:05:27 +0000997 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
998 unsigned int fw_img_start = adap->params.sf_fw_start;
999 unsigned int fw_start_sec = fw_img_start / sf_sec_size;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001000
1001 if (!size) {
1002 dev_err(adap->pdev_dev, "FW image has no data\n");
1003 return -EINVAL;
1004 }
1005 if (size & 511) {
1006 dev_err(adap->pdev_dev,
1007 "FW image size not multiple of 512 bytes\n");
1008 return -EINVAL;
1009 }
1010 if (ntohs(hdr->len512) * 512 != size) {
1011 dev_err(adap->pdev_dev,
1012 "FW image size differs from size in FW header\n");
1013 return -EINVAL;
1014 }
1015 if (size > FW_MAX_SIZE) {
1016 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
1017 FW_MAX_SIZE);
1018 return -EFBIG;
1019 }
1020
1021 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1022 csum += ntohl(p[i]);
1023
1024 if (csum != 0xffffffff) {
1025 dev_err(adap->pdev_dev,
1026 "corrupted firmware image, checksum %#x\n", csum);
1027 return -EINVAL;
1028 }
1029
Dimitris Michailidis900a6592010-06-18 10:05:27 +00001030 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
1031 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001032 if (ret)
1033 goto out;
1034
1035 /*
1036 * We write the correct version at the end so the driver can see a bad
1037 * version if the FW write fails. Start by writing a copy of the
1038 * first page with a bad version.
1039 */
1040 memcpy(first_page, fw_data, SF_PAGE_SIZE);
1041 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
Dimitris Michailidis900a6592010-06-18 10:05:27 +00001042 ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001043 if (ret)
1044 goto out;
1045
Dimitris Michailidis900a6592010-06-18 10:05:27 +00001046 addr = fw_img_start;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001047 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1048 addr += SF_PAGE_SIZE;
1049 fw_data += SF_PAGE_SIZE;
1050 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
1051 if (ret)
1052 goto out;
1053 }
1054
1055 ret = t4_write_flash(adap,
Dimitris Michailidis900a6592010-06-18 10:05:27 +00001056 fw_img_start + offsetof(struct fw_hdr, fw_ver),
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001057 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
1058out:
1059 if (ret)
1060 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
1061 ret);
1062 return ret;
1063}
1064
1065#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1066 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
1067
1068/**
1069 * t4_link_start - apply link configuration to MAC/PHY
1070 * @phy: the PHY to setup
1071 * @mac: the MAC to setup
1072 * @lc: the requested link configuration
1073 *
1074 * Set up a port's MAC and PHY according to a desired link configuration.
1075 * - If the PHY can auto-negotiate first decide what to advertise, then
1076 * enable/disable auto-negotiation as desired, and reset.
1077 * - If the PHY does not auto-negotiate just reset it.
1078 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1079 * otherwise do it later based on the outcome of auto-negotiation.
1080 */
1081int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1082 struct link_config *lc)
1083{
1084 struct fw_port_cmd c;
1085 unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
1086
1087 lc->link_ok = 0;
1088 if (lc->requested_fc & PAUSE_RX)
1089 fc |= FW_PORT_CAP_FC_RX;
1090 if (lc->requested_fc & PAUSE_TX)
1091 fc |= FW_PORT_CAP_FC_TX;
1092
1093 memset(&c, 0, sizeof(c));
1094 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
1095 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
1096 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1097 FW_LEN16(c));
1098
1099 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1100 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1101 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1102 } else if (lc->autoneg == AUTONEG_DISABLE) {
1103 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1104 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1105 } else
1106 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1107
1108 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1109}
1110
1111/**
1112 * t4_restart_aneg - restart autonegotiation
1113 * @adap: the adapter
1114 * @mbox: mbox to use for the FW command
1115 * @port: the port id
1116 *
1117 * Restarts autonegotiation for the selected port.
1118 */
1119int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1120{
1121 struct fw_port_cmd c;
1122
1123 memset(&c, 0, sizeof(c));
1124 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
1125 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
1126 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1127 FW_LEN16(c));
1128 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1129 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1130}
1131
Vipul Pandya8caa1e82012-05-18 15:29:25 +05301132typedef void (*int_handler_t)(struct adapter *adap);
1133
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001134struct intr_info {
1135 unsigned int mask; /* bits to check in interrupt status */
1136 const char *msg; /* message to print or NULL */
1137 short stat_idx; /* stat counter to increment or -1 */
1138 unsigned short fatal; /* whether the condition reported is fatal */
Vipul Pandya8caa1e82012-05-18 15:29:25 +05301139 int_handler_t int_handler; /* platform-specific int handler */
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001140};
1141
1142/**
1143 * t4_handle_intr_status - table driven interrupt handler
1144 * @adapter: the adapter that generated the interrupt
1145 * @reg: the interrupt status register to process
1146 * @acts: table of interrupt actions
1147 *
1148 * A table driven interrupt handler that applies a set of masks to an
1149 * interrupt status word and performs the corresponding actions if the
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001150 * interrupts described by the mask have occurred. The actions include
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001151 * optionally emitting a warning or alert message. The table is terminated
1152 * by an entry specifying mask 0. Returns the number of fatal interrupt
1153 * conditions.
1154 */
1155static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1156 const struct intr_info *acts)
1157{
1158 int fatal = 0;
1159 unsigned int mask = 0;
1160 unsigned int status = t4_read_reg(adapter, reg);
1161
1162 for ( ; acts->mask; ++acts) {
1163 if (!(status & acts->mask))
1164 continue;
1165 if (acts->fatal) {
1166 fatal++;
1167 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1168 status & acts->mask);
1169 } else if (acts->msg && printk_ratelimit())
1170 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1171 status & acts->mask);
Vipul Pandya8caa1e82012-05-18 15:29:25 +05301172 if (acts->int_handler)
1173 acts->int_handler(adapter);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001174 mask |= acts->mask;
1175 }
1176 status &= mask;
1177 if (status) /* clear processed interrupts */
1178 t4_write_reg(adapter, reg, status);
1179 return fatal;
1180}
1181
1182/*
1183 * Interrupt handler for the PCIE module.
1184 */
1185static void pcie_intr_handler(struct adapter *adapter)
1186{
Joe Perches005b5712010-12-14 21:36:53 +00001187 static const struct intr_info sysbus_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001188 { RNPP, "RXNP array parity error", -1, 1 },
1189 { RPCP, "RXPC array parity error", -1, 1 },
1190 { RCIP, "RXCIF array parity error", -1, 1 },
1191 { RCCP, "Rx completions control array parity error", -1, 1 },
1192 { RFTP, "RXFT array parity error", -1, 1 },
1193 { 0 }
1194 };
Joe Perches005b5712010-12-14 21:36:53 +00001195 static const struct intr_info pcie_port_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001196 { TPCP, "TXPC array parity error", -1, 1 },
1197 { TNPP, "TXNP array parity error", -1, 1 },
1198 { TFTP, "TXFT array parity error", -1, 1 },
1199 { TCAP, "TXCA array parity error", -1, 1 },
1200 { TCIP, "TXCIF array parity error", -1, 1 },
1201 { RCAP, "RXCA array parity error", -1, 1 },
1202 { OTDD, "outbound request TLP discarded", -1, 1 },
1203 { RDPE, "Rx data parity error", -1, 1 },
1204 { TDUE, "Tx uncorrectable data error", -1, 1 },
1205 { 0 }
1206 };
Joe Perches005b5712010-12-14 21:36:53 +00001207 static const struct intr_info pcie_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001208 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1209 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1210 { MSIDATAPERR, "MSI data parity error", -1, 1 },
1211 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1212 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1213 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1214 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1215 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1216 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1217 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1218 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1219 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1220 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1221 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1222 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1223 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1224 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1225 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1226 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1227 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1228 { FIDPERR, "PCI FID parity error", -1, 1 },
1229 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1230 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
1231 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1232 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1233 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
1234 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
1235 { PCIESINT, "PCI core secondary fault", -1, 1 },
1236 { PCIEPINT, "PCI core primary fault", -1, 1 },
1237 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
1238 { 0 }
1239 };
1240
1241 int fat;
1242
1243 fat = t4_handle_intr_status(adapter,
1244 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1245 sysbus_intr_info) +
1246 t4_handle_intr_status(adapter,
1247 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1248 pcie_port_intr_info) +
1249 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
1250 if (fat)
1251 t4_fatal_err(adapter);
1252}
1253
1254/*
1255 * TP interrupt handler.
1256 */
1257static void tp_intr_handler(struct adapter *adapter)
1258{
Joe Perches005b5712010-12-14 21:36:53 +00001259 static const struct intr_info tp_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001260 { 0x3fffffff, "TP parity error", -1, 1 },
1261 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1262 { 0 }
1263 };
1264
1265 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1266 t4_fatal_err(adapter);
1267}
1268
1269/*
1270 * SGE interrupt handler.
1271 */
1272static void sge_intr_handler(struct adapter *adapter)
1273{
1274 u64 v;
1275
Joe Perches005b5712010-12-14 21:36:53 +00001276 static const struct intr_info sge_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001277 { ERR_CPL_EXCEED_IQE_SIZE,
1278 "SGE received CPL exceeding IQE size", -1, 1 },
1279 { ERR_INVALID_CIDX_INC,
1280 "SGE GTS CIDX increment too large", -1, 0 },
1281 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
Vipul Pandya840f3002012-09-05 02:01:55 +00001282 { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
1283 { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
1284 { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001285 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1286 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1287 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1288 0 },
1289 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1290 0 },
1291 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1292 0 },
1293 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1294 0 },
1295 { ERR_ING_CTXT_PRIO,
1296 "SGE too many priority ingress contexts", -1, 0 },
1297 { ERR_EGR_CTXT_PRIO,
1298 "SGE too many priority egress contexts", -1, 0 },
1299 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1300 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1301 { 0 }
1302 };
1303
1304 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
Vipul Pandya8caa1e82012-05-18 15:29:25 +05301305 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001306 if (v) {
1307 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
Vipul Pandya8caa1e82012-05-18 15:29:25 +05301308 (unsigned long long)v);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001309 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1310 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1311 }
1312
1313 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1314 v != 0)
1315 t4_fatal_err(adapter);
1316}
1317
1318/*
1319 * CIM interrupt handler.
1320 */
1321static void cim_intr_handler(struct adapter *adapter)
1322{
Joe Perches005b5712010-12-14 21:36:53 +00001323 static const struct intr_info cim_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001324 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1325 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1326 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1327 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1328 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1329 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1330 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1331 { 0 }
1332 };
Joe Perches005b5712010-12-14 21:36:53 +00001333 static const struct intr_info cim_upintr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001334 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1335 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1336 { ILLWRINT, "CIM illegal write", -1, 1 },
1337 { ILLRDINT, "CIM illegal read", -1, 1 },
1338 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1339 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1340 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1341 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1342 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1343 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1344 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1345 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1346 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1347 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1348 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1349 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1350 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1351 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1352 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1353 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1354 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1355 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1356 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1357 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1358 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1359 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1360 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1361 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1362 { 0 }
1363 };
1364
1365 int fat;
1366
1367 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1368 cim_intr_info) +
1369 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1370 cim_upintr_info);
1371 if (fat)
1372 t4_fatal_err(adapter);
1373}
1374
1375/*
1376 * ULP RX interrupt handler.
1377 */
1378static void ulprx_intr_handler(struct adapter *adapter)
1379{
Joe Perches005b5712010-12-14 21:36:53 +00001380 static const struct intr_info ulprx_intr_info[] = {
Dimitris Michailidis91e9a1e2010-06-18 10:05:33 +00001381 { 0x1800000, "ULPRX context error", -1, 1 },
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001382 { 0x7fffff, "ULPRX parity error", -1, 1 },
1383 { 0 }
1384 };
1385
1386 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1387 t4_fatal_err(adapter);
1388}
1389
1390/*
1391 * ULP TX interrupt handler.
1392 */
1393static void ulptx_intr_handler(struct adapter *adapter)
1394{
Joe Perches005b5712010-12-14 21:36:53 +00001395 static const struct intr_info ulptx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001396 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1397 0 },
1398 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1399 0 },
1400 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1401 0 },
1402 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1403 0 },
1404 { 0xfffffff, "ULPTX parity error", -1, 1 },
1405 { 0 }
1406 };
1407
1408 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1409 t4_fatal_err(adapter);
1410}
1411
1412/*
1413 * PM TX interrupt handler.
1414 */
1415static void pmtx_intr_handler(struct adapter *adapter)
1416{
Joe Perches005b5712010-12-14 21:36:53 +00001417 static const struct intr_info pmtx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001418 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1419 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1420 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1421 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1422 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1423 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1424 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1425 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1426 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1427 { 0 }
1428 };
1429
1430 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1431 t4_fatal_err(adapter);
1432}
1433
1434/*
1435 * PM RX interrupt handler.
1436 */
1437static void pmrx_intr_handler(struct adapter *adapter)
1438{
Joe Perches005b5712010-12-14 21:36:53 +00001439 static const struct intr_info pmrx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001440 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1441 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1442 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1443 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1444 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1445 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1446 { 0 }
1447 };
1448
1449 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1450 t4_fatal_err(adapter);
1451}
1452
1453/*
1454 * CPL switch interrupt handler.
1455 */
1456static void cplsw_intr_handler(struct adapter *adapter)
1457{
Joe Perches005b5712010-12-14 21:36:53 +00001458 static const struct intr_info cplsw_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001459 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1460 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1461 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1462 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1463 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1464 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1465 { 0 }
1466 };
1467
1468 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1469 t4_fatal_err(adapter);
1470}
1471
1472/*
1473 * LE interrupt handler.
1474 */
1475static void le_intr_handler(struct adapter *adap)
1476{
Joe Perches005b5712010-12-14 21:36:53 +00001477 static const struct intr_info le_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001478 { LIPMISS, "LE LIP miss", -1, 0 },
1479 { LIP0, "LE 0 LIP error", -1, 0 },
1480 { PARITYERR, "LE parity error", -1, 1 },
1481 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1482 { REQQPARERR, "LE request queue parity error", -1, 1 },
1483 { 0 }
1484 };
1485
1486 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1487 t4_fatal_err(adap);
1488}
1489
1490/*
1491 * MPS interrupt handler.
1492 */
1493static void mps_intr_handler(struct adapter *adapter)
1494{
Joe Perches005b5712010-12-14 21:36:53 +00001495 static const struct intr_info mps_rx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001496 { 0xffffff, "MPS Rx parity error", -1, 1 },
1497 { 0 }
1498 };
Joe Perches005b5712010-12-14 21:36:53 +00001499 static const struct intr_info mps_tx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001500 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1501 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1502 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1503 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1504 { BUBBLE, "MPS Tx underflow", -1, 1 },
1505 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1506 { FRMERR, "MPS Tx framing error", -1, 1 },
1507 { 0 }
1508 };
Joe Perches005b5712010-12-14 21:36:53 +00001509 static const struct intr_info mps_trc_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001510 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1511 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1512 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1513 { 0 }
1514 };
Joe Perches005b5712010-12-14 21:36:53 +00001515 static const struct intr_info mps_stat_sram_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001516 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1517 { 0 }
1518 };
Joe Perches005b5712010-12-14 21:36:53 +00001519 static const struct intr_info mps_stat_tx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001520 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1521 { 0 }
1522 };
Joe Perches005b5712010-12-14 21:36:53 +00001523 static const struct intr_info mps_stat_rx_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001524 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1525 { 0 }
1526 };
Joe Perches005b5712010-12-14 21:36:53 +00001527 static const struct intr_info mps_cls_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001528 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1529 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1530 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1531 { 0 }
1532 };
1533
1534 int fat;
1535
1536 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1537 mps_rx_intr_info) +
1538 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1539 mps_tx_intr_info) +
1540 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1541 mps_trc_intr_info) +
1542 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1543 mps_stat_sram_intr_info) +
1544 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1545 mps_stat_tx_intr_info) +
1546 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1547 mps_stat_rx_intr_info) +
1548 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1549 mps_cls_intr_info);
1550
1551 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1552 RXINT | TXINT | STATINT);
1553 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1554 if (fat)
1555 t4_fatal_err(adapter);
1556}
1557
1558#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1559
1560/*
1561 * EDC/MC interrupt handler.
1562 */
1563static void mem_intr_handler(struct adapter *adapter, int idx)
1564{
1565 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
1566
1567 unsigned int addr, cnt_addr, v;
1568
1569 if (idx <= MEM_EDC1) {
1570 addr = EDC_REG(EDC_INT_CAUSE, idx);
1571 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1572 } else {
1573 addr = MC_INT_CAUSE;
1574 cnt_addr = MC_ECC_STATUS;
1575 }
1576
1577 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1578 if (v & PERR_INT_CAUSE)
1579 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1580 name[idx]);
1581 if (v & ECC_CE_INT_CAUSE) {
1582 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1583
1584 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1585 if (printk_ratelimit())
1586 dev_warn(adapter->pdev_dev,
1587 "%u %s correctable ECC data error%s\n",
1588 cnt, name[idx], cnt > 1 ? "s" : "");
1589 }
1590 if (v & ECC_UE_INT_CAUSE)
1591 dev_alert(adapter->pdev_dev,
1592 "%s uncorrectable ECC data error\n", name[idx]);
1593
1594 t4_write_reg(adapter, addr, v);
1595 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1596 t4_fatal_err(adapter);
1597}
1598
1599/*
1600 * MA interrupt handler.
1601 */
1602static void ma_intr_handler(struct adapter *adap)
1603{
1604 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1605
1606 if (status & MEM_PERR_INT_CAUSE)
1607 dev_alert(adap->pdev_dev,
1608 "MA parity error, parity status %#x\n",
1609 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1610 if (status & MEM_WRAP_INT_CAUSE) {
1611 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1612 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1613 "client %u to address %#x\n",
1614 MEM_WRAP_CLIENT_NUM_GET(v),
1615 MEM_WRAP_ADDRESS_GET(v) << 4);
1616 }
1617 t4_write_reg(adap, MA_INT_CAUSE, status);
1618 t4_fatal_err(adap);
1619}
1620
1621/*
1622 * SMB interrupt handler.
1623 */
1624static void smb_intr_handler(struct adapter *adap)
1625{
Joe Perches005b5712010-12-14 21:36:53 +00001626 static const struct intr_info smb_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001627 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1628 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1629 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1630 { 0 }
1631 };
1632
1633 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1634 t4_fatal_err(adap);
1635}
1636
1637/*
1638 * NC-SI interrupt handler.
1639 */
1640static void ncsi_intr_handler(struct adapter *adap)
1641{
Joe Perches005b5712010-12-14 21:36:53 +00001642 static const struct intr_info ncsi_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001643 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1644 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1645 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1646 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1647 { 0 }
1648 };
1649
1650 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1651 t4_fatal_err(adap);
1652}
1653
1654/*
1655 * XGMAC interrupt handler.
1656 */
1657static void xgmac_intr_handler(struct adapter *adap, int port)
1658{
1659 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
1660
1661 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1662 if (!v)
1663 return;
1664
1665 if (v & TXFIFO_PRTY_ERR)
1666 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1667 port);
1668 if (v & RXFIFO_PRTY_ERR)
1669 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1670 port);
1671 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1672 t4_fatal_err(adap);
1673}
1674
1675/*
1676 * PL interrupt handler.
1677 */
1678static void pl_intr_handler(struct adapter *adap)
1679{
Joe Perches005b5712010-12-14 21:36:53 +00001680 static const struct intr_info pl_intr_info[] = {
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001681 { FATALPERR, "T4 fatal parity error", -1, 1 },
1682 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1683 { 0 }
1684 };
1685
1686 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1687 t4_fatal_err(adap);
1688}
1689
Dimitris Michailidis63bccee2010-08-02 13:19:16 +00001690#define PF_INTR_MASK (PFSW)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001691#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1692 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1693 CPL_SWITCH | SGE | ULP_TX)
1694
1695/**
1696 * t4_slow_intr_handler - control path interrupt handler
1697 * @adapter: the adapter
1698 *
1699 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1700 * The designation 'slow' is because it involves register reads, while
1701 * data interrupts typically don't involve any MMIOs.
1702 */
1703int t4_slow_intr_handler(struct adapter *adapter)
1704{
1705 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1706
1707 if (!(cause & GLBL_INTR_MASK))
1708 return 0;
1709 if (cause & CIM)
1710 cim_intr_handler(adapter);
1711 if (cause & MPS)
1712 mps_intr_handler(adapter);
1713 if (cause & NCSI)
1714 ncsi_intr_handler(adapter);
1715 if (cause & PL)
1716 pl_intr_handler(adapter);
1717 if (cause & SMB)
1718 smb_intr_handler(adapter);
1719 if (cause & XGMAC0)
1720 xgmac_intr_handler(adapter, 0);
1721 if (cause & XGMAC1)
1722 xgmac_intr_handler(adapter, 1);
1723 if (cause & XGMAC_KR0)
1724 xgmac_intr_handler(adapter, 2);
1725 if (cause & XGMAC_KR1)
1726 xgmac_intr_handler(adapter, 3);
1727 if (cause & PCIE)
1728 pcie_intr_handler(adapter);
1729 if (cause & MC)
1730 mem_intr_handler(adapter, MEM_MC);
1731 if (cause & EDC0)
1732 mem_intr_handler(adapter, MEM_EDC0);
1733 if (cause & EDC1)
1734 mem_intr_handler(adapter, MEM_EDC1);
1735 if (cause & LE)
1736 le_intr_handler(adapter);
1737 if (cause & TP)
1738 tp_intr_handler(adapter);
1739 if (cause & MA)
1740 ma_intr_handler(adapter);
1741 if (cause & PM_TX)
1742 pmtx_intr_handler(adapter);
1743 if (cause & PM_RX)
1744 pmrx_intr_handler(adapter);
1745 if (cause & ULP_RX)
1746 ulprx_intr_handler(adapter);
1747 if (cause & CPL_SWITCH)
1748 cplsw_intr_handler(adapter);
1749 if (cause & SGE)
1750 sge_intr_handler(adapter);
1751 if (cause & ULP_TX)
1752 ulptx_intr_handler(adapter);
1753
1754 /* Clear the interrupts just processed for which we are the master. */
1755 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1756 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1757 return 1;
1758}
1759
1760/**
1761 * t4_intr_enable - enable interrupts
1762 * @adapter: the adapter whose interrupts should be enabled
1763 *
1764 * Enable PF-specific interrupts for the calling function and the top-level
1765 * interrupt concentrator for global interrupts. Interrupts are already
1766 * enabled at each module, here we just enable the roots of the interrupt
1767 * hierarchies.
1768 *
1769 * Note: this function should be called only when the driver manages
1770 * non PF-specific interrupts from the various HW modules. Only one PCI
1771 * function at a time should be doing this.
1772 */
1773void t4_intr_enable(struct adapter *adapter)
1774{
1775 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1776
1777 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1778 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1779 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1780 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1781 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1782 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1783 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
Vipul Pandya840f3002012-09-05 02:01:55 +00001784 DBFIFO_HP_INT | DBFIFO_LP_INT |
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001785 EGRESS_SIZE_ERR);
1786 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1787 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1788}
1789
1790/**
1791 * t4_intr_disable - disable interrupts
1792 * @adapter: the adapter whose interrupts should be disabled
1793 *
1794 * Disable interrupts. We only disable the top-level interrupt
1795 * concentrators. The caller must be a PCI function managing global
1796 * interrupts.
1797 */
1798void t4_intr_disable(struct adapter *adapter)
1799{
1800 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1801
1802 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1803 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1804}
1805
1806/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001807 * hash_mac_addr - return the hash value of a MAC address
1808 * @addr: the 48-bit Ethernet MAC address
1809 *
1810 * Hashes a MAC address according to the hash function used by HW inexact
1811 * (hash) address matching.
1812 */
1813static int hash_mac_addr(const u8 *addr)
1814{
1815 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1816 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1817 a ^= b;
1818 a ^= (a >> 12);
1819 a ^= (a >> 6);
1820 return a & 0x3f;
1821}
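
/*
 * Illustrative sketch (not part of the driver logic): callers that fall
 * back to inexact (hashed) matching typically use the 6-bit value above to
 * set a bit in a 64-bit filter bitmap, as with the @hash argument of
 * t4_alloc_mac_filt() later in this file.  The names below are hypothetical.
 *
 *	u64 hash_bitmap = 0;
 *	static const u8 example_mac[] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x56 };
 *
 *	hash_bitmap |= 1ULL << hash_mac_addr(example_mac);
 */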
1822
1823/**
1824 * t4_config_rss_range - configure a portion of the RSS mapping table
1825 * @adapter: the adapter
1826 * @mbox: mbox to use for the FW command
1827 * @viid: virtual interface whose RSS subtable is to be written
1828 * @start: start entry in the table to write
1829 * @n: how many table entries to write
1830 * @rspq: values for the response queue lookup table
1831 * @nrspq: number of values in @rspq
1832 *
1833 * Programs the selected part of the VI's RSS mapping table with the
1834 * provided values. If @nrspq < @n the supplied values are used repeatedly
1835 * until the full table range is populated.
1836 *
1837 * The caller must ensure the values in @rspq are in the range allowed for
1838 * @viid.
1839 */
1840int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1841 int start, int n, const u16 *rspq, unsigned int nrspq)
1842{
1843 int ret;
1844 const u16 *rsp = rspq;
1845 const u16 *rsp_end = rspq + nrspq;
1846 struct fw_rss_ind_tbl_cmd cmd;
1847
1848 memset(&cmd, 0, sizeof(cmd));
1849 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
1850 FW_CMD_REQUEST | FW_CMD_WRITE |
1851 FW_RSS_IND_TBL_CMD_VIID(viid));
1852 cmd.retval_len16 = htonl(FW_LEN16(cmd));
1853
1854 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
1855 while (n > 0) {
1856 int nq = min(n, 32);
1857 __be32 *qp = &cmd.iq0_to_iq2;
1858
1859 cmd.niqid = htons(nq);
1860 cmd.startidx = htons(start);
1861
1862 start += nq;
1863 n -= nq;
1864
1865 while (nq > 0) {
1866 unsigned int v;
1867
1868 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
1869 if (++rsp >= rsp_end)
1870 rsp = rspq;
1871 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
1872 if (++rsp >= rsp_end)
1873 rsp = rspq;
1874 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
1875 if (++rsp >= rsp_end)
1876 rsp = rspq;
1877
1878 *qp++ = htonl(v);
1879 nq -= 3;
1880 }
1881
1882 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
1883 if (ret)
1884 return ret;
1885 }
1886 return 0;
1887}
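
/*
 * Usage sketch (queue IDs hypothetical): spread a VI's entire RSS slice,
 * whose size is reported by t4_alloc_vi() later in this file, across a
 * handful of ingress queues.  Because nrspq < n here, the IDs are repeated
 * until the slice is full.
 *
 *	static int example_setup_rss(struct adapter *adap, unsigned int mbox,
 *				     unsigned int viid, unsigned int rss_size)
 *	{
 *		static const u16 rspq[] = { 64, 65, 66, 67 };
 *
 *		return t4_config_rss_range(adap, mbox, viid, 0, rss_size,
 *					   rspq, ARRAY_SIZE(rspq));
 *	}
 */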
1888
1889/**
1890 * t4_config_glbl_rss - configure the global RSS mode
1891 * @adapter: the adapter
1892 * @mbox: mbox to use for the FW command
1893 * @mode: global RSS mode
1894 * @flags: mode-specific flags
1895 *
1896 * Sets the global RSS mode.
1897 */
1898int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1899 unsigned int flags)
1900{
1901 struct fw_rss_glb_config_cmd c;
1902
1903 memset(&c, 0, sizeof(c));
1904 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
1905 FW_CMD_REQUEST | FW_CMD_WRITE);
1906 c.retval_len16 = htonl(FW_LEN16(c));
1907 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
1908 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1909 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
1910 c.u.basicvirtual.mode_pkd =
1911 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1912 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
1913 } else
1914 return -EINVAL;
1915 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1916}
1917
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001918/**
1919 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
1920 * @adap: the adapter
1921 * @v4: holds the TCP/IP counter values
1922 * @v6: holds the TCP/IPv6 counter values
1923 *
1924 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
1925 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
1926 */
1927void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1928 struct tp_tcp_stats *v6)
1929{
1930 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
1931
1932#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
1933#define STAT(x) val[STAT_IDX(x)]
1934#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
1935
1936 if (v4) {
1937 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1938 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
1939 v4->tcpOutRsts = STAT(OUT_RST);
1940 v4->tcpInSegs = STAT64(IN_SEG);
1941 v4->tcpOutSegs = STAT64(OUT_SEG);
1942 v4->tcpRetransSegs = STAT64(RXT_SEG);
1943 }
1944 if (v6) {
1945 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1946 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
1947 v6->tcpOutRsts = STAT(OUT_RST);
1948 v6->tcpInSegs = STAT64(IN_SEG);
1949 v6->tcpOutSegs = STAT64(OUT_SEG);
1950 v6->tcpRetransSegs = STAT64(RXT_SEG);
1951 }
1952#undef STAT64
1953#undef STAT
1954#undef STAT_IDX
1955}
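
/*
 * Usage sketch (illustrative only): snapshot the IPv4 counters and report a
 * retransmission figure; the field names match the tp_tcp_stats members
 * filled in above.
 *
 *	struct tp_tcp_stats v4;
 *
 *	t4_tp_get_tcp_stats(adap, &v4, NULL);
 *	if (v4.tcpOutSegs)
 *		dev_dbg(adap->pdev_dev, "retransmitted %llu of %llu segments\n",
 *			(unsigned long long)v4.tcpRetransSegs,
 *			(unsigned long long)v4.tcpOutSegs);
 */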
1956
1957/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001958 * t4_read_mtu_tbl - returns the values in the HW path MTU table
1959 * @adap: the adapter
1960 * @mtus: where to store the MTU values
1961 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
1962 *
1963 * Reads the HW path MTU table.
1964 */
1965void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
1966{
1967 u32 v;
1968 int i;
1969
1970 for (i = 0; i < NMTUS; ++i) {
1971 t4_write_reg(adap, TP_MTU_TABLE,
1972 MTUINDEX(0xff) | MTUVALUE(i));
1973 v = t4_read_reg(adap, TP_MTU_TABLE);
1974 mtus[i] = MTUVALUE_GET(v);
1975 if (mtu_log)
1976 mtu_log[i] = MTUWIDTH_GET(v);
1977 }
1978}
1979
1980/**
Vipul Pandya636f9d32012-09-26 02:39:39 +00001981 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
1982 * @adap: the adapter
1983 * @addr: the indirect TP register address
1984 * @mask: specifies the field within the register to modify
1985 * @val: new value for the field
1986 *
1987 * Sets a field of an indirect TP register to the given value.
1988 */
1989void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
1990 unsigned int mask, unsigned int val)
1991{
1992 t4_write_reg(adap, TP_PIO_ADDR, addr);
1993 val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
1994 t4_write_reg(adap, TP_PIO_DATA, val);
1995}
1996
1997/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00001998 * init_cong_ctrl - initialize congestion control parameters
1999 * @a: the alpha values for congestion control
2000 * @b: the beta values for congestion control
2001 *
2002 * Initialize the congestion control parameters.
2003 */
2004static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2005{
2006 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2007 a[9] = 2;
2008 a[10] = 3;
2009 a[11] = 4;
2010 a[12] = 5;
2011 a[13] = 6;
2012 a[14] = 7;
2013 a[15] = 8;
2014 a[16] = 9;
2015 a[17] = 10;
2016 a[18] = 14;
2017 a[19] = 17;
2018 a[20] = 21;
2019 a[21] = 25;
2020 a[22] = 30;
2021 a[23] = 35;
2022 a[24] = 45;
2023 a[25] = 60;
2024 a[26] = 80;
2025 a[27] = 100;
2026 a[28] = 200;
2027 a[29] = 300;
2028 a[30] = 400;
2029 a[31] = 500;
2030
2031 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2032 b[9] = b[10] = 1;
2033 b[11] = b[12] = 2;
2034 b[13] = b[14] = b[15] = b[16] = 3;
2035 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2036 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2037 b[28] = b[29] = 6;
2038 b[30] = b[31] = 7;
2039}
2040
2041/* The minimum additive increment value for the congestion control table */
2042#define CC_MIN_INCR 2U
2043
2044/**
2045 * t4_load_mtus - write the MTU and congestion control HW tables
2046 * @adap: the adapter
2047 * @mtus: the values for the MTU table
2048 * @alpha: the values for the congestion control alpha parameter
2049 * @beta: the values for the congestion control beta parameter
2050 *
2051 * Write the HW MTU table with the supplied MTUs and the high-speed
2052 * congestion control table with the supplied alpha, beta, and MTUs.
2053 * We write the two tables together because the additive increments
2054 * depend on the MTUs.
2055 */
2056void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2057 const unsigned short *alpha, const unsigned short *beta)
2058{
2059 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2060 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2061 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2062 28672, 40960, 57344, 81920, 114688, 163840, 229376
2063 };
2064
2065 unsigned int i, w;
2066
2067 for (i = 0; i < NMTUS; ++i) {
2068 unsigned int mtu = mtus[i];
2069 unsigned int log2 = fls(mtu);
2070
2071 if (!(mtu & ((1 << log2) >> 2))) /* round */
2072 log2--;
2073 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
2074 MTUWIDTH(log2) | MTUVALUE(mtu));
2075
2076 for (w = 0; w < NCCTRL_WIN; ++w) {
2077 unsigned int inc;
2078
2079 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2080 CC_MIN_INCR);
2081
2082 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
2083 (w << 16) | (beta[w] << 13) | inc);
2084 }
2085 }
2086}
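
/*
 * Usage sketch (illustrative only): generate the alpha/beta congestion
 * control parameters with init_cong_ctrl() above and write them together
 * with a caller-supplied table of NMTUS ascending path MTUs.
 *
 *	unsigned short alpha[NCCTRL_WIN], beta[NCCTRL_WIN];
 *
 *	init_cong_ctrl(alpha, beta);
 *	t4_load_mtus(adap, example_mtus, alpha, beta);
 *
 * where example_mtus is a hypothetical array of NMTUS MTU values, typically
 * spanning the minimum Ethernet sizes up to jumbo-frame sizes.
 */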
2087
2088/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002089 * get_mps_bg_map - return the buffer groups associated with a port
2090 * @adap: the adapter
2091 * @idx: the port index
2092 *
2093 * Returns a bitmap indicating which MPS buffer groups are associated
2094 * with the given port. Bit i is set if buffer group i is used by the
2095 * port.
2096 */
2097static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2098{
2099 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2100
2101 if (n == 0)
2102 return idx == 0 ? 0xf : 0;
2103 if (n == 1)
2104 return idx < 2 ? (3 << (2 * idx)) : 0;
2105 return 1 << idx;
2106}
2107
2108/**
2109 * t4_get_port_stats - collect port statistics
2110 * @adap: the adapter
2111 * @idx: the port index
2112 * @p: the stats structure to fill
2113 *
2114 * Collect statistics related to the given port from HW.
2115 */
2116void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2117{
2118 u32 bgmap = get_mps_bg_map(adap, idx);
2119
2120#define GET_STAT(name) \
2121 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
2122#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2123
2124 p->tx_octets = GET_STAT(TX_PORT_BYTES);
2125 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
2126 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
2127 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
2128 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
2129 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
2130 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
2131 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
2132 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
2133 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
2134 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
2135 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2136 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
2137 p->tx_drop = GET_STAT(TX_PORT_DROP);
2138 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
2139 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
2140 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
2141 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
2142 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
2143 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
2144 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
2145 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
2146 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
2147
2148 p->rx_octets = GET_STAT(RX_PORT_BYTES);
2149 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
2150 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
2151 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
2152 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
2153 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
2154 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2155 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
2156 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
2157 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
2158 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
2159 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
2160 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
2161 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
2162 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
2163 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
2164 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2165 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
2166 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
2167 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
2168 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
2169 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
2170 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
2171 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
2172 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
2173 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
2174 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
2175
2176 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2177 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2178 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2179 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2180 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2181 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2182 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2183 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2184
2185#undef GET_STAT
2186#undef GET_STAT_COM
2187}
2188
2189/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002190 * t4_wol_magic_enable - enable/disable magic packet WoL
2191 * @adap: the adapter
2192 * @port: the physical port index
2193 * @addr: MAC address expected in magic packets, %NULL to disable
2194 *
2195 * Enables/disables magic packet wake-on-LAN for the selected port.
2196 */
2197void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2198 const u8 *addr)
2199{
2200 if (addr) {
2201 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
2202 (addr[2] << 24) | (addr[3] << 16) |
2203 (addr[4] << 8) | addr[5]);
2204 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
2205 (addr[0] << 8) | addr[1]);
2206 }
2207 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
2208 addr ? MAGICEN : 0);
2209}
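
/*
 * Usage sketch ("dev" is a hypothetical net_device for the port): arm
 * magic-packet WoL with the port's own MAC address, or pass NULL to
 * disable it again.
 *
 *	t4_wol_magic_enable(adap, port, dev->dev_addr);
 *	...
 *	t4_wol_magic_enable(adap, port, NULL);
 */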
2210
2211/**
2212 * t4_wol_pat_enable - enable/disable pattern-based WoL
2213 * @adap: the adapter
2214 * @port: the physical port index
2215 * @map: bitmap of which HW pattern filters to set
2216 * @mask0: byte mask for bytes 0-63 of a packet
2217 * @mask1: byte mask for bytes 64-127 of a packet
2218 * @crc: Ethernet CRC for selected bytes
2219 * @enable: enable/disable switch
2220 *
2221 * Sets the pattern filters indicated in @map to mask out the bytes
2222 * specified in @mask0/@mask1 in received packets and compare the CRC of
2223 * the resulting packet against @crc. If @enable is %true pattern-based
2224 * WoL is enabled, otherwise disabled.
2225 */
2226int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2227 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2228{
2229 int i;
2230
2231 if (!enable) {
2232 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
2233 PATEN, 0);
2234 return 0;
2235 }
2236 if (map > 0xff)
2237 return -EINVAL;
2238
2239#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
2240
2241 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2242 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2243 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2244
2245 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2246 if (!(map & 1))
2247 continue;
2248
2249 /* write byte masks */
2250 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2251 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2252 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2253 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2254 return -ETIMEDOUT;
2255
2256 /* write CRC */
2257 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2258 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2259 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2260 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2261 return -ETIMEDOUT;
2262 }
2263#undef EPIO_REG
2264
2265 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
2266 return 0;
2267}
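
/*
 * Usage sketch (mask and CRC values hypothetical): program pattern filter 0
 * with a byte mask covering only the first 16 bytes of the frame and arm
 * pattern-based WoL; @map is a bitmap, so bit 0 selects filter 0.
 *
 *	int ret = t4_wol_pat_enable(adap, port, 1, 0xffffULL, 0,
 *				    example_crc, true);
 *
 *	if (ret < 0)
 *		dev_err(adap->pdev_dev, "pattern WoL setup failed: %d\n", ret);
 *
 * where example_crc is the Ethernet CRC computed over the unmasked bytes.
 */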
2268
2269#define INIT_CMD(var, cmd, rd_wr) do { \
2270 (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
2271 FW_CMD_REQUEST | FW_CMD_##rd_wr); \
2272 (var).retval_len16 = htonl(FW_LEN16(var)); \
2273} while (0)
2274
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302275int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2276 u32 addr, u32 val)
2277{
2278 struct fw_ldst_cmd c;
2279
2280 memset(&c, 0, sizeof(c));
Vipul Pandya636f9d32012-09-26 02:39:39 +00002281 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2282 FW_CMD_WRITE |
2283 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302284 c.cycles_to_len16 = htonl(FW_LEN16(c));
2285 c.u.addrval.addr = htonl(addr);
2286 c.u.addrval.val = htonl(val);
2287
2288 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2289}
2290
Ben Hutchings49ce9c22012-07-10 10:56:00 +00002291/**
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302292 * t4_mem_win_read_len - read memory through PCIE memory window
2293 * @adap: the adapter
2294 * @addr: address of first byte requested, aligned on a 32-bit boundary
2295 * @data: buffer to hold the @len bytes of data read
2296 * @len: amount of data to read from window. Must be <=
2297 * MEMWIN0_APERTURE after adjusting for the 16B alignment
2298 * requirements of the memory window.
2299 *
2300 * Read len bytes of data from MC starting at @addr.
2301 */
2302int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
2303{
2304 int i;
2305 int off;
2306
2307 /*
2308 * Align on a 16B boundary.
2309 */
2310 off = addr & 15;
2311 if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
2312 return -EINVAL;
2313
Vipul Pandya840f3002012-09-05 02:01:55 +00002314 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15);
2315 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
Vipul Pandya8caa1e82012-05-18 15:29:25 +05302316
2317 for (i = 0; i < len; i += 4)
2318 *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i));
2319
2320 return 0;
2321}
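
/*
 * Usage sketch (address hypothetical): pull a small, 4-byte aligned block
 * out of MC through memory window 0; the length plus the 16B-alignment
 * offset must stay within MEMWIN0_APERTURE as described above.
 *
 *	__be32 buf[16];
 *	int ret = t4_mem_win_read_len(adap, example_mc_addr, buf, sizeof(buf));
 *
 *	if (ret)
 *		dev_warn(adap->pdev_dev, "memory window read failed: %d\n",
 *			 ret);
 */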
2322
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002323/**
2324 * t4_mdio_rd - read a PHY register through MDIO
2325 * @adap: the adapter
2326 * @mbox: mailbox to use for the FW command
2327 * @phy_addr: the PHY address
2328 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2329 * @reg: the register to read
2330 * @valp: where to store the value
2331 *
2332 * Issues a FW command through the given mailbox to read a PHY register.
2333 */
2334int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2335 unsigned int mmd, unsigned int reg, u16 *valp)
2336{
2337 int ret;
2338 struct fw_ldst_cmd c;
2339
2340 memset(&c, 0, sizeof(c));
2341 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2342 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2343 c.cycles_to_len16 = htonl(FW_LEN16(c));
2344 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2345 FW_LDST_CMD_MMD(mmd));
2346 c.u.mdio.raddr = htons(reg);
2347
2348 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2349 if (ret == 0)
2350 *valp = ntohs(c.u.mdio.rval);
2351 return ret;
2352}
2353
2354/**
2355 * t4_mdio_wr - write a PHY register through MDIO
2356 * @adap: the adapter
2357 * @mbox: mailbox to use for the FW command
2358 * @phy_addr: the PHY address
2359 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2360 * @reg: the register to write
2361 * @valp: value to write
2362 *
2363 * Issues a FW command through the given mailbox to write a PHY register.
2364 */
2365int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2366 unsigned int mmd, unsigned int reg, u16 val)
2367{
2368 struct fw_ldst_cmd c;
2369
2370 memset(&c, 0, sizeof(c));
2371 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2372 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2373 c.cycles_to_len16 = htonl(FW_LEN16(c));
2374 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2375 FW_LDST_CMD_MMD(mmd));
2376 c.u.mdio.raddr = htons(reg);
2377 c.u.mdio.rval = htons(val);
2378
2379 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2380}
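
/*
 * Usage sketch (register and bit hypothetical): a clause-45 read-modify-write
 * of a PHY register through the firmware mailbox, here in MMD 1 (PMA/PMD).
 *
 *	u16 val;
 *	int ret = t4_mdio_rd(adap, mbox, phy_addr, 1, example_reg, &val);
 *
 *	if (!ret)
 *		ret = t4_mdio_wr(adap, mbox, phy_addr, 1, example_reg,
 *				 val | example_bit);
 */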
2381
2382/**
Vipul Pandya636f9d32012-09-26 02:39:39 +00002383 * t4_fw_hello - establish communication with FW
2384 * @adap: the adapter
2385 * @mbox: mailbox to use for the FW command
2386 * @evt_mbox: mailbox to receive async FW events
2387 * @master: specifies the caller's willingness to be the device master
2388 * @state: returns the current device state (if non-NULL)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002389 *
Vipul Pandya636f9d32012-09-26 02:39:39 +00002390 * Issues a command to establish communication with FW. Returns either
2391 * an error (negative integer) or the mailbox of the Master PF.
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002392 */
2393int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2394 enum dev_master master, enum dev_state *state)
2395{
2396 int ret;
2397 struct fw_hello_cmd c;
Vipul Pandya636f9d32012-09-26 02:39:39 +00002398 u32 v;
2399 unsigned int master_mbox;
2400 int retries = FW_CMD_HELLO_RETRIES;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002401
Vipul Pandya636f9d32012-09-26 02:39:39 +00002402retry:
2403 memset(&c, 0, sizeof(c));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002404 INIT_CMD(c, HELLO, WRITE);
2405 c.err_to_mbasyncnot = htonl(
2406 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2407 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
Vipul Pandya636f9d32012-09-26 02:39:39 +00002408 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
2409 FW_HELLO_CMD_MBMASTER_MASK) |
2410 FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
2411 FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
2412 FW_HELLO_CMD_CLEARINIT);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002413
Vipul Pandya636f9d32012-09-26 02:39:39 +00002414 /*
2415 * Issue the HELLO command to the firmware. If it's not successful
2416 * but indicates that we got a "busy" or "timeout" condition, retry
2417 * the HELLO until we exhaust our retry limit.
2418 */
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002419 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
Vipul Pandya636f9d32012-09-26 02:39:39 +00002420 if (ret < 0) {
2421 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2422 goto retry;
2423 return ret;
2424 }
2425
2426 v = ntohl(c.err_to_mbasyncnot);
2427 master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
2428 if (state) {
2429 if (v & FW_HELLO_CMD_ERR)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002430 *state = DEV_STATE_ERR;
Vipul Pandya636f9d32012-09-26 02:39:39 +00002431 else if (v & FW_HELLO_CMD_INIT)
2432 *state = DEV_STATE_INIT;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002433 else
2434 *state = DEV_STATE_UNINIT;
2435 }
Vipul Pandya636f9d32012-09-26 02:39:39 +00002436
2437 /*
2438 * If we're not the Master PF then we need to wait around for the
2439 * Master PF Driver to finish setting up the adapter.
2440 *
2441 * Note that we also do this wait if we're a non-Master-capable PF and
2442 * there is no current Master PF; a Master PF may show up momentarily
2443 * and we wouldn't want to fail pointlessly. (This can happen when an
2444 * OS loads lots of different drivers rapidly at the same time). In
2445 * this case, the Master PF returned by the firmware will be
2446 * FW_PCIE_FW_MASTER_MASK so the test below will work ...
2447 */
2448 if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
2449 master_mbox != mbox) {
2450 int waiting = FW_CMD_HELLO_TIMEOUT;
2451
2452 /*
2453 * Wait for the firmware to either indicate an error or
2454 * initialized state. If we see either of these we bail out
2455 * and report the issue to the caller. If we exhaust the
2456 * "hello timeout" and we haven't exhausted our retries, try
2457 * again. Otherwise bail with a timeout error.
2458 */
2459 for (;;) {
2460 u32 pcie_fw;
2461
2462 msleep(50);
2463 waiting -= 50;
2464
2465 /*
2466 * If neither Error nor Initialized is indicated
2467 * by the firmware, keep waiting until we exhaust our
2468 * timeout ... and then retry if we haven't exhausted
2469 * our retries ...
2470 */
2471 pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
2472 if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
2473 if (waiting <= 0) {
2474 if (retries-- > 0)
2475 goto retry;
2476
2477 return -ETIMEDOUT;
2478 }
2479 continue;
2480 }
2481
2482 /*
2483 * We either have an Error or Initialized condition;
2484 * report errors preferentially.
2485 */
2486 if (state) {
2487 if (pcie_fw & FW_PCIE_FW_ERR)
2488 *state = DEV_STATE_ERR;
2489 else if (pcie_fw & FW_PCIE_FW_INIT)
2490 *state = DEV_STATE_INIT;
2491 }
2492
2493 /*
2494 * If we arrived before a Master PF was selected and
2495 * there's not a valid Master PF, grab its identity
2496 * for our caller.
2497 */
2498 if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
2499 (pcie_fw & FW_PCIE_FW_MASTER_VLD))
2500 master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
2501 break;
2502 }
2503 }
2504
2505 return master_mbox;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002506}
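
/*
 * Usage sketch (illustrative only; assumes the MASTER_MAY "willing but not
 * forcing" value of enum dev_master): a typical attach sequence contacts
 * the firmware, notes which mailbox owns the device, and performs global
 * initialization only when this PF is the Master and the device is still
 * uninitialized.
 *
 *	enum dev_state state;
 *	int master = t4_fw_hello(adap, mbox, mbox, MASTER_MAY, &state);
 *
 *	if (master < 0)
 *		return master;
 *	if (master == mbox && state == DEV_STATE_UNINIT) {
 *		... global initialization, e.g. t4_early_init(adap, mbox) ...
 *	}
 */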
2507
2508/**
2509 * t4_fw_bye - end communication with FW
2510 * @adap: the adapter
2511 * @mbox: mailbox to use for the FW command
2512 *
2513 * Issues a command to terminate communication with FW.
2514 */
2515int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2516{
2517 struct fw_bye_cmd c;
2518
2519 INIT_CMD(c, BYE, WRITE);
2520 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2521}
2522
2523/**
2524 * t4_early_init - ask FW to initialize the device
2525 * @adap: the adapter
2526 * @mbox: mailbox to use for the FW command
2527 *
2528 * Issues a command to FW to partially initialize the device. This
2529 * performs initialization that generally doesn't depend on user input.
2530 */
2531int t4_early_init(struct adapter *adap, unsigned int mbox)
2532{
2533 struct fw_initialize_cmd c;
2534
2535 INIT_CMD(c, INITIALIZE, WRITE);
2536 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2537}
2538
2539/**
2540 * t4_fw_reset - issue a reset to FW
2541 * @adap: the adapter
2542 * @mbox: mailbox to use for the FW command
2543 * @reset: specifies the type of reset to perform
2544 *
2545 * Issues a reset command of the specified type to FW.
2546 */
2547int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2548{
2549 struct fw_reset_cmd c;
2550
2551 INIT_CMD(c, RESET, WRITE);
2552 c.val = htonl(reset);
2553 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2554}
2555
2556/**
Vipul Pandya26f7cbc2012-09-26 02:39:42 +00002557 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
2558 * @adap: the adapter
2559 * @mbox: mailbox to use for the FW RESET command (if desired)
2560 * @force: force uP into RESET even if FW RESET command fails
2561 *
2562 * Issues a RESET command to firmware (if desired) with a HALT indication
2563 * and then puts the microprocessor into RESET state. The RESET command
2564 * will only be issued if a legitimate mailbox is provided (mbox <=
2565 * FW_PCIE_FW_MASTER_MASK).
2566 *
2567 * This is generally used in order for the host to safely manipulate the
2568 * adapter without fear of conflicting with whatever the firmware might
2569 * be doing. The only way out of this state is to RESTART the firmware
2570 * ...
2571 */
2572int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
2573{
2574 int ret = 0;
2575
2576 /*
2577 * If a legitimate mailbox is provided, issue a RESET command
2578 * with a HALT indication.
2579 */
2580 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2581 struct fw_reset_cmd c;
2582
2583 memset(&c, 0, sizeof(c));
2584 INIT_CMD(c, RESET, WRITE);
2585 c.val = htonl(PIORST | PIORSTMODE);
2586 c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
2587 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2588 }
2589
2590 /*
2591 * Normally we won't complete the operation if the firmware RESET
2592 * command fails but if our caller insists we'll go ahead and put the
2593 * uP into RESET. This can be useful if the firmware is hung or even
2594 * missing ... We'll have to take the risk of putting the uP into
2595 * RESET without the cooperation of firmware in that case.
2596 *
2597 * We also force the firmware's HALT flag to be on in case we bypassed
2598 * the firmware RESET command above or we're dealing with old firmware
2599 * which doesn't have the HALT capability. This will serve as a flag
2600 * for the incoming firmware to know that it's coming out of a HALT
2601 * rather than a RESET ... if it's new enough to understand that ...
2602 */
2603 if (ret == 0 || force) {
2604 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
2605 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
2606 FW_PCIE_FW_HALT);
2607 }
2608
2609 /*
2610 * And we always return the result of the firmware RESET command
2611 * even when we force the uP into RESET ...
2612 */
2613 return ret;
2614}
2615
2616/**
2617 * t4_fw_restart - restart the firmware by taking the uP out of RESET
2618 * @adap: the adapter
2619 * @reset: if we want to do a RESET to restart things
2620 *
2621 * Restart firmware previously halted by t4_fw_halt(). On successful
2622 * return the previous PF Master remains as the new PF Master and there
2623 * is no need to issue a new HELLO command, etc.
2624 *
2625 * We do this in two ways:
2626 *
2627 * 1. If we're dealing with newer firmware we'll simply want to take
2628 * the chip's microprocessor out of RESET. This will cause the
2629 * firmware to start up from its start vector. And then we'll loop
2630 * until the firmware indicates it's started again (PCIE_FW.HALT
2631 * reset to 0) or we timeout.
2632 *
2633 * 2. If we're dealing with older firmware then we'll need to RESET
2634 * the chip since older firmware won't recognize the PCIE_FW.HALT
2635 * flag and automatically RESET itself on startup.
2636 */
2637int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
2638{
2639 if (reset) {
2640 /*
2641 * Since we're directing the RESET instead of the firmware
2642 * doing it automatically, we need to clear the PCIE_FW.HALT
2643 * bit.
2644 */
2645 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
2646
2647 /*
2648 * If we've been given a valid mailbox, first try to get the
2649 * firmware to do the RESET. If that works, great and we can
2650 * return success. Otherwise, if we haven't been given a
2651 * valid mailbox or the RESET command failed, fall back to
2652 * hitting the chip with a hammer.
2653 */
2654 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2655 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
2656 msleep(100);
2657 if (t4_fw_reset(adap, mbox,
2658 PIORST | PIORSTMODE) == 0)
2659 return 0;
2660 }
2661
2662 t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
2663 msleep(2000);
2664 } else {
2665 int ms;
2666
2667 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
2668 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
2669 if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
2670 return 0;
2671 msleep(100);
2672 ms += 100;
2673 }
2674 return -ETIMEDOUT;
2675 }
2676 return 0;
2677}
2678
2679/**
2680 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
2681 * @adap: the adapter
2682 * @mbox: mailbox to use for the FW RESET command (if desired)
2683 * @fw_data: the firmware image to write
2684 * @size: image size
2685 * @force: force upgrade even if firmware doesn't cooperate
2686 *
2687 * Perform all of the steps necessary for upgrading an adapter's
2688 * firmware image. Normally this requires the cooperation of the
2689 * existing firmware in order to halt all existing activities
2690 * but if an invalid mailbox token is passed in we skip that step
2691 * (though we'll still put the adapter microprocessor into RESET in
2692 * that case).
2693 *
2694 * On successful return the new firmware will have been loaded and
2695 * the adapter will have been fully RESET losing all previous setup
2696 * state. On unsuccessful return the adapter may be completely hosed ...
2697 * positive errno indicates that the adapter is ~probably~ intact, a
2698 * negative errno indicates that things are looking bad ...
2699 */
2700int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
2701 const u8 *fw_data, unsigned int size, int force)
2702{
2703 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
2704 int reset, ret;
2705
2706 ret = t4_fw_halt(adap, mbox, force);
2707 if (ret < 0 && !force)
2708 return ret;
2709
2710 ret = t4_load_fw(adap, fw_data, size);
2711 if (ret < 0)
2712 return ret;
2713
2714 /*
2715 * Older versions of the firmware don't understand the new
2716 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
2717 * restart. So for newly loaded older firmware we'll have to do the
2718 * RESET for it so it starts up on a clean slate. We can tell if
2719 * the newly loaded firmware will handle this right by checking
2720 * its header flags to see if it advertises the capability.
2721 */
2722 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
2723 return t4_fw_restart(adap, mbox, reset);
2724}
2725
2726
2727/**
Vipul Pandya636f9d32012-09-26 02:39:39 +00002728 * t4_fw_config_file - setup an adapter via a Configuration File
2729 * @adap: the adapter
2730 * @mbox: mailbox to use for the FW command
2731 * @mtype: the memory type where the Configuration File is located
2732 * @maddr: the memory address where the Configuration File is located
2733 * @finiver: return value for CF [fini] version
2734 * @finicsum: return value for CF [fini] checksum
2735 * @cfcsum: return value for CF computed checksum
2736 *
2737 * Issue a command to get the firmware to process the Configuration
2738 * File located at the specified mtype/maddress. If the Configuration
2739 * File is processed successfully and return value pointers are
2740 * provided, the Configuration File "[fini]" section version and
2741 * checksum values will be returned along with the computed checksum.
2742 * It's up to the caller to decide how it wants to respond to the
2743 * checksums not matching, but it is recommended that a prominent warning
2744 * be emitted in order to help people rapidly identify changed or
2745 * corrupted Configuration Files.
2746 *
2747 * Also note that it's possible to modify things like "niccaps",
2748 * "toecaps",etc. between processing the Configuration File and telling
2749 * the firmware to use the new configuration. Callers which want to
2750 * do this will need to "hand-roll" their own CAPS_CONFIGS commands for
2751 * Configuration Files if they want to do this.
2752 */
2753int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
2754 unsigned int mtype, unsigned int maddr,
2755 u32 *finiver, u32 *finicsum, u32 *cfcsum)
2756{
2757 struct fw_caps_config_cmd caps_cmd;
2758 int ret;
2759
2760 /*
2761 * Tell the firmware to process the indicated Configuration File.
2762 * If there are no errors and the caller has provided return value
2763 * pointers for the [fini] section version, checksum and computed
2764 * checksum, pass those back to the caller.
2765 */
2766 memset(&caps_cmd, 0, sizeof(caps_cmd));
2767 caps_cmd.op_to_write =
2768 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2769 FW_CMD_REQUEST |
2770 FW_CMD_READ);
2771 caps_cmd.retval_len16 =
2772 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
2773 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2774 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
2775 FW_LEN16(caps_cmd));
2776 ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd);
2777 if (ret < 0)
2778 return ret;
2779
2780 if (finiver)
2781 *finiver = ntohl(caps_cmd.finiver);
2782 if (finicsum)
2783 *finicsum = ntohl(caps_cmd.finicsum);
2784 if (cfcsum)
2785 *cfcsum = ntohl(caps_cmd.cfcsum);
2786
2787 /*
2788 * And now tell the firmware to use the configuration we just loaded.
2789 */
2790 caps_cmd.op_to_write =
2791 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2792 FW_CMD_REQUEST |
2793 FW_CMD_WRITE);
2794 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
2795 return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
2796}
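
/*
 * Usage sketch (illustrative only): process a Configuration File and emit
 * the recommended warning when the [fini] checksum does not match the
 * checksum the firmware computed.
 *
 *	u32 finiver, finicsum, cfcsum;
 *	int ret = t4_fw_config_file(adap, mbox, mtype, maddr,
 *				    &finiver, &finicsum, &cfcsum);
 *
 *	if (!ret && finicsum != cfcsum)
 *		dev_warn(adap->pdev_dev,
 *			 "config file checksum mismatch: [fini] %#x, computed %#x\n",
 *			 finicsum, cfcsum);
 */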
2797
2798/**
2799 * t4_fixup_host_params - fix up host-dependent parameters
2800 * @adap: the adapter
2801 * @page_size: the host's Base Page Size
2802 * @cache_line_size: the host's Cache Line Size
2803 *
2804 * Various registers in T4 contain values which are dependent on the
2805 * host's Base Page and Cache Line Sizes. This function will fix all of
2806 * those registers with the appropriate values as passed in ...
2807 */
2808int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
2809 unsigned int cache_line_size)
2810{
2811 unsigned int page_shift = fls(page_size) - 1;
2812 unsigned int sge_hps = page_shift - 10;
2813 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
2814 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
2815 unsigned int fl_align_log = fls(fl_align) - 1;
2816
2817 t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
2818 HOSTPAGESIZEPF0(sge_hps) |
2819 HOSTPAGESIZEPF1(sge_hps) |
2820 HOSTPAGESIZEPF2(sge_hps) |
2821 HOSTPAGESIZEPF3(sge_hps) |
2822 HOSTPAGESIZEPF4(sge_hps) |
2823 HOSTPAGESIZEPF5(sge_hps) |
2824 HOSTPAGESIZEPF6(sge_hps) |
2825 HOSTPAGESIZEPF7(sge_hps));
2826
2827 t4_set_reg_field(adap, SGE_CONTROL,
2828 INGPADBOUNDARY(INGPADBOUNDARY_MASK) |
2829 EGRSTATUSPAGESIZE_MASK,
2830 INGPADBOUNDARY(fl_align_log - 5) |
2831 EGRSTATUSPAGESIZE(stat_len != 64));
2832
2833 /*
2834 * Adjust various SGE Free List Host Buffer Sizes.
2835 *
2836 * This is something of a crock since we're using fixed indices into
2837 * the array which are also known by the sge.c code and the T4
2838 * Firmware Configuration File. We need to come up with a much better
2839 * approach to managing this array. For now, the first four entries
2840 * are:
2841 *
2842 * 0: Host Page Size
2843 * 1: 64KB
2844 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
2845 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
2846 *
2847 * For the single-MTU buffers in unpacked mode we need to include
2848 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
2849 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
2850 * Padding boundary. All of these are accommodated in the Factory
2851 * Default Firmware Configuration File but we need to adjust it for
2852 * this host's cache line size.
2853 */
2854 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
2855 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
2856 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
2857 & ~(fl_align-1));
2858 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
2859 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
2860 & ~(fl_align-1));
2861
2862 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
2863
2864 return 0;
2865}
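
/*
 * Usage sketch (illustrative only): the host-dependent inputs are normally
 * just the kernel's base page size and L1 cache line size.
 *
 *	t4_fixup_host_params(adap, PAGE_SIZE, L1_CACHE_BYTES);
 */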
2866
2867/**
2868 * t4_fw_initialize - ask FW to initialize the device
2869 * @adap: the adapter
2870 * @mbox: mailbox to use for the FW command
2871 *
2872 * Issues a command to FW to partially initialize the device. This
2873 * performs initialization that generally doesn't depend on user input.
2874 */
2875int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
2876{
2877 struct fw_initialize_cmd c;
2878
2879 memset(&c, 0, sizeof(c));
2880 INIT_CMD(c, INITIALIZE, WRITE);
2881 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2882}
2883
2884/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002885 * t4_query_params - query FW or device parameters
2886 * @adap: the adapter
2887 * @mbox: mailbox to use for the FW command
2888 * @pf: the PF
2889 * @vf: the VF
2890 * @nparams: the number of parameters
2891 * @params: the parameter names
2892 * @val: the parameter values
2893 *
2894 * Reads the value of FW or device parameters. Up to 7 parameters can be
2895 * queried at once.
2896 */
2897int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2898 unsigned int vf, unsigned int nparams, const u32 *params,
2899 u32 *val)
2900{
2901 int i, ret;
2902 struct fw_params_cmd c;
2903 __be32 *p = &c.param[0].mnem;
2904
2905 if (nparams > 7)
2906 return -EINVAL;
2907
2908 memset(&c, 0, sizeof(c));
2909 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2910 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2911 FW_PARAMS_CMD_VFN(vf));
2912 c.retval_len16 = htonl(FW_LEN16(c));
2913 for (i = 0; i < nparams; i++, p += 2)
2914 *p = htonl(*params++);
2915
2916 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2917 if (ret == 0)
2918 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2919 *val++ = ntohl(*p);
2920 return ret;
2921}
2922
2923/**
2924 * t4_set_params - sets FW or device parameters
2925 * @adap: the adapter
2926 * @mbox: mailbox to use for the FW command
2927 * @pf: the PF
2928 * @vf: the VF
2929 * @nparams: the number of parameters
2930 * @params: the parameter names
2931 * @val: the parameter values
2932 *
2933 * Sets the value of FW or device parameters. Up to 7 parameters can be
2934 * specified at once.
2935 */
2936int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2937 unsigned int vf, unsigned int nparams, const u32 *params,
2938 const u32 *val)
2939{
2940 struct fw_params_cmd c;
2941 __be32 *p = &c.param[0].mnem;
2942
2943 if (nparams > 7)
2944 return -EINVAL;
2945
2946 memset(&c, 0, sizeof(c));
2947 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2948 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2949 FW_PARAMS_CMD_VFN(vf));
2950 c.retval_len16 = htonl(FW_LEN16(c));
2951 while (nparams--) {
2952 *p++ = htonl(*params++);
2953 *p++ = htonl(*val++);
2954 }
2955
2956 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2957}
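
/*
 * Usage sketch (parameter name hypothetical): query a single firmware
 * parameter and write it back modified.  Real parameter mnemonics are
 * built with the FW_PARAMS_* macros from t4fw_api.h.
 *
 *	u32 param = example_param_mnem, val;
 *	int ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
 *
 *	if (!ret) {
 *		val |= example_flag;
 *		ret = t4_set_params(adap, mbox, pf, vf, 1, &param, &val);
 *	}
 */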
2958
2959/**
2960 * t4_cfg_pfvf - configure PF/VF resource limits
2961 * @adap: the adapter
2962 * @mbox: mailbox to use for the FW command
2963 * @pf: the PF being configured
2964 * @vf: the VF being configured
2965 * @txq: the max number of egress queues
2966 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
2967 * @rxqi: the max number of interrupt-capable ingress queues
2968 * @rxq: the max number of interruptless ingress queues
2969 * @tc: the PCI traffic class
2970 * @vi: the max number of virtual interfaces
2971 * @cmask: the channel access rights mask for the PF/VF
2972 * @pmask: the port access rights mask for the PF/VF
2973 * @nexact: the maximum number of exact MPS filters
2974 * @rcaps: read capabilities
2975 * @wxcaps: write/execute capabilities
2976 *
2977 * Configures resource limits and capabilities for a physical or virtual
2978 * function.
2979 */
2980int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
2981 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
2982 unsigned int rxqi, unsigned int rxq, unsigned int tc,
2983 unsigned int vi, unsigned int cmask, unsigned int pmask,
2984 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
2985{
2986 struct fw_pfvf_cmd c;
2987
2988 memset(&c, 0, sizeof(c));
2989 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
2990 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
2991 FW_PFVF_CMD_VFN(vf));
2992 c.retval_len16 = htonl(FW_LEN16(c));
2993 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
2994 FW_PFVF_CMD_NIQ(rxq));
Casey Leedom81323b72010-06-25 12:10:32 +00002995 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00002996 FW_PFVF_CMD_PMASK(pmask) |
2997 FW_PFVF_CMD_NEQ(txq));
2998 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
2999 FW_PFVF_CMD_NEXACTF(nexact));
3000 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
3001 FW_PFVF_CMD_WX_CAPS(wxcaps) |
3002 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
3003 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3004}
3005
3006/**
3007 * t4_alloc_vi - allocate a virtual interface
3008 * @adap: the adapter
3009 * @mbox: mailbox to use for the FW command
3010 * @port: physical port associated with the VI
3011 * @pf: the PF owning the VI
3012 * @vf: the VF owning the VI
3013 * @nmac: number of MAC addresses needed (1 to 5)
3014 * @mac: the MAC addresses of the VI
3015 * @rss_size: size of RSS table slice associated with this VI
3016 *
3017 * Allocates a virtual interface for the given physical port. If @mac is
3018 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
3019 * @mac should be large enough to hold @nmac Ethernet addresses, they are
3020 * stored consecutively so the space needed is @nmac * 6 bytes.
3021 * Returns a negative error number or the non-negative VI id.
3022 */
3023int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
3024 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
3025 unsigned int *rss_size)
3026{
3027 int ret;
3028 struct fw_vi_cmd c;
3029
3030 memset(&c, 0, sizeof(c));
3031 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
3032 FW_CMD_WRITE | FW_CMD_EXEC |
3033 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
3034 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
3035 c.portid_pkd = FW_VI_CMD_PORTID(port);
3036 c.nmac = nmac - 1;
3037
3038 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3039 if (ret)
3040 return ret;
3041
3042 if (mac) {
3043 memcpy(mac, c.mac, sizeof(c.mac));
3044 switch (nmac) {
3045 case 5:
3046 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
3047 case 4:
3048 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
3049 case 3:
3050 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
3051 case 2:
3052 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
3053 }
3054 }
3055 if (rss_size)
3056 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00003057 return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003058}
3059
3060/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003061 * t4_set_rxmode - set Rx properties of a virtual interface
3062 * @adap: the adapter
3063 * @mbox: mailbox to use for the FW command
3064 * @viid: the VI id
3065 * @mtu: the new MTU or -1
3066 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
3067 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
3068 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00003069 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003070 * @sleep_ok: if true we may sleep while awaiting command completion
3071 *
3072 * Sets Rx properties of a virtual interface.
3073 */
3074int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00003075 int mtu, int promisc, int all_multi, int bcast, int vlanex,
3076 bool sleep_ok)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003077{
3078 struct fw_vi_rxmode_cmd c;
3079
3080 /* convert to FW values */
3081 if (mtu < 0)
3082 mtu = FW_RXMODE_MTU_NO_CHG;
3083 if (promisc < 0)
3084 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
3085 if (all_multi < 0)
3086 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
3087 if (bcast < 0)
3088 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00003089 if (vlanex < 0)
3090 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003091
3092 memset(&c, 0, sizeof(c));
3093 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
3094 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
3095 c.retval_len16 = htonl(FW_LEN16(c));
Dimitris Michailidisf8f5aaf2010-05-10 15:58:07 +00003096 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
3097 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
3098 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
3099 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
3100 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003101 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3102}
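
/*
 * Illustrative sketch, not part of the driver: enabling promiscuous and
 * all-multicast reception on a VI while leaving MTU, broadcast and VLAN
 * extraction untouched (-1 means "no change" for those arguments).
 */
static inline int example_set_promisc(struct adapter *adap, unsigned int mbox,
				      unsigned int viid)
{
	return t4_set_rxmode(adap, mbox, viid, -1 /* mtu */, 1 /* promisc */,
			     1 /* all_multi */, -1 /* bcast */,
			     -1 /* vlanex */, true /* may sleep */);
}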
3103
3104/**
 3105 * t4_alloc_mac_filt - allocate exact-match filters for MAC addresses
3106 * @adap: the adapter
3107 * @mbox: mailbox to use for the FW command
3108 * @viid: the VI id
3109 * @free: if true any existing filters for this VI id are first removed
3110 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
3111 * @addr: the MAC address(es)
3112 * @idx: where to store the index of each allocated filter
3113 * @hash: pointer to hash address filter bitmap
3114 * @sleep_ok: call is allowed to sleep
3115 *
3116 * Allocates an exact-match filter for each of the supplied addresses and
3117 * sets it to the corresponding address. If @idx is not %NULL it should
 3118 * sets it to the corresponding address. If @idx is not %NULL, it should
 3119 * have at least @naddr entries, each of which will be set to the index of
 3120 * the filter allocated for the corresponding MAC address. If a filter
 3121 * could not be allocated for an address, its index is set to 0xffff.
 3122 * If @hash is not %NULL, addresses that fail to get an exact-match filter
 3123 * are hashed and update the hash filter bitmap pointed at by @hash.
3124 * Returns a negative error number or the number of filters allocated.
3125 */
3126int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
3127 unsigned int viid, bool free, unsigned int naddr,
3128 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
3129{
3130 int i, ret;
3131 struct fw_vi_mac_cmd c;
3132 struct fw_vi_mac_exact *p;
3133
3134 if (naddr > 7)
3135 return -EINVAL;
3136
3137 memset(&c, 0, sizeof(c));
3138 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3139 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
3140 FW_VI_MAC_CMD_VIID(viid));
3141 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
3142 FW_CMD_LEN16((naddr + 2) / 2));
3143
3144 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3145 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3146 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
3147 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
3148 }
3149
3150 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
3151 if (ret)
3152 return ret;
3153
3154 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3155 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3156
3157 if (idx)
3158 idx[i] = index >= NEXACT_MAC ? 0xffff : index;
3159 if (index < NEXACT_MAC)
3160 ret++;
3161 else if (hash)
Dimitris Michailidisce9aeb52010-12-03 10:39:04 +00003162 *hash |= (1ULL << hash_mac_addr(addr[i]));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003163 }
3164 return ret;
3165}
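
/*
 * Illustrative sketch, not part of the driver: installing a short list of
 * secondary unicast addresses.  Addresses that don't fit in the exact-match
 * TCAM are accumulated into *hash (which the caller should have zeroed) for
 * a later t4_set_addr_hash() call.  The limit of 7 addresses per call comes
 * from the command format above.
 */
static inline int example_add_uc_filters(struct adapter *adap,
					 unsigned int mbox, unsigned int viid,
					 const u8 **addrs, unsigned int naddrs,
					 u64 *hash)
{
	u16 idx[7];
	int ret;

	ret = t4_alloc_mac_filt(adap, mbox, viid, false /* keep old filters */,
				naddrs, addrs, idx, hash, true);
	if (ret < 0)
		return ret;

	/*
	 * ret is the number of exact-match filters obtained; idx[] entries
	 * equal to 0xffff mark addresses that spilled into *hash.
	 */
	return ret;
}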
3166
3167/**
 3168 * t4_change_mac - modify the exact-match filter for a MAC address
3169 * @adap: the adapter
3170 * @mbox: mailbox to use for the FW command
3171 * @viid: the VI id
3172 * @idx: index of existing filter for old value of MAC address, or -1
3173 * @addr: the new MAC address value
3174 * @persist: whether a new MAC allocation should be persistent
3175 * @add_smt: if true also add the address to the HW SMT
3176 *
3177 * Modifies an exact-match filter and sets it to the new MAC address.
 3178 * Note that, in general, it is not possible to modify the value of a given
 3179 * filter, so the generic way to change an address filter is to free the one
 3180 * in use for the old address value and allocate a new filter for the
 3181 * new address value. @idx can be -1 if the address is a new addition.
3182 *
3183 * Returns a negative error number or the index of the filter with the new
3184 * MAC value.
3185 */
3186int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
3187 int idx, const u8 *addr, bool persist, bool add_smt)
3188{
3189 int ret, mode;
3190 struct fw_vi_mac_cmd c;
3191 struct fw_vi_mac_exact *p = c.u.exact;
3192
3193 if (idx < 0) /* new allocation */
3194 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
3195 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
3196
3197 memset(&c, 0, sizeof(c));
3198 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3199 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
3200 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
3201 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3202 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
3203 FW_VI_MAC_CMD_IDX(idx));
3204 memcpy(p->macaddr, addr, sizeof(p->macaddr));
3205
3206 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3207 if (ret == 0) {
3208 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3209 if (ret >= NEXACT_MAC)
3210 ret = -ENOMEM;
3211 }
3212 return ret;
3213}
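
/*
 * Illustrative sketch, not part of the driver: (re)programming a VI's
 * primary unicast MAC.  The caller keeps the filter index from the previous
 * call in *pidx (-1 the first time) so the old filter is reused rather than
 * leaked.
 */
static inline int example_set_primary_mac(struct adapter *adap,
					  unsigned int mbox, unsigned int viid,
					  int *pidx, const u8 *new_mac)
{
	int ret = t4_change_mac(adap, mbox, viid, *pidx, new_mac,
				true /* persist */, true /* add to SMT */);

	if (ret >= 0)
		*pidx = ret;	/* remember the filter index for next time */
	return ret;
}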
3214
3215/**
3216 * t4_set_addr_hash - program the MAC inexact-match hash filter
3217 * @adap: the adapter
3218 * @mbox: mailbox to use for the FW command
3219 * @viid: the VI id
3220 * @ucast: whether the hash filter should also match unicast addresses
3221 * @vec: the value to be written to the hash filter
3222 * @sleep_ok: call is allowed to sleep
3223 *
3224 * Sets the 64-bit inexact-match hash filter for a virtual interface.
3225 */
3226int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
3227 bool ucast, u64 vec, bool sleep_ok)
3228{
3229 struct fw_vi_mac_cmd c;
3230
3231 memset(&c, 0, sizeof(c));
3232 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
 3233			     FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
3234 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
3235 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
3236 FW_CMD_LEN16(1));
3237 c.u.hash.hashvec = cpu_to_be64(vec);
3238 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3239}
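
/*
 * Illustrative sketch, not part of the driver: applying the overflow hash
 * accumulated by t4_alloc_mac_filt(), or clearing the inexact filter by
 * writing a zero vector.  With @ucast false only multicast addresses are
 * matched by the hash.
 */
static inline int example_apply_mc_hash(struct adapter *adap,
					unsigned int mbox, unsigned int viid,
					u64 hash)
{
	return t4_set_addr_hash(adap, mbox, viid, false /* ucast */, hash,
				true /* may sleep */);
}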
3240
3241/**
3242 * t4_enable_vi - enable/disable a virtual interface
3243 * @adap: the adapter
3244 * @mbox: mailbox to use for the FW command
3245 * @viid: the VI id
3246 * @rx_en: 1=enable Rx, 0=disable Rx
3247 * @tx_en: 1=enable Tx, 0=disable Tx
3248 *
3249 * Enables/disables a virtual interface.
3250 */
3251int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
3252 bool rx_en, bool tx_en)
3253{
3254 struct fw_vi_enable_cmd c;
3255
3256 memset(&c, 0, sizeof(c));
3257 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3258 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3259 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
3260 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
3261 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3262}
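
/*
 * Illustrative sketch, not part of the driver: quiescing a VI around a
 * reconfiguration step and then re-enabling it, which is essentially what
 * the driver's stop/open paths do with this call.
 */
static inline int example_restart_vi(struct adapter *adap, unsigned int mbox,
				     unsigned int viid)
{
	int ret = t4_enable_vi(adap, mbox, viid, false, false);	/* quiesce */

	if (ret)
		return ret;
	/* ... reconfigure Rx/Tx queues here ... */
	return t4_enable_vi(adap, mbox, viid, true, true);	/* resume */
}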
3263
3264/**
3265 * t4_identify_port - identify a VI's port by blinking its LED
3266 * @adap: the adapter
3267 * @mbox: mailbox to use for the FW command
3268 * @viid: the VI id
3269 * @nblinks: how many times to blink LED at 2.5 Hz
3270 *
3271 * Identifies a VI's port by blinking its LED.
3272 */
3273int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
3274 unsigned int nblinks)
3275{
3276 struct fw_vi_enable_cmd c;
 3277
	memset(&c, 0, sizeof(c));	/* zero reserved fields before sending */
 3278	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3279 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3280 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
3281 c.blinkdur = htons(nblinks);
3282 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3283}
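
/*
 * Illustrative sketch, not part of the driver: the kind of call an ethtool
 * "identify" handler might make to blink a port's LED for a few seconds.
 * The blink rate is fixed at 2.5 Hz by the firmware, hence the conversion.
 */
static inline int example_blink_led(struct adapter *adap, unsigned int mbox,
				    unsigned int viid, unsigned int seconds)
{
	return t4_identify_port(adap, mbox, viid, seconds * 5 / 2);
}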
3284
3285/**
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003286 * t4_iq_free - free an ingress queue and its FLs
3287 * @adap: the adapter
3288 * @mbox: mailbox to use for the FW command
3289 * @pf: the PF owning the queues
3290 * @vf: the VF owning the queues
3291 * @iqtype: the ingress queue type
3292 * @iqid: ingress queue id
3293 * @fl0id: FL0 queue id or 0xffff if no attached FL0
3294 * @fl1id: FL1 queue id or 0xffff if no attached FL1
3295 *
3296 * Frees an ingress queue and its associated FLs, if any.
3297 */
3298int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3299 unsigned int vf, unsigned int iqtype, unsigned int iqid,
3300 unsigned int fl0id, unsigned int fl1id)
3301{
3302 struct fw_iq_cmd c;
3303
3304 memset(&c, 0, sizeof(c));
3305 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
3306 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
3307 FW_IQ_CMD_VFN(vf));
3308 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
3309 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
3310 c.iqid = htons(iqid);
3311 c.fl0id = htons(fl0id);
3312 c.fl1id = htons(fl1id);
3313 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3314}
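
/*
 * Illustrative sketch, not part of the driver: releasing an Ethernet Rx
 * ingress queue that has a single free list attached.  FW_IQ_TYPE_FL_INT_CAP
 * is assumed from the firmware API header as the ordinary free-list/
 * interrupt-capable queue type; 0xffff marks the absent second free list.
 */
static inline int example_free_rxq(struct adapter *adap, unsigned int mbox,
				   unsigned int pf, unsigned int vf,
				   unsigned int iqid, unsigned int fl0id)
{
	return t4_iq_free(adap, mbox, pf, vf, FW_IQ_TYPE_FL_INT_CAP,
			  iqid, fl0id, 0xffff /* no FL1 */);
}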
3315
3316/**
3317 * t4_eth_eq_free - free an Ethernet egress queue
3318 * @adap: the adapter
3319 * @mbox: mailbox to use for the FW command
3320 * @pf: the PF owning the queue
3321 * @vf: the VF owning the queue
3322 * @eqid: egress queue id
3323 *
3324 * Frees an Ethernet egress queue.
3325 */
3326int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3327 unsigned int vf, unsigned int eqid)
3328{
3329 struct fw_eq_eth_cmd c;
3330
3331 memset(&c, 0, sizeof(c));
3332 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
3333 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
3334 FW_EQ_ETH_CMD_VFN(vf));
3335 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
3336 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
3337 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3338}
3339
3340/**
3341 * t4_ctrl_eq_free - free a control egress queue
3342 * @adap: the adapter
3343 * @mbox: mailbox to use for the FW command
3344 * @pf: the PF owning the queue
3345 * @vf: the VF owning the queue
3346 * @eqid: egress queue id
3347 *
3348 * Frees a control egress queue.
3349 */
3350int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3351 unsigned int vf, unsigned int eqid)
3352{
3353 struct fw_eq_ctrl_cmd c;
3354
3355 memset(&c, 0, sizeof(c));
3356 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
3357 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
3358 FW_EQ_CTRL_CMD_VFN(vf));
3359 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
3360 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
3361 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3362}
3363
3364/**
3365 * t4_ofld_eq_free - free an offload egress queue
3366 * @adap: the adapter
3367 * @mbox: mailbox to use for the FW command
3368 * @pf: the PF owning the queue
3369 * @vf: the VF owning the queue
3370 * @eqid: egress queue id
3371 *
 3372 * Frees an offload egress queue.
3373 */
3374int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3375 unsigned int vf, unsigned int eqid)
3376{
3377 struct fw_eq_ofld_cmd c;
3378
3379 memset(&c, 0, sizeof(c));
3380 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
3381 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
3382 FW_EQ_OFLD_CMD_VFN(vf));
3383 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
3384 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
3385 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3386}
3387
3388/**
3389 * t4_handle_fw_rpl - process a FW reply message
3390 * @adap: the adapter
3391 * @rpl: start of the FW message
3392 *
3393 * Processes a FW message, such as link state change messages.
3394 */
3395int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
3396{
3397 u8 opcode = *(const u8 *)rpl;
3398
3399 if (opcode == FW_PORT_CMD) { /* link/module state change message */
3400 int speed = 0, fc = 0;
3401 const struct fw_port_cmd *p = (void *)rpl;
3402 int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
3403 int port = adap->chan_map[chan];
3404 struct port_info *pi = adap2pinfo(adap, port);
3405 struct link_config *lc = &pi->link_cfg;
3406 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
3407 int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
3408 u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
3409
3410 if (stat & FW_PORT_CMD_RXPAUSE)
3411 fc |= PAUSE_RX;
3412 if (stat & FW_PORT_CMD_TXPAUSE)
3413 fc |= PAUSE_TX;
3414 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
3415 speed = SPEED_100;
3416 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
3417 speed = SPEED_1000;
3418 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
3419 speed = SPEED_10000;
3420
3421 if (link_ok != lc->link_ok || speed != lc->speed ||
3422 fc != lc->fc) { /* something changed */
3423 lc->link_ok = link_ok;
3424 lc->speed = speed;
3425 lc->fc = fc;
3426 t4_os_link_changed(adap, port, link_ok);
3427 }
3428 if (mod != pi->mod_type) {
3429 pi->mod_type = mod;
3430 t4_os_portmod_changed(adap, port);
3431 }
3432 }
3433 return 0;
3434}
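
/*
 * Illustrative sketch, not part of the driver: the firmware-event queue
 * handler hands asynchronous firmware messages (such as FW_PORT_CMD link
 * change notifications) to t4_handle_fw_rpl().  The surrounding response
 * queue plumbing is omitted; only the hand-off itself is shown.
 */
static inline void example_fw_event(struct adapter *adap, const __be64 *msg)
{
	if (t4_handle_fw_rpl(adap, msg))
		dev_warn(adap->pdev_dev, "unexpected firmware reply\n");
}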
3435
3436static void __devinit get_pci_mode(struct adapter *adapter,
3437 struct pci_params *p)
3438{
3439 u16 val;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003440
Jiang Liue5c8ae52012-08-20 13:53:19 -06003441 if (pci_is_pcie(adapter->pdev)) {
3442 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003443 p->speed = val & PCI_EXP_LNKSTA_CLS;
3444 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3445 }
3446}
3447
3448/**
3449 * init_link_config - initialize a link's SW state
3450 * @lc: structure holding the link state
3451 * @caps: link capabilities
3452 *
3453 * Initializes the SW state maintained for each link, including the link's
3454 * capabilities and default speed/flow-control/autonegotiation settings.
3455 */
3456static void __devinit init_link_config(struct link_config *lc,
3457 unsigned int caps)
3458{
3459 lc->supported = caps;
3460 lc->requested_speed = 0;
3461 lc->speed = 0;
3462 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3463 if (lc->supported & FW_PORT_CAP_ANEG) {
3464 lc->advertising = lc->supported & ADVERT_MASK;
3465 lc->autoneg = AUTONEG_ENABLE;
3466 lc->requested_fc |= PAUSE_AUTONEG;
3467 } else {
3468 lc->advertising = 0;
3469 lc->autoneg = AUTONEG_DISABLE;
3470 }
3471}
3472
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00003473int t4_wait_dev_ready(struct adapter *adap)
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003474{
3475 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
3476 return 0;
3477 msleep(500);
3478 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3479}
3480
Dimitris Michailidis900a6592010-06-18 10:05:27 +00003481static int __devinit get_flash_params(struct adapter *adap)
3482{
3483 int ret;
3484 u32 info;
3485
3486 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
3487 if (!ret)
3488 ret = sf1_read(adap, 3, 0, 1, &info);
3489 t4_write_reg(adap, SF_OP, 0); /* unlock SF */
3490 if (ret)
3491 return ret;
3492
3493 if ((info & 0xff) != 0x20) /* not a Numonix flash */
3494 return -EINVAL;
3495 info >>= 16; /* log2 of size */
3496 if (info >= 0x14 && info < 0x18)
3497 adap->params.sf_nsec = 1 << (info - 16);
3498 else if (info == 0x18)
3499 adap->params.sf_nsec = 64;
3500 else
3501 return -EINVAL;
3502 adap->params.sf_size = 1 << info;
3503 adap->params.sf_fw_start =
3504 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
3505 return 0;
3506}
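
/*
 * Worked example of the decode above (illustration only): a 4MB Numonix
 * part reports a density code of 0x16, i.e. 22, the log2 of 4MB.  That
 * yields sf_size = 1 << 22 = 4MB and sf_nsec = 1 << (22 - 16) = 64, so the
 * flash is treated as 64 sectors of 64KB each.
 */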
3507
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003508/**
3509 * t4_prep_adapter - prepare SW and HW for operation
3510 * @adapter: the adapter
3512 *
 3513 * Initialize adapter SW state for the various HW modules, set initial
 3514 * values for some adapter tunables, and read basic identification data
 3515 * such as the chip revision and serial flash parameters.
3516 */
3517int __devinit t4_prep_adapter(struct adapter *adapter)
3518{
3519 int ret;
3520
Dimitris Michailidis204dc3c2010-06-18 10:05:29 +00003521 ret = t4_wait_dev_ready(adapter);
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003522 if (ret < 0)
3523 return ret;
3524
3525 get_pci_mode(adapter, &adapter->params.pci);
3526 adapter->params.rev = t4_read_reg(adapter, PL_REV);
3527
Dimitris Michailidis900a6592010-06-18 10:05:27 +00003528 ret = get_flash_params(adapter);
3529 if (ret < 0) {
3530 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
3531 return ret;
3532 }
3533
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003534 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3535
3536 /*
3537 * Default port for debugging in case we can't reach FW.
3538 */
3539 adapter->params.nports = 1;
3540 adapter->params.portvec = 1;
Vipul Pandya636f9d32012-09-26 02:39:39 +00003541 adapter->params.vpd.cclk = 50000;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003542 return 0;
3543}
3544
3545int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3546{
3547 u8 addr[6];
3548 int ret, i, j = 0;
3549 struct fw_port_cmd c;
Dimitris Michailidisf7965642010-07-11 12:01:18 +00003550 struct fw_rss_vi_config_cmd rvc;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003551
3552 memset(&c, 0, sizeof(c));
Dimitris Michailidisf7965642010-07-11 12:01:18 +00003553 memset(&rvc, 0, sizeof(rvc));
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003554
3555 for_each_port(adap, i) {
3556 unsigned int rss_size;
3557 struct port_info *p = adap2pinfo(adap, i);
3558
3559 while ((adap->params.portvec & (1 << j)) == 0)
3560 j++;
3561
3562 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
3563 FW_CMD_REQUEST | FW_CMD_READ |
3564 FW_PORT_CMD_PORTID(j));
3565 c.action_to_len16 = htonl(
3566 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
3567 FW_LEN16(c));
3568 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3569 if (ret)
3570 return ret;
3571
3572 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
3573 if (ret < 0)
3574 return ret;
3575
3576 p->viid = ret;
3577 p->tx_chan = j;
3578 p->lport = j;
3579 p->rss_size = rss_size;
3580 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
3581 memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
Dimitris Michailidisf21ce1c2010-06-18 10:05:30 +00003582 adap->port[i]->dev_id = j;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003583
3584 ret = ntohl(c.u.info.lstatus_to_modtype);
3585 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
3586 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
3587 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
Dimitris Michailidisa0881ca2010-06-18 10:05:34 +00003588 p->mod_type = FW_PORT_MOD_TYPE_NA;
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003589
Dimitris Michailidisf7965642010-07-11 12:01:18 +00003590 rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
3591 FW_CMD_REQUEST | FW_CMD_READ |
3592 FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
3593 rvc.retval_len16 = htonl(FW_LEN16(rvc));
3594 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
3595 if (ret)
3596 return ret;
3597 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
3598
Dimitris Michailidis56d36be2010-04-01 15:28:23 +00003599 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
3600 j++;
3601 }
3602 return 0;
3603}
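
/*
 * Illustrative sketch, not part of the driver: the probe-time ordering these
 * routines expect: prepare the adapter first, then let t4_port_init() query
 * the firmware and allocate a VI for each port.  The mailbox/PF/VF numbers
 * are placeholders; the real driver derives them from the PCI function it
 * is attached to.
 */
static inline int example_bring_up_ports(struct adapter *adap, int mbox,
					 int pf, int vf)
{
	int ret = t4_prep_adapter(adap);

	if (ret)
		return ret;
	return t4_port_init(adap, mbox, pf, vf);
}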