blob: 257dc8d565106a86349c11830d652dcd16983d95 [file] [log] [blame]
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301/*
2 * This file is part of the Chelsio FCoE driver for Linux.
3 *
4 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/pci.h>
36#include <linux/pci_regs.h>
37#include <linux/firmware.h>
38#include <linux/stddef.h>
39#include <linux/delay.h>
40#include <linux/string.h>
41#include <linux/compiler.h>
42#include <linux/jiffies.h>
43#include <linux/kernel.h>
44#include <linux/log2.h>
45
46#include "csio_hw.h"
47#include "csio_lnode.h"
48#include "csio_rnode.h"
49
/* Debug trace mask; 0xFEFF presumably enables most trace sources — TODO
 * confirm individual bit meanings against the driver's debug definitions. */
int csio_dbg_level = 0xFEFF;

/* Ports to bring up; 0xf presumably selects all four ports (one bit each)
 * — verify against callers. */
unsigned int csio_port_mask = 0xf;

/* Default FW event queue entries. */
static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;

/* Default MSI param level */
int csio_msi = 2;

/* FCoE function instances */
static int dev_num;
61
/* FCoE Adapter types & its description */
/* NOTE(review): entries appear to be selected by index — do not reorder.
 * TODO: confirm against the device-id to subtype mapping. */
static const struct csio_adap_desc csio_t4_fcoe_adapters[] = {
	{"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"},
	{"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"},
	{"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"},
	{"T440-CR 10G", "Chelsio T440-CR 10G [FCoE]"},
	{"T420-BCH 10G", "Chelsio T420-BCH 10G [FCoE]"},
	{"T440-BCH 10G", "Chelsio T440-BCH 10G [FCoE]"},
	{"T440-CH 10G", "Chelsio T440-CH 10G [FCoE]"},
	{"T420-SO 10G", "Chelsio T420-SO 10G [FCoE]"},
	{"T420-CX4 10G", "Chelsio T420-CX4 10G [FCoE]"},
	{"T420-BT 10G", "Chelsio T420-BT 10G [FCoE]"},
	{"T404-BT 1G", "Chelsio T404-BT 1G [FCoE]"},
	{"B420-SR 10G", "Chelsio B420-SR 10G [FCoE]"},
	{"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"},
	{"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"},
	{"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"HUAWEI T480 10G", "Chelsio HUAWEI T480 10G [FCoE]"},
	{"HUAWEI T440 10G", "Chelsio HUAWEI T440 10G [FCoE]"},
	{"HUAWEI STG 10G", "Chelsio HUAWEI STG 10G [FCoE]"},
	{"ACROMAG XAUI 10G", "Chelsio ACROMAG XAUI 10G [FCoE]"},
	{"ACROMAG SFP+ 10G", "Chelsio ACROMAG SFP+ 10G [FCoE]"},
	{"QUANTA SFP+ 10G", "Chelsio QUANTA SFP+ 10G [FCoE]"},
	{"HUAWEI 10Gbase-T", "Chelsio HUAWEI 10Gbase-T [FCoE]"},
	{"HUAWEI T4TOE 10G", "Chelsio HUAWEI T4TOE 10G [FCoE]"}
};
89
90static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
91 {"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
92 {"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
93 {"T522-CR 10G/1G", "Chelsio T452-CR 10G/1G [FCoE]"},
94 {"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
95 {"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
96 {"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
97 {"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
98 {"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
99 {"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
100 {"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
101 {"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
102 {"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
103 {"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
104 {"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
105 {"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
106 {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
107 {"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
108 {"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
109 {"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
110 {"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"}
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530111};
112
113static void csio_mgmtm_cleanup(struct csio_mgmtm *);
114static void csio_hw_mbm_cleanup(struct csio_hw *);
115
116/* State machine forward declarations */
117static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
118static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
119static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
120static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
121static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
122static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
123static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
124static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
125static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);
126
127static void csio_hw_initialize(struct csio_hw *hw);
128static void csio_evtq_stop(struct csio_hw *hw);
129static void csio_evtq_start(struct csio_hw *hw);
130
131int csio_is_hw_ready(struct csio_hw *hw)
132{
133 return csio_match_state(hw, csio_hws_ready);
134}
135
136int csio_is_hw_removing(struct csio_hw *hw)
137{
138 return csio_match_state(hw, csio_hws_removing);
139}
140
141
142/*
143 * csio_hw_wait_op_done_val - wait until an operation is completed
144 * @hw: the HW module
145 * @reg: the register to check for completion
146 * @mask: a single-bit field within @reg that indicates completion
147 * @polarity: the value of the field when the operation is completed
148 * @attempts: number of check iterations
149 * @delay: delay in usecs between iterations
150 * @valp: where to store the value of the register at completion time
151 *
152 * Wait until an operation is completed by checking a bit in a register
153 * up to @attempts times. If @valp is not NULL the value of the register
154 * at the time it indicated completion is stored there. Returns 0 if the
155 * operation completes and -EAGAIN otherwise.
156 */
Arvind Bhushan7cc16382013-03-14 05:09:08 +0000157int
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530158csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
159 int polarity, int attempts, int delay, uint32_t *valp)
160{
161 uint32_t val;
162 while (1) {
163 val = csio_rd_reg32(hw, reg);
164
165 if (!!(val & mask) == polarity) {
166 if (valp)
167 *valp = val;
168 return 0;
169 }
170
171 if (--attempts == 0)
172 return -EAGAIN;
173 if (delay)
174 udelay(delay);
175 }
176}
177
Arvind Bhushan7cc16382013-03-14 05:09:08 +0000178/*
179 * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
180 * @hw: the adapter
181 * @addr: the indirect TP register address
182 * @mask: specifies the field within the register to modify
183 * @val: new value for the field
184 *
185 * Sets a field of an indirect TP register to the given value.
186 */
187void
188csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
189 unsigned int mask, unsigned int val)
190{
191 csio_wr_reg32(hw, addr, TP_PIO_ADDR);
192 val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask;
193 csio_wr_reg32(hw, val, TP_PIO_DATA);
194}
195
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530196void
197csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
198 uint32_t value)
199{
200 uint32_t val = csio_rd_reg32(hw, reg) & ~mask;
201
202 csio_wr_reg32(hw, val | value, reg);
203 /* Flush */
204 csio_rd_reg32(hw, reg);
205
206}
207
/*
 * csio_memory_write - write a buffer into adapter memory
 * @hw: HW module
 * @mtype: memory type selector, passed through to the chip op
 * @addr: destination address within that memory
 * @len: length in bytes
 * @buf: source buffer
 *
 * Thin wrapper over the chip-specific memory_rw op using the CSIOSTOR
 * memory window; the final 0 argument presumably selects the write
 * direction — confirm against chip_memory_rw.
 */
static int
csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
{
	return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
					    addr, len, buf, 0);
}
214
215/*
216 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
217 */
Arvind Bhushan7cc16382013-03-14 05:09:08 +0000218#define EEPROM_MAX_RD_POLL 40
219#define EEPROM_MAX_WR_POLL 6
220#define EEPROM_STAT_ADDR 0x7bfc
221#define VPD_BASE 0x400
222#define VPD_BASE_OLD 0
223#define VPD_LEN 1024
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530224#define VPD_INFO_FLD_HDR_SIZE 3
225
226/*
227 * csio_hw_seeprom_read - read a serial EEPROM location
228 * @hw: hw to read
229 * @addr: EEPROM virtual address
230 * @data: where to store the read data
231 *
232 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
233 * VPD capability. Note that this function must be called with a virtual
234 * address.
235 */
236static int
237csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
238{
239 uint16_t val = 0;
240 int attempts = EEPROM_MAX_RD_POLL;
241 uint32_t base = hw->params.pci.vpd_cap_addr;
242
243 if (addr >= EEPROMVSIZE || (addr & 3))
244 return -EINVAL;
245
246 pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);
247
248 do {
249 udelay(10);
250 pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
251 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
252
253 if (!(val & PCI_VPD_ADDR_F)) {
254 csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
255 return -EINVAL;
256 }
257
258 pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
259 *data = le32_to_cpu(*data);
Naresh Kumar Inna5036f0a2012-11-20 18:15:40 +0530260
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530261 return 0;
262}
263
/*
 * Partial EEPROM Vital Product Data structure. Includes only the ID and
 * VPD-R sections.
 */
struct t4_vpd_hdr {
	u8 id_tag;		/* ID string tag (0x82 on current cards) */
	u8 id_len[2];		/* length of id_data */
	u8 id_data[ID_LEN];	/* adapter ID string */
	u8 vpdr_tag;		/* VPD-R section tag */
	u8 vpdr_len[2];		/* little-endian VPD-R length (see
				 * csio_hw_get_vpd_keyword_val) */
};
275
276/*
277 * csio_hw_get_vpd_keyword_val - Locates an information field keyword in
278 * the VPD
279 * @v: Pointer to buffered vpd data structure
280 * @kw: The keyword to search for
281 *
282 * Returns the value of the information field keyword or
283 * -EINVAL otherwise.
284 */
285static int
286csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
287{
288 int32_t i;
289 int32_t offset , len;
290 const uint8_t *buf = &v->id_tag;
291 const uint8_t *vpdr_len = &v->vpdr_tag;
292 offset = sizeof(struct t4_vpd_hdr);
293 len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);
294
295 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
296 return -EINVAL;
297
298 for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) {
299 if (memcmp(buf + i , kw, 2) == 0) {
300 i += VPD_INFO_FLD_HDR_SIZE;
301 return i;
302 }
303
304 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
305 }
306
307 return -EINVAL;
308}
309
/* Find a PCI capability; store its offset in *pos.  Returns 0 when found,
 * -1 otherwise (in which case *pos holds 0). */
static int
csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)
{
	*pos = pci_find_capability(pdev, cap);

	return *pos ? 0 : -1;
}
319
320/*
321 * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM
322 * @hw: HW module
323 * @p: where to store the parameters
324 *
325 * Reads card parameters stored in VPD EEPROM.
326 */
327static int
328csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
329{
330 int i, ret, ec, sn, addr;
331 uint8_t *vpd, csum;
332 const struct t4_vpd_hdr *v;
333 /* To get around compilation warning from strstrip */
334 char *s;
335
336 if (csio_is_valid_vpd(hw))
337 return 0;
338
339 ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
340 &hw->params.pci.vpd_cap_addr);
341 if (ret)
342 return -EINVAL;
343
344 vpd = kzalloc(VPD_LEN, GFP_ATOMIC);
345 if (vpd == NULL)
346 return -ENOMEM;
347
348 /*
349 * Card information normally starts at VPD_BASE but early cards had
350 * it at 0.
351 */
352 ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
353 addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
354
355 for (i = 0; i < VPD_LEN; i += 4) {
356 ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
357 if (ret) {
358 kfree(vpd);
359 return ret;
360 }
361 }
362
363 /* Reset the VPD flag! */
364 hw->flags &= (~CSIO_HWF_VPD_VALID);
365
366 v = (const struct t4_vpd_hdr *)vpd;
367
368#define FIND_VPD_KW(var, name) do { \
369 var = csio_hw_get_vpd_keyword_val(v, name); \
370 if (var < 0) { \
371 csio_err(hw, "missing VPD keyword " name "\n"); \
372 kfree(vpd); \
373 return -EINVAL; \
374 } \
375} while (0)
376
377 FIND_VPD_KW(i, "RV");
378 for (csum = 0; i >= 0; i--)
379 csum += vpd[i];
380
381 if (csum) {
382 csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
383 kfree(vpd);
384 return -EINVAL;
385 }
386 FIND_VPD_KW(ec, "EC");
387 FIND_VPD_KW(sn, "SN");
388#undef FIND_VPD_KW
389
390 memcpy(p->id, v->id_data, ID_LEN);
391 s = strstrip(p->id);
392 memcpy(p->ec, vpd + ec, EC_LEN);
393 s = strstrip(p->ec);
394 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
395 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
396 s = strstrip(p->sn);
397
398 csio_valid_vpd_copied(hw);
399
400 kfree(vpd);
401 return 0;
402}
403
404/*
405 * csio_hw_sf1_read - read data from the serial flash
406 * @hw: the HW module
407 * @byte_cnt: number of bytes to read
408 * @cont: whether another operation will be chained
409 * @lock: whether to lock SF for PL access only
410 * @valp: where to store the read data
411 *
412 * Reads up to 4 bytes of data from the serial flash. The location of
413 * the read needs to be specified prior to calling this by issuing the
414 * appropriate commands to the serial flash.
415 */
416static int
417csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
418 int32_t lock, uint32_t *valp)
419{
420 int ret;
421
422 if (!byte_cnt || byte_cnt > 4)
423 return -EINVAL;
424 if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
425 return -EBUSY;
426
427 cont = cont ? SF_CONT : 0;
428 lock = lock ? SF_LOCK : 0;
429
430 csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP);
431 ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
432 10, NULL);
433 if (!ret)
434 *valp = csio_rd_reg32(hw, SF_DATA);
435 return ret;
436}
437
438/*
439 * csio_hw_sf1_write - write data to the serial flash
440 * @hw: the HW module
441 * @byte_cnt: number of bytes to write
442 * @cont: whether another operation will be chained
443 * @lock: whether to lock SF for PL access only
444 * @val: value to write
445 *
446 * Writes up to 4 bytes of data to the serial flash. The location of
447 * the write needs to be specified prior to calling this by issuing the
448 * appropriate commands to the serial flash.
449 */
450static int
451csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
452 int32_t lock, uint32_t val)
453{
454 if (!byte_cnt || byte_cnt > 4)
455 return -EINVAL;
456 if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
457 return -EBUSY;
458
459 cont = cont ? SF_CONT : 0;
460 lock = lock ? SF_LOCK : 0;
461
462 csio_wr_reg32(hw, val, SF_DATA);
463 csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP);
464
465 return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
466 10, NULL);
467}
468
469/*
470 * csio_hw_flash_wait_op - wait for a flash operation to complete
471 * @hw: the HW module
472 * @attempts: max number of polls of the status register
473 * @delay: delay between polls in ms
474 *
475 * Wait for a flash operation to complete by polling the status register.
476 */
477static int
478csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
479{
480 int ret;
481 uint32_t status;
482
483 while (1) {
484 ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
485 if (ret != 0)
486 return ret;
487
488 ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
489 if (ret != 0)
490 return ret;
491
492 if (!(status & 1))
493 return 0;
494 if (--attempts == 0)
495 return -EAGAIN;
496 if (delay)
497 msleep(delay);
498 }
499}
500
501/*
502 * csio_hw_read_flash - read words from serial flash
503 * @hw: the HW module
504 * @addr: the start address for the read
505 * @nwords: how many 32-bit words to read
506 * @data: where to store the read data
507 * @byte_oriented: whether to store data as bytes or as words
508 *
509 * Read the specified number of 32-bit words from the serial flash.
510 * If @byte_oriented is set the read data is stored as a byte array
511 * (i.e., big-endian), otherwise as 32-bit words in the platform's
512 * natural endianess.
513 */
514static int
515csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
516 uint32_t *data, int32_t byte_oriented)
517{
518 int ret;
519
520 if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
521 return -EINVAL;
522
523 addr = swab32(addr) | SF_RD_DATA_FAST;
524
525 ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
526 if (ret != 0)
527 return ret;
528
529 ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
530 if (ret != 0)
531 return ret;
532
533 for ( ; nwords; nwords--, data++) {
534 ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
535 if (nwords == 1)
536 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
537 if (ret)
538 return ret;
539 if (byte_oriented)
540 *data = htonl(*data);
541 }
542 return 0;
543}
544
/*
 * csio_hw_write_flash - write up to a page of data to the serial flash
 * @hw: the hw
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address. All the data must be written to the same page.
 * The page is read back afterwards to verify the write.
 */
static int
csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
		    uint32_t n, const uint8_t *data)
{
	int ret = -EINVAL;
	uint32_t buf[64];
	uint32_t i, c, left, val, offset = addr & 0xff;

	if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	/* Enable writes, then start the page-program at the target address. */
	ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
	if (ret != 0)
		goto unlock;

	ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
	if (ret != 0)
		goto unlock;

	/* Stream the payload up to 4 bytes at a time, big-endian packed. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = csio_hw_flash_wait_op(hw, 8, 1);
	if (ret)
		goto unlock;

	csio_wr_reg32(hw, 0, SF_OP);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* @data was advanced past the payload above; back up by n bytes */
	if (memcmp(data - n, (uint8_t *)buf + offset, n)) {
		csio_err(hw,
			 "failed to correctly write the flash page at %#x\n",
			 addr);
		return -EINVAL;
	}

	return 0;

unlock:
	csio_wr_reg32(hw, 0, SF_OP);	/* unlock SF */
	return ret;
}
609
610/*
611 * csio_hw_flash_erase_sectors - erase a range of flash sectors
612 * @hw: the HW module
613 * @start: the first sector to erase
614 * @end: the last sector to erase
615 *
616 * Erases the sectors in the given inclusive range.
617 */
618static int
619csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
620{
621 int ret = 0;
622
623 while (start <= end) {
624
625 ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
626 if (ret != 0)
627 goto out;
628
629 ret = csio_hw_sf1_write(hw, 4, 0, 1,
630 SF_ERASE_SECTOR | (start << 8));
631 if (ret != 0)
632 goto out;
633
634 ret = csio_hw_flash_wait_op(hw, 14, 500);
635 if (ret != 0)
636 goto out;
637
638 start++;
639 }
640out:
641 if (ret)
642 csio_err(hw, "erase of flash sector %d failed, error %d\n",
643 start, ret);
644 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
645 return 0;
646}
647
/*
 * csio_hw_print_fw_version - log the cached firmware version
 * @hw: HW module
 * @str: prefix string for the log line
 *
 * Decodes hw->fwrev into major.minor.micro.build and prints it.
 */
static void
csio_hw_print_fw_version(struct csio_hw *hw, char *str)
{
	csio_info(hw, "%s: %u.%u.%u.%u\n", str,
		  FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
		  FW_HDR_FW_VER_MINOR_G(hw->fwrev),
		  FW_HDR_FW_VER_MICRO_G(hw->fwrev),
		  FW_HDR_FW_VER_BUILD_G(hw->fwrev));
}
657
/*
 * csio_hw_get_fw_version - read the firmware version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the FW version word from the fw_hdr stored in flash at
 * FW_IMG_START.
 *
 * NOTE(review): csio_hw_get_tp_version() uses FLASH_FW_START as its base
 * while this uses FW_IMG_START — confirm both macros name the same flash
 * region.
 */
static int
csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
{
	return csio_hw_read_flash(hw, FW_IMG_START +
				  offsetof(struct fw_hdr, fw_ver), 1,
				  vers, 0);
}
672
/*
 * csio_hw_get_tp_version - read the TP microcode version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the TP microcode version word from the fw_hdr stored in flash.
 *
 * NOTE(review): base here is FLASH_FW_START, whereas
 * csio_hw_get_fw_version() uses FW_IMG_START — confirm the two macros
 * are equivalent.
 */
static int
csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
				  offsetof(struct fw_hdr, tp_microcode_ver), 1,
				  vers, 0);
}
687
688/*
689 * csio_hw_check_fw_version - check if the FW is compatible with
690 * this driver
691 * @hw: HW module
692 *
693 * Checks if an adapter's FW is compatible with the driver. Returns 0
694 * if there's exact match, a negative error if the version could not be
695 * read or there's a major/minor version mismatch/minor.
696 */
697static int
698csio_hw_check_fw_version(struct csio_hw *hw)
699{
700 int ret, major, minor, micro;
701
702 ret = csio_hw_get_fw_version(hw, &hw->fwrev);
703 if (!ret)
704 ret = csio_hw_get_tp_version(hw, &hw->tp_vers);
705 if (ret)
706 return ret;
707
Hariprasad Shenaib2e1a3f2014-11-21 12:52:05 +0530708 major = FW_HDR_FW_VER_MAJOR_G(hw->fwrev);
709 minor = FW_HDR_FW_VER_MINOR_G(hw->fwrev);
710 micro = FW_HDR_FW_VER_MICRO_G(hw->fwrev);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530711
Arvind Bhushan7cc16382013-03-14 05:09:08 +0000712 if (major != FW_VERSION_MAJOR(hw)) { /* major mismatch - fail */
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530713 csio_err(hw, "card FW has major version %u, driver wants %u\n",
Arvind Bhushan7cc16382013-03-14 05:09:08 +0000714 major, FW_VERSION_MAJOR(hw));
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530715 return -EINVAL;
716 }
717
Arvind Bhushan7cc16382013-03-14 05:09:08 +0000718 if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw))
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530719 return 0; /* perfect match */
720
721 /* Minor/micro version mismatch */
722 return -EINVAL;
723}
724
/*
 * csio_hw_fw_dload - download firmware.
 * @hw: HW module
 * @fw_data: firmware image to write.
 * @size: image size
 *
 * Validates the supplied firmware image (size, header length field,
 * whole-image checksum), erases the spanned flash sectors, and writes
 * the image to the card's serial flash page by page.  The real version
 * word is written last so a failed download leaves a visibly bad
 * version in the header.
 */
static int
csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
{
	uint32_t csum;
	int32_t addr;
	int ret;
	uint32_t i;
	uint8_t first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
	uint32_t sf_sec_size;

	/* Flash geometry must have been discovered already. */
	if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
		csio_err(hw, "Serial Flash data invalid\n");
		return -EINVAL;
	}

	if (!size) {
		csio_err(hw, "FW image has no data\n");
		return -EINVAL;
	}

	if (size & 511) {
		csio_err(hw, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}

	if (ntohs(hdr->len512) * 512 != size) {
		csio_err(hw, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}

	if (size > FW_MAX_SIZE) {
		csio_err(hw, "FW image too large, max is %u bytes\n",
			 FW_MAX_SIZE);
		return -EINVAL;
	}

	/* A valid image sums to 0xffffffff over its big-endian words. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */

	csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
		 FW_START_SEC, FW_START_SEC + i - 1);

	ret = csio_hw_flash_erase_sectors(hw, FW_START_SEC,
					  FW_START_SEC + i - 1);
	if (ret) {
		csio_err(hw, "Flash Erase failed\n");
		goto out;
	}

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails. Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = csio_hw_write_flash(hw, FW_IMG_START, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
		 FW_IMG_START, FW_IMG_START + size);

	/* size is a multiple of 512 (checked above), hence of SF_PAGE_SIZE,
	 * so this loop terminates exactly at the image end. */
	addr = FW_IMG_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	/* Finally, rewrite the true version word in the flash header. */
	ret = csio_hw_write_flash(hw,
				  FW_IMG_START +
					offsetof(struct fw_hdr, fw_ver),
				  sizeof(hdr->fw_ver),
				  (const uint8_t *)&hdr->fw_ver);

out:
	if (ret)
		csio_err(hw, "firmware download failed, error %d\n", ret);
	return ret;
}
826
827static int
828csio_hw_get_flash_params(struct csio_hw *hw)
829{
830 int ret;
831 uint32_t info = 0;
832
833 ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
834 if (!ret)
835 ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
836 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
837 if (ret != 0)
838 return ret;
839
840 if ((info & 0xff) != 0x20) /* not a Numonix flash */
841 return -EINVAL;
842 info >>= 16; /* log2 of size */
843 if (info >= 0x14 && info < 0x18)
844 hw->params.sf_nsec = 1 << (info - 16);
845 else if (info == 0x18)
846 hw->params.sf_nsec = 64;
847 else
848 return -EINVAL;
849 hw->params.sf_size = 1 << info;
850
851 return 0;
852}
853
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530854/*****************************************************************************/
855/* HW State machine assists */
856/*****************************************************************************/
857
858static int
859csio_hw_dev_ready(struct csio_hw *hw)
860{
861 uint32_t reg;
862 int cnt = 6;
863
864 while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) &&
865 (--cnt != 0))
866 mdelay(100);
867
868 if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) ||
869 (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) {
870 csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
871 return -EIO;
872 }
873
874 hw->pfn = SOURCEPF_GET(reg);
875
876 return 0;
877}
878
/*
 * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
 * @hw: HW module
 * @state: Device state
 *
 * FW_HELLO_CMD has to be polled for completion.  On return, hw->flags has
 * CSIO_HWF_MASTER set or cleared depending on whether firmware designated
 * this PF as master, and *state reflects the firmware's device state.
 *
 * NOTE(review): the wait loop below drops and re-takes hw->lock around
 * msleep(), so the caller must hold hw->lock with interrupts enabled
 * beneath it — confirm at call sites.
 */
static int
csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
{
	struct csio_mb *mbp;
	int rv = 0;
	enum fw_retval retval;
	uint8_t mpfn;
	char state_str[16];
	int retries = FW_CMD_HELLO_RETRIES;

	memset(state_str, 0, sizeof(state_str));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		rv = -ENOMEM;
		CSIO_INC_STATS(hw, n_err_nomem);
		goto out;
	}

retry:
	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
		      hw->pfn, CSIO_MASTER_MAY, NULL);

	rv = csio_mb_issue(hw, mbp);
	if (rv) {
		csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
		goto out_free_mb;
	}

	csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
		rv = -EINVAL;
		goto out_free_mb;
	}

	/* Firmware has designated us to be master */
	if (hw->pfn == mpfn) {
		hw->flags |= CSIO_HWF_MASTER;
	} else if (*state == CSIO_DEV_STATE_UNINIT) {
		/*
		 * If we're not the Master PF then we need to wait around for
		 * the Master PF Driver to finish setting up the adapter.
		 *
		 * Note that we also do this wait if we're a non-Master-capable
		 * PF and there is no current Master PF; a Master PF may show up
		 * momentarily and we wouldn't want to fail pointlessly. (This
		 * can happen when an OS loads lots of different drivers rapidly
		 * at the same time). In this case, the Master PF returned by
		 * the firmware will be PCIE_FW_MASTER_MASK so the test below
		 * will work ...
		 */

		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state. If we see either of these we bail out
		 * and report the issue to the caller. If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again. Otherwise bail with a timeout error.
		 */
		for (;;) {
			uint32_t pcie_fw;

			/* Drop the HW lock while sleeping. */
			spin_unlock_irq(&hw->lock);
			msleep(50);
			spin_lock_irq(&hw->lock);
			waiting -= 50;

			/*
			 * If neither Error nor Initialialized are indicated
			 * by the firmware keep waiting till we exaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = csio_rd_reg32(hw, PCIE_FW_A);
			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					rv = -ETIMEDOUT;
					break;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR_F) {
					*state = CSIO_DEV_STATE_ERR;
					rv = -ETIMEDOUT;
				} else if (pcie_fw & PCIE_FW_INIT_F)
					*state = CSIO_DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (mpfn == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
				mpfn = PCIE_FW_MASTER_G(pcie_fw);
			break;
		}
		hw->flags &= ~CSIO_HWF_MASTER;
	}

	/* Translate the device state into a log-friendly string. */
	switch (*state) {
	case CSIO_DEV_STATE_UNINIT:
		strcpy(state_str, "Initializing");
		break;
	case CSIO_DEV_STATE_INIT:
		strcpy(state_str, "Initialized");
		break;
	case CSIO_DEV_STATE_ERR:
		strcpy(state_str, "Error");
		break;
	default:
		strcpy(state_str, "Unknown");
		break;
	}

	if (hw->pfn == mpfn)
		csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
			  hw->pfn, state_str);
	else
		csio_info(hw,
			  "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
			  hw->pfn, mpfn, state_str);

out_free_mb:
	mempool_free(mbp, hw->mb_mempool);
out:
	return rv;
}
1027
1028/*
1029 * csio_do_bye - Perform the BYE FW Mailbox command and process response.
1030 * @hw: HW module
1031 *
1032 */
1033static int
1034csio_do_bye(struct csio_hw *hw)
1035{
1036 struct csio_mb *mbp;
1037 enum fw_retval retval;
1038
1039 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1040 if (!mbp) {
1041 CSIO_INC_STATS(hw, n_err_nomem);
1042 return -ENOMEM;
1043 }
1044
1045 csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
1046
1047 if (csio_mb_issue(hw, mbp)) {
1048 csio_err(hw, "Issue of BYE command failed\n");
1049 mempool_free(mbp, hw->mb_mempool);
1050 return -EINVAL;
1051 }
1052
1053 retval = csio_mb_fw_retval(mbp);
1054 if (retval != FW_SUCCESS) {
1055 mempool_free(mbp, hw->mb_mempool);
1056 return -EINVAL;
1057 }
1058
1059 mempool_free(mbp, hw->mb_mempool);
1060
1061 return 0;
1062}
1063
1064/*
1065 * csio_do_reset- Perform the device reset.
1066 * @hw: HW module
1067 * @fw_rst: FW reset
1068 *
1069 * If fw_rst is set, issues FW reset mbox cmd otherwise
1070 * does PIO reset.
1071 * Performs reset of the function.
1072 */
1073static int
1074csio_do_reset(struct csio_hw *hw, bool fw_rst)
1075{
1076 struct csio_mb *mbp;
1077 enum fw_retval retval;
1078
1079 if (!fw_rst) {
1080 /* PIO reset */
1081 csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
1082 mdelay(2000);
1083 return 0;
1084 }
1085
1086 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1087 if (!mbp) {
1088 CSIO_INC_STATS(hw, n_err_nomem);
1089 return -ENOMEM;
1090 }
1091
1092 csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
1093 PIORSTMODE | PIORST, 0, NULL);
1094
1095 if (csio_mb_issue(hw, mbp)) {
1096 csio_err(hw, "Issue of RESET command failed.n");
1097 mempool_free(mbp, hw->mb_mempool);
1098 return -EINVAL;
1099 }
1100
1101 retval = csio_mb_fw_retval(mbp);
1102 if (retval != FW_SUCCESS) {
1103 csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
1104 mempool_free(mbp, hw->mb_mempool);
1105 return -EINVAL;
1106 }
1107
1108 mempool_free(mbp, hw->mb_mempool);
1109
1110 return 0;
1111}
1112
1113static int
1114csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
1115{
1116 struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;
1117 uint16_t caps;
1118
1119 caps = ntohs(rsp->fcoecaps);
1120
1121 if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {
1122 csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
1123 return -EINVAL;
1124 }
1125
1126 if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {
1127 csio_err(hw, "No FCoE Control Offload capability\n");
1128 return -EINVAL;
1129 }
1130
1131 return 0;
1132}
1133
1134/*
1135 * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET
1136 * @hw: the HW module
1137 * @mbox: mailbox to use for the FW RESET command (if desired)
1138 * @force: force uP into RESET even if FW RESET command fails
1139 *
1140 * Issues a RESET command to firmware (if desired) with a HALT indication
1141 * and then puts the microprocessor into RESET state. The RESET command
1142 * will only be issued if a legitimate mailbox is provided (mbox <=
 * PCIE_FW_MASTER_M).
1144 *
1145 * This is generally used in order for the host to safely manipulate the
1146 * adapter without fear of conflicting with whatever the firmware might
1147 * be doing. The only way out of this state is to RESTART the firmware
1148 * ...
1149 */
static int
csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
{
	/* Result of the firmware RESET command; stays 0 when no mailbox
	 * command is issued at all (invalid mbox id). */
	enum fw_retval retval = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= PCIE_FW_MASTER_M) {
		struct csio_mb *mbp;

		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp) {
			CSIO_INC_STATS(hw, n_err_nomem);
			return -ENOMEM;
		}

		/* RESET with FW_RESET_CMD_HALT_F asks the firmware to halt
		 * (rather than restart) after the reset completes. */
		csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
			      PIORSTMODE | PIORST, FW_RESET_CMD_HALT_F,
			      NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of RESET command failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		mempool_free(mbp, hw->mb_mempool);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET. This can be useful if the firmware is hung or even
	 * missing ... We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability. This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (retval == 0 || force) {
		/* Assert uP reset, then latch the HALT flag in PCIE_FW. */
		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F,
				   PCIE_FW_HALT_F);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return retval ? -EINVAL : 0;
}
1207
1208/*
1209 * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
1210 * @hw: the HW module
1211 * @reset: if we want to do a RESET to restart things
1212 *
1213 * Restart firmware previously halted by csio_hw_fw_halt(). On successful
1214 * return the previous PF Master remains as the new PF Master and there
1215 * is no need to issue a new HELLO command, etc.
1216 *
1217 * We do this in two ways:
1218 *
1219 * 1. If we're dealing with newer firmware we'll simply want to take
1220 * the chip's microprocessor out of RESET. This will cause the
1221 * firmware to start up from its start vector. And then we'll loop
1222 * until the firmware indicates it's started again (PCIE_FW.HALT
1223 * reset to 0) or we timeout.
1224 *
1225 * 2. If we're dealing with older firmware then we'll need to RESET
1226 * the chip since older firmware won't recognize the PCIE_FW.HALT
1227 * flag and automatically RESET itself on startup.
1228 */
static int
csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET. If that works, great and we can
		 * return success. Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= PCIE_FW_MASTER_M) {
			/* Release the uP so it can service the RESET
			 * mailbox command. */
			csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
			msleep(100);
			if (csio_do_reset(hw, true) == 0)
				return 0;
		}

		/* Fallback: global PIO reset, then wait for the chip. */
		csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
		msleep(2000);
	} else {
		int ms;

		/* Take the uP out of RESET and poll for the firmware to
		 * clear PCIE_FW.HALT, i.e. to come back up. */
		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}
1270
1271/*
1272 * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
1273 * @hw: the HW module
1274 * @mbox: mailbox to use for the FW RESET command (if desired)
1275 * @fw_data: the firmware image to write
1276 * @size: image size
1277 * @force: force upgrade even if firmware doesn't cooperate
1278 *
1279 * Perform all of the steps necessary for upgrading an adapter's
1280 * firmware image. Normally this requires the cooperation of the
1281 * existing firmware in order to halt all existing activities
1282 * but if an invalid mailbox token is passed in we skip that step
1283 * (though we'll still put the adapter microprocessor into RESET in
1284 * that case).
1285 *
1286 * On successful return the new firmware will have been loaded and
1287 * the adapter will have been fully RESET losing all previous setup
1288 * state. On unsuccessful return the adapter may be completely hosed ...
1289 * positive errno indicates that the adapter is ~probably~ intact, a
1290 * negative errno indicates that things are looking bad ...
1291 */
1292static int
1293csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
1294 const u8 *fw_data, uint32_t size, int32_t force)
1295{
1296 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
1297 int reset, ret;
1298
1299 ret = csio_hw_fw_halt(hw, mbox, force);
1300 if (ret != 0 && !force)
1301 return ret;
1302
1303 ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
1304 if (ret != 0)
1305 return ret;
1306
1307 /*
1308 * Older versions of the firmware don't understand the new
1309 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
1310 * restart. So for newly loaded older firmware we'll have to do the
1311 * RESET for it so it starts up on a clean slate. We can tell if
1312 * the newly loaded firmware will handle this right by checking
1313 * its header flags to see if it advertises the capability.
1314 */
1315 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
1316 return csio_hw_fw_restart(hw, mbox, reset);
1317}
1318
1319
1320/*
1321 * csio_hw_fw_config_file - setup an adapter via a Configuration File
1322 * @hw: the HW module
1323 * @mbox: mailbox to use for the FW command
1324 * @mtype: the memory type where the Configuration File is located
1325 * @maddr: the memory address where the Configuration File is located
1326 * @finiver: return value for CF [fini] version
1327 * @finicsum: return value for CF [fini] checksum
1328 * @cfcsum: return value for CF computed checksum
1329 *
1330 * Issue a command to get the firmware to process the Configuration
1331 * File located at the specified mtype/maddress. If the Configuration
1332 * File is processed successfully and return value pointers are
1333 * provided, the Configuration File "[fini] section version and
1334 * checksum values will be returned along with the computed checksum.
1335 * It's up to the caller to decide how it wants to respond to the
 * checksums not matching but it is recommended that a prominent warning
1337 * be emitted in order to help people rapidly identify changed or
1338 * corrupted Configuration Files.
1339 *
1340 * Also note that it's possible to modify things like "niccaps",
1341 * "toecaps",etc. between processing the Configuration File and telling
1342 * the firmware to use the new configuration. Callers which want to
1343 * do this will need to "hand-roll" their own CAPS_CONFIGS commands for
1344 * Configuration Files if they want to do this.
1345 */
1346static int
1347csio_hw_fw_config_file(struct csio_hw *hw,
1348 unsigned int mtype, unsigned int maddr,
1349 uint32_t *finiver, uint32_t *finicsum, uint32_t *cfcsum)
1350{
1351 struct csio_mb *mbp;
1352 struct fw_caps_config_cmd *caps_cmd;
1353 int rv = -EINVAL;
1354 enum fw_retval ret;
1355
1356 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1357 if (!mbp) {
1358 CSIO_INC_STATS(hw, n_err_nomem);
1359 return -ENOMEM;
1360 }
1361 /*
1362 * Tell the firmware to process the indicated Configuration File.
1363 * If there are no errors and the caller has provided return value
1364 * pointers for the [fini] section version, checksum and computed
1365 * checksum, pass those back to the caller.
1366 */
1367 caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
1368 CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
1369 caps_cmd->op_to_write =
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05301370 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
1371 FW_CMD_REQUEST_F |
1372 FW_CMD_READ_F);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301373 caps_cmd->cfvalid_to_len16 =
Hariprasad Shenai51678652014-11-21 12:52:02 +05301374 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
1375 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
1376 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301377 FW_LEN16(*caps_cmd));
1378
1379 if (csio_mb_issue(hw, mbp)) {
1380 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
1381 goto out;
1382 }
1383
1384 ret = csio_mb_fw_retval(mbp);
1385 if (ret != FW_SUCCESS) {
1386 csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
1387 goto out;
1388 }
1389
1390 if (finiver)
1391 *finiver = ntohl(caps_cmd->finiver);
1392 if (finicsum)
1393 *finicsum = ntohl(caps_cmd->finicsum);
1394 if (cfcsum)
1395 *cfcsum = ntohl(caps_cmd->cfcsum);
1396
1397 /* Validate device capabilities */
1398 if (csio_hw_validate_caps(hw, mbp)) {
1399 rv = -ENOENT;
1400 goto out;
1401 }
1402
1403 /*
1404 * And now tell the firmware to use the configuration we just loaded.
1405 */
1406 caps_cmd->op_to_write =
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05301407 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
1408 FW_CMD_REQUEST_F |
1409 FW_CMD_WRITE_F);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301410 caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));
1411
1412 if (csio_mb_issue(hw, mbp)) {
1413 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
1414 goto out;
1415 }
1416
1417 ret = csio_mb_fw_retval(mbp);
1418 if (ret != FW_SUCCESS) {
1419 csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
1420 goto out;
1421 }
1422
1423 rv = 0;
1424out:
1425 mempool_free(mbp, hw->mb_mempool);
1426 return rv;
1427}
1428
1429/*
1430 * csio_get_device_params - Get device parameters.
1431 * @hw: HW module
1432 *
1433 */
1434static int
1435csio_get_device_params(struct csio_hw *hw)
1436{
1437 struct csio_wrm *wrm = csio_hw_to_wrm(hw);
1438 struct csio_mb *mbp;
1439 enum fw_retval retval;
1440 u32 param[6];
1441 int i, j = 0;
1442
1443 /* Initialize portids to -1 */
1444 for (i = 0; i < CSIO_MAX_PPORTS; i++)
1445 hw->pport[i].portid = -1;
1446
1447 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1448 if (!mbp) {
1449 CSIO_INC_STATS(hw, n_err_nomem);
1450 return -ENOMEM;
1451 }
1452
1453 /* Get port vec information. */
1454 param[0] = FW_PARAM_DEV(PORTVEC);
1455
1456 /* Get Core clock. */
1457 param[1] = FW_PARAM_DEV(CCLK);
1458
1459 /* Get EQ id start and end. */
1460 param[2] = FW_PARAM_PFVF(EQ_START);
1461 param[3] = FW_PARAM_PFVF(EQ_END);
1462
1463 /* Get IQ id start and end. */
1464 param[4] = FW_PARAM_PFVF(IQFLINT_START);
1465 param[5] = FW_PARAM_PFVF(IQFLINT_END);
1466
1467 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
1468 ARRAY_SIZE(param), param, NULL, false, NULL);
1469 if (csio_mb_issue(hw, mbp)) {
1470 csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
1471 mempool_free(mbp, hw->mb_mempool);
1472 return -EINVAL;
1473 }
1474
1475 csio_mb_process_read_params_rsp(hw, mbp, &retval,
1476 ARRAY_SIZE(param), param);
1477 if (retval != FW_SUCCESS) {
1478 csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
1479 retval);
1480 mempool_free(mbp, hw->mb_mempool);
1481 return -EINVAL;
1482 }
1483
1484 /* cache the information. */
1485 hw->port_vec = param[0];
1486 hw->vpd.cclk = param[1];
1487 wrm->fw_eq_start = param[2];
1488 wrm->fw_iq_start = param[4];
1489
1490 /* Using FW configured max iqs & eqs */
1491 if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
1492 !csio_is_hw_master(hw)) {
1493 hw->cfg_niq = param[5] - param[4] + 1;
1494 hw->cfg_neq = param[3] - param[2] + 1;
1495 csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
1496 hw->cfg_niq, hw->cfg_neq);
1497 }
1498
1499 hw->port_vec &= csio_port_mask;
1500
1501 hw->num_pports = hweight32(hw->port_vec);
1502
1503 csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
1504 hw->port_vec, hw->num_pports);
1505
1506 for (i = 0; i < hw->num_pports; i++) {
1507 while ((hw->port_vec & (1 << j)) == 0)
1508 j++;
1509 hw->pport[i].portid = j++;
1510 csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
1511 }
1512 mempool_free(mbp, hw->mb_mempool);
1513
1514 return 0;
1515}
1516
1517
1518/*
1519 * csio_config_device_caps - Get and set device capabilities.
1520 * @hw: HW module
1521 *
1522 */
1523static int
1524csio_config_device_caps(struct csio_hw *hw)
1525{
1526 struct csio_mb *mbp;
1527 enum fw_retval retval;
1528 int rv = -EINVAL;
1529
1530 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1531 if (!mbp) {
1532 CSIO_INC_STATS(hw, n_err_nomem);
1533 return -ENOMEM;
1534 }
1535
1536 /* Get device capabilities */
1537 csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);
1538
1539 if (csio_mb_issue(hw, mbp)) {
1540 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
1541 goto out;
1542 }
1543
1544 retval = csio_mb_fw_retval(mbp);
1545 if (retval != FW_SUCCESS) {
1546 csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
1547 goto out;
1548 }
1549
1550 /* Validate device capabilities */
1551 if (csio_hw_validate_caps(hw, mbp))
1552 goto out;
1553
1554 /* Don't config device capabilities if already configured */
1555 if (hw->fw_state == CSIO_DEV_STATE_INIT) {
1556 rv = 0;
1557 goto out;
1558 }
1559
1560 /* Write back desired device capabilities */
1561 csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
1562 false, true, NULL);
1563
1564 if (csio_mb_issue(hw, mbp)) {
1565 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
1566 goto out;
1567 }
1568
1569 retval = csio_mb_fw_retval(mbp);
1570 if (retval != FW_SUCCESS) {
1571 csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
1572 goto out;
1573 }
1574
1575 rv = 0;
1576out:
1577 mempool_free(mbp, hw->mb_mempool);
1578 return rv;
1579}
1580
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301581/*
1582 * csio_enable_ports - Bring up all available ports.
1583 * @hw: HW module.
1584 *
1585 */
1586static int
1587csio_enable_ports(struct csio_hw *hw)
1588{
1589 struct csio_mb *mbp;
1590 enum fw_retval retval;
1591 uint8_t portid;
1592 int i;
1593
1594 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1595 if (!mbp) {
1596 CSIO_INC_STATS(hw, n_err_nomem);
1597 return -ENOMEM;
1598 }
1599
1600 for (i = 0; i < hw->num_pports; i++) {
1601 portid = hw->pport[i].portid;
1602
1603 /* Read PORT information */
1604 csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
1605 false, 0, 0, NULL);
1606
1607 if (csio_mb_issue(hw, mbp)) {
1608 csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
1609 portid);
1610 mempool_free(mbp, hw->mb_mempool);
1611 return -EINVAL;
1612 }
1613
1614 csio_mb_process_read_port_rsp(hw, mbp, &retval,
1615 &hw->pport[i].pcap);
1616 if (retval != FW_SUCCESS) {
1617 csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
1618 portid, retval);
1619 mempool_free(mbp, hw->mb_mempool);
1620 return -EINVAL;
1621 }
1622
1623 /* Write back PORT information */
1624 csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true,
1625 (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL);
1626
1627 if (csio_mb_issue(hw, mbp)) {
1628 csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
1629 portid);
1630 mempool_free(mbp, hw->mb_mempool);
1631 return -EINVAL;
1632 }
1633
1634 retval = csio_mb_fw_retval(mbp);
1635 if (retval != FW_SUCCESS) {
1636 csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n",
1637 portid, retval);
1638 mempool_free(mbp, hw->mb_mempool);
1639 return -EINVAL;
1640 }
1641
1642 } /* For all ports */
1643
1644 mempool_free(mbp, hw->mb_mempool);
1645
1646 return 0;
1647}
1648
1649/*
1650 * csio_get_fcoe_resinfo - Read fcoe fw resource info.
1651 * @hw: HW module
1652 * Issued with lock held.
1653 */
1654static int
1655csio_get_fcoe_resinfo(struct csio_hw *hw)
1656{
1657 struct csio_fcoe_res_info *res_info = &hw->fres_info;
1658 struct fw_fcoe_res_info_cmd *rsp;
1659 struct csio_mb *mbp;
1660 enum fw_retval retval;
1661
1662 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1663 if (!mbp) {
1664 CSIO_INC_STATS(hw, n_err_nomem);
1665 return -ENOMEM;
1666 }
1667
1668 /* Get FCoE FW resource information */
1669 csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
1670
1671 if (csio_mb_issue(hw, mbp)) {
1672 csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
1673 mempool_free(mbp, hw->mb_mempool);
1674 return -EINVAL;
1675 }
1676
1677 rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05301678 retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301679 if (retval != FW_SUCCESS) {
1680 csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
1681 retval);
1682 mempool_free(mbp, hw->mb_mempool);
1683 return -EINVAL;
1684 }
1685
1686 res_info->e_d_tov = ntohs(rsp->e_d_tov);
1687 res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);
1688 res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);
1689 res_info->r_r_tov = ntohs(rsp->r_r_tov);
1690 res_info->max_xchgs = ntohl(rsp->max_xchgs);
1691 res_info->max_ssns = ntohl(rsp->max_ssns);
1692 res_info->used_xchgs = ntohl(rsp->used_xchgs);
1693 res_info->used_ssns = ntohl(rsp->used_ssns);
1694 res_info->max_fcfs = ntohl(rsp->max_fcfs);
1695 res_info->max_vnps = ntohl(rsp->max_vnps);
1696 res_info->used_fcfs = ntohl(rsp->used_fcfs);
1697 res_info->used_vnps = ntohl(rsp->used_vnps);
1698
1699 csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
1700 res_info->max_xchgs);
1701 mempool_free(mbp, hw->mb_mempool);
1702
1703 return 0;
1704}
1705
1706static int
1707csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
1708{
1709 struct csio_mb *mbp;
1710 enum fw_retval retval;
1711 u32 _param[1];
1712
1713 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1714 if (!mbp) {
1715 CSIO_INC_STATS(hw, n_err_nomem);
1716 return -ENOMEM;
1717 }
1718
1719 /*
1720 * Find out whether we're dealing with a version of
1721 * the firmware which has configuration file support.
1722 */
Hariprasad Shenai51678652014-11-21 12:52:02 +05301723 _param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
1724 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301725
1726 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
1727 ARRAY_SIZE(_param), _param, NULL, false, NULL);
1728 if (csio_mb_issue(hw, mbp)) {
1729 csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
1730 mempool_free(mbp, hw->mb_mempool);
1731 return -EINVAL;
1732 }
1733
1734 csio_mb_process_read_params_rsp(hw, mbp, &retval,
1735 ARRAY_SIZE(_param), _param);
1736 if (retval != FW_SUCCESS) {
1737 csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
1738 retval);
1739 mempool_free(mbp, hw->mb_mempool);
1740 return -EINVAL;
1741 }
1742
1743 mempool_free(mbp, hw->mb_mempool);
1744 *param = _param[0];
1745
1746 return 0;
1747}
1748
1749static int
1750csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
1751{
1752 int ret = 0;
1753 const struct firmware *cf;
1754 struct pci_dev *pci_dev = hw->pdev;
1755 struct device *dev = &pci_dev->dev;
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301756 unsigned int mtype = 0, maddr = 0;
1757 uint32_t *cfg_data;
1758 int value_to_add = 0;
1759
Arvind Bhushan7cc16382013-03-14 05:09:08 +00001760 if (request_firmware(&cf, CSIO_CF_FNAME(hw), dev) < 0) {
1761 csio_err(hw, "could not find config file %s, err: %d\n",
1762 CSIO_CF_FNAME(hw), ret);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301763 return -ENOENT;
1764 }
1765
1766 if (cf->size%4 != 0)
1767 value_to_add = 4 - (cf->size % 4);
1768
1769 cfg_data = kzalloc(cf->size+value_to_add, GFP_KERNEL);
Jesper Juhl02db3db2012-12-26 21:31:51 +01001770 if (cfg_data == NULL) {
1771 ret = -ENOMEM;
1772 goto leave;
1773 }
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301774
1775 memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
Jesper Juhl02db3db2012-12-26 21:31:51 +01001776 if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
1777 ret = -EINVAL;
1778 goto leave;
1779 }
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301780
Hariprasad Shenai51678652014-11-21 12:52:02 +05301781 mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
1782 maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301783
1784 ret = csio_memory_write(hw, mtype, maddr,
1785 cf->size + value_to_add, cfg_data);
Arvind Bhushan7cc16382013-03-14 05:09:08 +00001786
1787 if ((ret == 0) && (value_to_add != 0)) {
1788 union {
1789 u32 word;
1790 char buf[4];
1791 } last;
1792 size_t size = cf->size & ~0x3;
1793 int i;
1794
1795 last.word = cfg_data[size >> 2];
1796 for (i = value_to_add; i < 4; i++)
1797 last.buf[i] = 0;
1798 ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
1799 }
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301800 if (ret == 0) {
Arvind Bhushan7cc16382013-03-14 05:09:08 +00001801 csio_info(hw, "config file upgraded to %s\n",
1802 CSIO_CF_FNAME(hw));
1803 snprintf(path, 64, "%s%s", "/lib/firmware/", CSIO_CF_FNAME(hw));
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301804 }
1805
Jesper Juhl02db3db2012-12-26 21:31:51 +01001806leave:
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301807 kfree(cfg_data);
1808 release_firmware(cf);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301809 return ret;
1810}
1811
1812/*
1813 * HW initialization: contact FW, obtain config, perform basic init.
1814 *
1815 * If the firmware we're dealing with has Configuration File support, then
1816 * we use that to perform all configuration -- either using the configuration
1817 * file stored in flash on the adapter or using a filesystem-local file
1818 * if available.
1819 *
1820 * If we don't have configuration file support in the firmware, then we'll
1821 * have to set things up the old fashioned way with hard-coded register
1822 * writes and firmware commands ...
1823 */
1824
1825/*
1826 * Attempt to initialize the HW via a Firmware Configuration File.
1827 */
static int
csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
{
	unsigned int mtype, maddr;
	int rv;
	uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
	int using_flash;
	char path[64];

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		rv = csio_do_reset(hw, true);
		if (rv != 0)
			goto bye;
	}

	/*
	 * If we have a configuration file in host ,
	 * then use that. Otherwise, use the configuration file stored
	 * in the HW flash ...
	 */
	/* Drop hw->lock across the flash step: csio_hw_flash_config()
	 * calls request_firmware() and kzalloc(GFP_KERNEL), which can
	 * sleep. */
	spin_unlock_irq(&hw->lock);
	rv = csio_hw_flash_config(hw, fw_cfg_param, path);
	spin_lock_irq(&hw->lock);
	if (rv != 0) {
		if (rv == -ENOENT) {
			/*
			 * config file was not found. Use default
			 * config file from flash.
			 */
			mtype = FW_MEMTYPE_CF_FLASH;
			maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
			using_flash = 1;
		} else {
			/*
			 * we revert back to the hardwired config if
			 * flashing failed.
			 */
			goto bye;
		}
	} else {
		mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
		maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
		using_flash = 0;
	}

	hw->cfg_store = (uint8_t)mtype;

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.
	 */
	rv = csio_hw_fw_config_file(hw, mtype, maddr, &finiver,
		&finicsum, &cfcsum);
	if (rv != 0)
		goto bye;

	/* Cache the [fini] version/checksums for later inspection. */
	hw->cfg_finiver = finiver;
	hw->cfg_finicsum = finicsum;
	hw->cfg_cfcsum = cfcsum;
	hw->cfg_csum_status = true;

	/* Checksum mismatch is reported but not fatal. */
	if (finicsum != cfcsum) {
		csio_warn(hw,
			  "Config File checksum mismatch: csum=%#x, computed=%#x\n",
			  finicsum, cfcsum);

		hw->cfg_csum_status = false;
	}

	/*
	 * Note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

	/* device parameters */
	rv = csio_get_device_params(hw);
	if (rv != 0)
		goto bye;

	/* Configure SGE */
	csio_wr_sge_init(hw);

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	/* Post event to notify completion of configuration */
	csio_post_event(&hw->sm, CSIO_HWE_INIT);

	csio_info(hw,
		  "Firmware Configuration File %s, version %#x, computed checksum %#x\n",
		  (using_flash ? "in device FLASH" : path), finiver, cfcsum);

	return 0;

	/*
	 * Something bad happened. Return the error ...
	 */
bye:
	hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
	csio_dbg(hw, "Configuration file error %d\n", rv);
	return rv;
}
1936
1937/*
1938 * Attempt to initialize the adapter via hard-coded, driver supplied
1939 * parameters ...
1940 */
1941static int
1942csio_hw_no_fwconfig(struct csio_hw *hw, int reset)
1943{
1944 int rv;
1945 /*
1946 * Reset device if necessary
1947 */
1948 if (reset) {
1949 rv = csio_do_reset(hw, true);
1950 if (rv != 0)
1951 goto out;
1952 }
1953
1954 /* Get and set device capabilities */
1955 rv = csio_config_device_caps(hw);
1956 if (rv != 0)
1957 goto out;
1958
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301959 /* device parameters */
1960 rv = csio_get_device_params(hw);
1961 if (rv != 0)
1962 goto out;
1963
1964 /* Configure SGE */
1965 csio_wr_sge_init(hw);
1966
1967 /* Post event to notify completion of configuration */
1968 csio_post_event(&hw->sm, CSIO_HWE_INIT);
1969
1970out:
1971 return rv;
1972}
1973
1974/*
1975 * Returns -EINVAL if attempts to flash the firmware failed
1976 * else returns 0,
1977 * if flashing was not attempted because the card had the
1978 * latest firmware ECANCELED is returned
1979 */
1980static int
1981csio_hw_flash_fw(struct csio_hw *hw)
1982{
1983 int ret = -ECANCELED;
1984 const struct firmware *fw;
1985 const struct fw_hdr *hdr;
1986 u32 fw_ver;
1987 struct pci_dev *pci_dev = hw->pdev;
1988 struct device *dev = &pci_dev->dev ;
1989
Arvind Bhushan7cc16382013-03-14 05:09:08 +00001990 if (request_firmware(&fw, CSIO_FW_FNAME(hw), dev) < 0) {
1991 csio_err(hw, "could not find firmware image %s, err: %d\n",
1992 CSIO_FW_FNAME(hw), ret);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301993 return -EINVAL;
1994 }
1995
1996 hdr = (const struct fw_hdr *)fw->data;
1997 fw_ver = ntohl(hdr->fw_ver);
Hariprasad Shenaib2e1a3f2014-11-21 12:52:05 +05301998 if (FW_HDR_FW_VER_MAJOR_G(fw_ver) != FW_VERSION_MAJOR(hw))
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301999 return -EINVAL; /* wrong major version, won't do */
2000
2001 /*
2002 * If the flash FW is unusable or we found something newer, load it.
2003 */
Hariprasad Shenaib2e1a3f2014-11-21 12:52:05 +05302004 if (FW_HDR_FW_VER_MAJOR_G(hw->fwrev) != FW_VERSION_MAJOR(hw) ||
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302005 fw_ver > hw->fwrev) {
2006 ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
2007 /*force=*/false);
2008 if (!ret)
Arvind Bhushan7cc16382013-03-14 05:09:08 +00002009 csio_info(hw,
2010 "firmware upgraded to version %pI4 from %s\n",
2011 &hdr->fw_ver, CSIO_FW_FNAME(hw));
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302012 else
2013 csio_err(hw, "firmware upgrade failed! err=%d\n", ret);
Arvind Bhushan7cc16382013-03-14 05:09:08 +00002014 } else
2015 ret = -EINVAL;
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302016
2017 release_firmware(fw);
2018
2019 return ret;
2020}
2021
2022
2023/*
2024 * csio_hw_configure - Configure HW
2025 * @hw - HW module
2026 *
2027 */
2028static void
2029csio_hw_configure(struct csio_hw *hw)
2030{
2031 int reset = 1;
2032 int rv;
2033 u32 param[1];
2034
2035 rv = csio_hw_dev_ready(hw);
2036 if (rv != 0) {
2037 CSIO_INC_STATS(hw, n_err_fatal);
2038 csio_post_event(&hw->sm, CSIO_HWE_FATAL);
2039 goto out;
2040 }
2041
2042 /* HW version */
2043 hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV);
2044
2045 /* Needed for FW download */
2046 rv = csio_hw_get_flash_params(hw);
2047 if (rv != 0) {
2048 csio_err(hw, "Failed to get serial flash params rv:%d\n", rv);
2049 csio_post_event(&hw->sm, CSIO_HWE_FATAL);
2050 goto out;
2051 }
2052
Yijing Wangad4d35f2013-09-05 15:55:26 +08002053 /* Set PCIe completion timeout to 4 seconds */
2054 if (pci_is_pcie(hw->pdev))
2055 pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2,
2056 PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302057
Arvind Bhushan7cc16382013-03-14 05:09:08 +00002058 hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302059
2060 rv = csio_hw_get_fw_version(hw, &hw->fwrev);
2061 if (rv != 0)
2062 goto out;
2063
2064 csio_hw_print_fw_version(hw, "Firmware revision");
2065
2066 rv = csio_do_hello(hw, &hw->fw_state);
2067 if (rv != 0) {
2068 CSIO_INC_STATS(hw, n_err_fatal);
2069 csio_post_event(&hw->sm, CSIO_HWE_FATAL);
2070 goto out;
2071 }
2072
2073 /* Read vpd */
2074 rv = csio_hw_get_vpd_params(hw, &hw->vpd);
2075 if (rv != 0)
2076 goto out;
2077
2078 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
2079 rv = csio_hw_check_fw_version(hw);
2080 if (rv == -EINVAL) {
2081
2082 /* Do firmware update */
2083 spin_unlock_irq(&hw->lock);
2084 rv = csio_hw_flash_fw(hw);
2085 spin_lock_irq(&hw->lock);
2086
2087 if (rv == 0) {
2088 reset = 0;
2089 /*
2090 * Note that the chip was reset as part of the
2091 * firmware upgrade so we don't reset it again
2092 * below and grab the new firmware version.
2093 */
2094 rv = csio_hw_check_fw_version(hw);
2095 }
2096 }
2097 /*
2098 * If the firmware doesn't support Configuration
2099 * Files, use the old Driver-based, hard-wired
2100 * initialization. Otherwise, try using the
2101 * Configuration File support and fall back to the
2102 * Driver-based initialization if there's no
2103 * Configuration File found.
2104 */
2105 if (csio_hw_check_fwconfig(hw, param) == 0) {
2106 rv = csio_hw_use_fwconfig(hw, reset, param);
2107 if (rv == -ENOENT)
2108 goto out;
2109 if (rv != 0) {
2110 csio_info(hw,
2111 "No Configuration File present "
2112 "on adapter. Using hard-wired "
2113 "configuration parameters.\n");
2114 rv = csio_hw_no_fwconfig(hw, reset);
2115 }
2116 } else {
2117 rv = csio_hw_no_fwconfig(hw, reset);
2118 }
2119
2120 if (rv != 0)
2121 goto out;
2122
2123 } else {
2124 if (hw->fw_state == CSIO_DEV_STATE_INIT) {
2125
Arvind Bhushan7cc16382013-03-14 05:09:08 +00002126 hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;
2127
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302128 /* device parameters */
2129 rv = csio_get_device_params(hw);
2130 if (rv != 0)
2131 goto out;
2132
2133 /* Get device capabilities */
2134 rv = csio_config_device_caps(hw);
2135 if (rv != 0)
2136 goto out;
2137
2138 /* Configure SGE */
2139 csio_wr_sge_init(hw);
2140
2141 /* Post event to notify completion of configuration */
2142 csio_post_event(&hw->sm, CSIO_HWE_INIT);
2143 goto out;
2144 }
2145 } /* if not master */
2146
2147out:
2148 return;
2149}
2150
2151/*
2152 * csio_hw_initialize - Initialize HW
2153 * @hw - HW module
2154 *
2155 */
2156static void
2157csio_hw_initialize(struct csio_hw *hw)
2158{
2159 struct csio_mb *mbp;
2160 enum fw_retval retval;
2161 int rv;
2162 int i;
2163
2164 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
2165 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
2166 if (!mbp)
2167 goto out;
2168
2169 csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
2170
2171 if (csio_mb_issue(hw, mbp)) {
2172 csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");
2173 goto free_and_out;
2174 }
2175
2176 retval = csio_mb_fw_retval(mbp);
2177 if (retval != FW_SUCCESS) {
2178 csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",
2179 retval);
2180 goto free_and_out;
2181 }
2182
2183 mempool_free(mbp, hw->mb_mempool);
2184 }
2185
2186 rv = csio_get_fcoe_resinfo(hw);
2187 if (rv != 0) {
2188 csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);
2189 goto out;
2190 }
2191
2192 spin_unlock_irq(&hw->lock);
2193 rv = csio_config_queues(hw);
2194 spin_lock_irq(&hw->lock);
2195
2196 if (rv != 0) {
2197 csio_err(hw, "Config of queues failed!: %d\n", rv);
2198 goto out;
2199 }
2200
2201 for (i = 0; i < hw->num_pports; i++)
2202 hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;
2203
2204 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
2205 rv = csio_enable_ports(hw);
2206 if (rv != 0) {
2207 csio_err(hw, "Failed to enable ports: %d\n", rv);
2208 goto out;
2209 }
2210 }
2211
2212 csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);
2213 return;
2214
2215free_and_out:
2216 mempool_free(mbp, hw->mb_mempool);
2217out:
2218 return;
2219}
2220
2221#define PF_INTR_MASK (PFSW | PFCIM)
2222
2223/*
2224 * csio_hw_intr_enable - Enable HW interrupts
2225 * @hw: Pointer to HW module.
2226 *
2227 * Enable interrupts in HW registers.
2228 */
2229static void
2230csio_hw_intr_enable(struct csio_hw *hw)
2231{
2232 uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
2233 uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
2234 uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE);
2235
2236 /*
2237 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
2238 * by FW, so do nothing for INTX.
2239 */
2240 if (hw->intr_mode == CSIO_IM_MSIX)
Hariprasad Shenaif061de42015-01-05 16:30:44 +05302241 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
2242 AIVEC_V(AIVEC_M), vec);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302243 else if (hw->intr_mode == CSIO_IM_MSI)
Hariprasad Shenaif061de42015-01-05 16:30:44 +05302244 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
2245 AIVEC_V(AIVEC_M), 0);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302246
2247 csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE));
2248
2249 /* Turn on MB interrupts - this will internally flush PIO as well */
2250 csio_mb_intr_enable(hw);
2251
2252 /* These are common registers - only a master can modify them */
2253 if (csio_is_hw_master(hw)) {
2254 /*
2255 * Disable the Serial FLASH interrupt, if enabled!
2256 */
2257 pl &= (~SF);
2258 csio_wr_reg32(hw, pl, PL_INT_ENABLE);
2259
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302260 csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F |
2261 EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F |
2262 ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F |
2263 ERR_DATA_CPL_ON_HIGH_QID1_F |
2264 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
2265 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
2266 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
2267 ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F,
2268 SGE_INT_ENABLE3_A);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302269 csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf);
2270 }
2271
2272 hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
2273
2274}
2275
2276/*
2277 * csio_hw_intr_disable - Disable HW interrupts
2278 * @hw: Pointer to HW module.
2279 *
2280 * Turn off Mailbox and PCI_PF_CFG interrupts.
2281 */
2282void
2283csio_hw_intr_disable(struct csio_hw *hw)
2284{
2285 uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
2286
2287 if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
2288 return;
2289
2290 hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;
2291
2292 csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE));
2293 if (csio_is_hw_master(hw))
2294 csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0);
2295
2296 /* Turn off MB interrupts */
2297 csio_mb_intr_disable(hw);
2298
2299}
2300
Arvind Bhushan7cc16382013-03-14 05:09:08 +00002301void
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302302csio_hw_fatal_err(struct csio_hw *hw)
2303{
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302304 csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302305 csio_hw_intr_disable(hw);
2306
2307 /* Do not reset HW, we may need FW state for debugging */
2308 csio_fatal(hw, "HW Fatal error encountered!\n");
2309}
2310
2311/*****************************************************************************/
2312/* START: HW SM */
2313/*****************************************************************************/
2314/*
2315 * csio_hws_uninit - Uninit state
2316 * @hw - HW module
2317 * @evt - Event
2318 *
2319 */
2320static void
2321csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)
2322{
2323 hw->prev_evt = hw->cur_evt;
2324 hw->cur_evt = evt;
2325 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2326
2327 switch (evt) {
2328 case CSIO_HWE_CFG:
2329 csio_set_state(&hw->sm, csio_hws_configuring);
2330 csio_hw_configure(hw);
2331 break;
2332
2333 default:
2334 CSIO_INC_STATS(hw, n_evt_unexp);
2335 break;
2336 }
2337}
2338
2339/*
2340 * csio_hws_configuring - Configuring state
2341 * @hw - HW module
2342 * @evt - Event
2343 *
2344 */
2345static void
2346csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
2347{
2348 hw->prev_evt = hw->cur_evt;
2349 hw->cur_evt = evt;
2350 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2351
2352 switch (evt) {
2353 case CSIO_HWE_INIT:
2354 csio_set_state(&hw->sm, csio_hws_initializing);
2355 csio_hw_initialize(hw);
2356 break;
2357
2358 case CSIO_HWE_INIT_DONE:
2359 csio_set_state(&hw->sm, csio_hws_ready);
2360 /* Fan out event to all lnode SMs */
2361 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
2362 break;
2363
2364 case CSIO_HWE_FATAL:
2365 csio_set_state(&hw->sm, csio_hws_uninit);
2366 break;
2367
2368 case CSIO_HWE_PCI_REMOVE:
2369 csio_do_bye(hw);
2370 break;
2371 default:
2372 CSIO_INC_STATS(hw, n_evt_unexp);
2373 break;
2374 }
2375}
2376
2377/*
2378 * csio_hws_initializing - Initialiazing state
2379 * @hw - HW module
2380 * @evt - Event
2381 *
2382 */
2383static void
2384csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
2385{
2386 hw->prev_evt = hw->cur_evt;
2387 hw->cur_evt = evt;
2388 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2389
2390 switch (evt) {
2391 case CSIO_HWE_INIT_DONE:
2392 csio_set_state(&hw->sm, csio_hws_ready);
2393
2394 /* Fan out event to all lnode SMs */
2395 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
2396
2397 /* Enable interrupts */
2398 csio_hw_intr_enable(hw);
2399 break;
2400
2401 case CSIO_HWE_FATAL:
2402 csio_set_state(&hw->sm, csio_hws_uninit);
2403 break;
2404
2405 case CSIO_HWE_PCI_REMOVE:
2406 csio_do_bye(hw);
2407 break;
2408
2409 default:
2410 CSIO_INC_STATS(hw, n_evt_unexp);
2411 break;
2412 }
2413}
2414
2415/*
2416 * csio_hws_ready - Ready state
2417 * @hw - HW module
2418 * @evt - Event
2419 *
2420 */
2421static void
2422csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
2423{
2424 /* Remember the event */
2425 hw->evtflag = evt;
2426
2427 hw->prev_evt = hw->cur_evt;
2428 hw->cur_evt = evt;
2429 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2430
2431 switch (evt) {
2432 case CSIO_HWE_HBA_RESET:
2433 case CSIO_HWE_FW_DLOAD:
2434 case CSIO_HWE_SUSPEND:
2435 case CSIO_HWE_PCI_REMOVE:
2436 case CSIO_HWE_PCIERR_DETECTED:
2437 csio_set_state(&hw->sm, csio_hws_quiescing);
2438 /* cleanup all outstanding cmds */
2439 if (evt == CSIO_HWE_HBA_RESET ||
2440 evt == CSIO_HWE_PCIERR_DETECTED)
2441 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);
2442 else
2443 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);
2444
2445 csio_hw_intr_disable(hw);
2446 csio_hw_mbm_cleanup(hw);
2447 csio_evtq_stop(hw);
2448 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);
2449 csio_evtq_flush(hw);
2450 csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));
2451 csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);
2452 break;
2453
2454 case CSIO_HWE_FATAL:
2455 csio_set_state(&hw->sm, csio_hws_uninit);
2456 break;
2457
2458 default:
2459 CSIO_INC_STATS(hw, n_evt_unexp);
2460 break;
2461 }
2462}
2463
2464/*
2465 * csio_hws_quiescing - Quiescing state
2466 * @hw - HW module
2467 * @evt - Event
2468 *
2469 */
2470static void
2471csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
2472{
2473 hw->prev_evt = hw->cur_evt;
2474 hw->cur_evt = evt;
2475 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2476
2477 switch (evt) {
2478 case CSIO_HWE_QUIESCED:
2479 switch (hw->evtflag) {
2480 case CSIO_HWE_FW_DLOAD:
2481 csio_set_state(&hw->sm, csio_hws_resetting);
2482 /* Download firmware */
2483 /* Fall through */
2484
2485 case CSIO_HWE_HBA_RESET:
2486 csio_set_state(&hw->sm, csio_hws_resetting);
2487 /* Start reset of the HBA */
2488 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);
2489 csio_wr_destroy_queues(hw, false);
2490 csio_do_reset(hw, false);
2491 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);
2492 break;
2493
2494 case CSIO_HWE_PCI_REMOVE:
2495 csio_set_state(&hw->sm, csio_hws_removing);
2496 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);
2497 csio_wr_destroy_queues(hw, true);
2498 /* Now send the bye command */
2499 csio_do_bye(hw);
2500 break;
2501
2502 case CSIO_HWE_SUSPEND:
2503 csio_set_state(&hw->sm, csio_hws_quiesced);
2504 break;
2505
2506 case CSIO_HWE_PCIERR_DETECTED:
2507 csio_set_state(&hw->sm, csio_hws_pcierr);
2508 csio_wr_destroy_queues(hw, false);
2509 break;
2510
2511 default:
2512 CSIO_INC_STATS(hw, n_evt_unexp);
2513 break;
2514
2515 }
2516 break;
2517
2518 default:
2519 CSIO_INC_STATS(hw, n_evt_unexp);
2520 break;
2521 }
2522}
2523
2524/*
2525 * csio_hws_quiesced - Quiesced state
2526 * @hw - HW module
2527 * @evt - Event
2528 *
2529 */
2530static void
2531csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
2532{
2533 hw->prev_evt = hw->cur_evt;
2534 hw->cur_evt = evt;
2535 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2536
2537 switch (evt) {
2538 case CSIO_HWE_RESUME:
2539 csio_set_state(&hw->sm, csio_hws_configuring);
2540 csio_hw_configure(hw);
2541 break;
2542
2543 default:
2544 CSIO_INC_STATS(hw, n_evt_unexp);
2545 break;
2546 }
2547}
2548
2549/*
2550 * csio_hws_resetting - HW Resetting state
2551 * @hw - HW module
2552 * @evt - Event
2553 *
2554 */
2555static void
2556csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
2557{
2558 hw->prev_evt = hw->cur_evt;
2559 hw->cur_evt = evt;
2560 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2561
2562 switch (evt) {
2563 case CSIO_HWE_HBA_RESET_DONE:
2564 csio_evtq_start(hw);
2565 csio_set_state(&hw->sm, csio_hws_configuring);
2566 csio_hw_configure(hw);
2567 break;
2568
2569 default:
2570 CSIO_INC_STATS(hw, n_evt_unexp);
2571 break;
2572 }
2573}
2574
2575/*
2576 * csio_hws_removing - PCI Hotplug removing state
2577 * @hw - HW module
2578 * @evt - Event
2579 *
2580 */
2581static void
2582csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
2583{
2584 hw->prev_evt = hw->cur_evt;
2585 hw->cur_evt = evt;
2586 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2587
2588 switch (evt) {
2589 case CSIO_HWE_HBA_RESET:
2590 if (!csio_is_hw_master(hw))
2591 break;
2592 /*
2593 * The BYE should have alerady been issued, so we cant
2594 * use the mailbox interface. Hence we use the PL_RST
2595 * register directly.
2596 */
2597 csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
2598 csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
2599 mdelay(2000);
2600 break;
2601
2602 /* Should never receive any new events */
2603 default:
2604 CSIO_INC_STATS(hw, n_evt_unexp);
2605 break;
2606
2607 }
2608}
2609
2610/*
2611 * csio_hws_pcierr - PCI Error state
2612 * @hw - HW module
2613 * @evt - Event
2614 *
2615 */
2616static void
2617csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
2618{
2619 hw->prev_evt = hw->cur_evt;
2620 hw->cur_evt = evt;
2621 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2622
2623 switch (evt) {
2624 case CSIO_HWE_PCIERR_SLOT_RESET:
2625 csio_evtq_start(hw);
2626 csio_set_state(&hw->sm, csio_hws_configuring);
2627 csio_hw_configure(hw);
2628 break;
2629
2630 default:
2631 CSIO_INC_STATS(hw, n_evt_unexp);
2632 break;
2633 }
2634}
2635
2636/*****************************************************************************/
2637/* END: HW SM */
2638/*****************************************************************************/
2639
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302640/*
2641 * csio_handle_intr_status - table driven interrupt handler
2642 * @hw: HW instance
2643 * @reg: the interrupt status register to process
2644 * @acts: table of interrupt actions
2645 *
2646 * A table driven interrupt handler that applies a set of masks to an
2647 * interrupt status word and performs the corresponding actions if the
2648 * interrupts described by the mask have occured. The actions include
2649 * optionally emitting a warning or alert message. The table is terminated
2650 * by an entry specifying mask 0. Returns the number of fatal interrupt
2651 * conditions.
2652 */
Arvind Bhushan7cc16382013-03-14 05:09:08 +00002653int
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302654csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
2655 const struct intr_info *acts)
2656{
2657 int fatal = 0;
2658 unsigned int mask = 0;
2659 unsigned int status = csio_rd_reg32(hw, reg);
2660
2661 for ( ; acts->mask; ++acts) {
2662 if (!(status & acts->mask))
2663 continue;
2664 if (acts->fatal) {
2665 fatal++;
2666 csio_fatal(hw, "Fatal %s (0x%x)\n",
2667 acts->msg, status & acts->mask);
2668 } else if (acts->msg)
2669 csio_info(hw, "%s (0x%x)\n",
2670 acts->msg, status & acts->mask);
2671 mask |= acts->mask;
2672 }
2673 status &= mask;
2674 if (status) /* clear processed interrupts */
2675 csio_wr_reg32(hw, status, reg);
2676 return fatal;
2677}
2678
2679/*
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302680 * TP interrupt handler.
2681 */
2682static void csio_tp_intr_handler(struct csio_hw *hw)
2683{
2684 static struct intr_info tp_intr_info[] = {
2685 { 0x3fffffff, "TP parity error", -1, 1 },
2686 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
2687 { 0, NULL, 0, 0 }
2688 };
2689
2690 if (csio_handle_intr_status(hw, TP_INT_CAUSE, tp_intr_info))
2691 csio_hw_fatal_err(hw);
2692}
2693
2694/*
2695 * SGE interrupt handler.
2696 */
2697static void csio_sge_intr_handler(struct csio_hw *hw)
2698{
2699 uint64_t v;
2700
2701 static struct intr_info sge_intr_info[] = {
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302702 { ERR_CPL_EXCEED_IQE_SIZE_F,
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302703 "SGE received CPL exceeding IQE size", -1, 1 },
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302704 { ERR_INVALID_CIDX_INC_F,
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302705 "SGE GTS CIDX increment too large", -1, 0 },
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302706 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
2707 { ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 },
2708 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302709 "SGE IQID > 1023 received CPL for FL", -1, 0 },
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302710 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302711 0 },
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302712 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302713 0 },
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302714 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302715 0 },
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302716 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302717 0 },
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302718 { ERR_ING_CTXT_PRIO_F,
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302719 "SGE too many priority ingress contexts", -1, 0 },
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302720 { ERR_EGR_CTXT_PRIO_F,
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302721 "SGE too many priority egress contexts", -1, 0 },
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302722 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
2723 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302724 { 0, NULL, 0, 0 }
2725 };
2726
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302727 v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) |
2728 ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302729 if (v) {
2730 csio_fatal(hw, "SGE parity error (%#llx)\n",
2731 (unsigned long long)v);
2732 csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302733 SGE_INT_CAUSE1_A);
2734 csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302735 }
2736
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302737 v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302738
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302739 if (csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info) ||
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302740 v != 0)
2741 csio_hw_fatal_err(hw);
2742}
2743
Hariprasad Shenai89c3a862015-01-05 16:30:45 +05302744#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
2745 OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
2746#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
2747 IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302748
2749/*
2750 * CIM interrupt handler.
2751 */
2752static void csio_cim_intr_handler(struct csio_hw *hw)
2753{
2754 static struct intr_info cim_intr_info[] = {
Hariprasad Shenai89c3a862015-01-05 16:30:45 +05302755 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302756 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
2757 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
Hariprasad Shenai89c3a862015-01-05 16:30:45 +05302758 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
2759 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
2760 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
2761 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302762 { 0, NULL, 0, 0 }
2763 };
2764 static struct intr_info cim_upintr_info[] = {
Hariprasad Shenai89c3a862015-01-05 16:30:45 +05302765 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
2766 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
2767 { ILLWRINT_F, "CIM illegal write", -1, 1 },
2768 { ILLRDINT_F, "CIM illegal read", -1, 1 },
2769 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
2770 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
2771 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
2772 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
2773 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
2774 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
2775 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
2776 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
2777 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
2778 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
2779 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
2780 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
2781 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
2782 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
2783 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
2784 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
2785 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
2786 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
2787 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
2788 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
2789 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
2790 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
2791 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
2792 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302793 { 0, NULL, 0, 0 }
2794 };
2795
2796 int fat;
2797
Hariprasad Shenai89c3a862015-01-05 16:30:45 +05302798 fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A,
2799 cim_intr_info) +
2800 csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A,
2801 cim_upintr_info);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302802 if (fat)
2803 csio_hw_fatal_err(hw);
2804}
2805
2806/*
2807 * ULP RX interrupt handler.
2808 */
2809static void csio_ulprx_intr_handler(struct csio_hw *hw)
2810{
2811 static struct intr_info ulprx_intr_info[] = {
2812 { 0x1800000, "ULPRX context error", -1, 1 },
2813 { 0x7fffff, "ULPRX parity error", -1, 1 },
2814 { 0, NULL, 0, 0 }
2815 };
2816
2817 if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info))
2818 csio_hw_fatal_err(hw);
2819}
2820
2821/*
2822 * ULP TX interrupt handler.
2823 */
2824static void csio_ulptx_intr_handler(struct csio_hw *hw)
2825{
2826 static struct intr_info ulptx_intr_info[] = {
2827 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
2828 0 },
2829 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
2830 0 },
2831 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
2832 0 },
2833 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
2834 0 },
2835 { 0xfffffff, "ULPTX parity error", -1, 1 },
2836 { 0, NULL, 0, 0 }
2837 };
2838
2839 if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE, ulptx_intr_info))
2840 csio_hw_fatal_err(hw);
2841}
2842
2843/*
2844 * PM TX interrupt handler.
2845 */
2846static void csio_pmtx_intr_handler(struct csio_hw *hw)
2847{
2848 static struct intr_info pmtx_intr_info[] = {
2849 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
2850 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
2851 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
2852 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
2853 { 0xffffff0, "PMTX framing error", -1, 1 },
2854 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
2855 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
2856 1 },
2857 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
2858 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
2859 { 0, NULL, 0, 0 }
2860 };
2861
2862 if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE, pmtx_intr_info))
2863 csio_hw_fatal_err(hw);
2864}
2865
2866/*
2867 * PM RX interrupt handler.
2868 */
2869static void csio_pmrx_intr_handler(struct csio_hw *hw)
2870{
2871 static struct intr_info pmrx_intr_info[] = {
2872 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2873 { 0x3ffff0, "PMRX framing error", -1, 1 },
2874 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
2875 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
2876 1 },
2877 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
2878 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
2879 { 0, NULL, 0, 0 }
2880 };
2881
2882 if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE, pmrx_intr_info))
2883 csio_hw_fatal_err(hw);
2884}
2885
2886/*
2887 * CPL switch interrupt handler.
2888 */
2889static void csio_cplsw_intr_handler(struct csio_hw *hw)
2890{
2891 static struct intr_info cplsw_intr_info[] = {
2892 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
2893 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
2894 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
2895 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
2896 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
2897 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
2898 { 0, NULL, 0, 0 }
2899 };
2900
2901 if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info))
2902 csio_hw_fatal_err(hw);
2903}
2904
2905/*
2906 * LE interrupt handler.
2907 */
2908static void csio_le_intr_handler(struct csio_hw *hw)
2909{
2910 static struct intr_info le_intr_info[] = {
2911 { LIPMISS, "LE LIP miss", -1, 0 },
2912 { LIP0, "LE 0 LIP error", -1, 0 },
2913 { PARITYERR, "LE parity error", -1, 1 },
2914 { UNKNOWNCMD, "LE unknown command", -1, 1 },
2915 { REQQPARERR, "LE request queue parity error", -1, 1 },
2916 { 0, NULL, 0, 0 }
2917 };
2918
2919 if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info))
2920 csio_hw_fatal_err(hw);
2921}
2922
2923/*
2924 * MPS interrupt handler.
2925 */
2926static void csio_mps_intr_handler(struct csio_hw *hw)
2927{
2928 static struct intr_info mps_rx_intr_info[] = {
2929 { 0xffffff, "MPS Rx parity error", -1, 1 },
2930 { 0, NULL, 0, 0 }
2931 };
2932 static struct intr_info mps_tx_intr_info[] = {
2933 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
2934 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
2935 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
2936 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
2937 { BUBBLE, "MPS Tx underflow", -1, 1 },
2938 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
2939 { FRMERR, "MPS Tx framing error", -1, 1 },
2940 { 0, NULL, 0, 0 }
2941 };
2942 static struct intr_info mps_trc_intr_info[] = {
2943 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
2944 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
2945 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
2946 { 0, NULL, 0, 0 }
2947 };
2948 static struct intr_info mps_stat_sram_intr_info[] = {
2949 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
2950 { 0, NULL, 0, 0 }
2951 };
2952 static struct intr_info mps_stat_tx_intr_info[] = {
2953 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
2954 { 0, NULL, 0, 0 }
2955 };
2956 static struct intr_info mps_stat_rx_intr_info[] = {
2957 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
2958 { 0, NULL, 0, 0 }
2959 };
2960 static struct intr_info mps_cls_intr_info[] = {
2961 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
2962 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
2963 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
2964 { 0, NULL, 0, 0 }
2965 };
2966
2967 int fat;
2968
2969 fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE,
2970 mps_rx_intr_info) +
2971 csio_handle_intr_status(hw, MPS_TX_INT_CAUSE,
2972 mps_tx_intr_info) +
2973 csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE,
2974 mps_trc_intr_info) +
2975 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM,
2976 mps_stat_sram_intr_info) +
2977 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
2978 mps_stat_tx_intr_info) +
2979 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
2980 mps_stat_rx_intr_info) +
2981 csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE,
2982 mps_cls_intr_info);
2983
2984 csio_wr_reg32(hw, 0, MPS_INT_CAUSE);
2985 csio_rd_reg32(hw, MPS_INT_CAUSE); /* flush */
2986 if (fat)
2987 csio_hw_fatal_err(hw);
2988}
2989
Hariprasad Shenai89c3a862015-01-05 16:30:45 +05302990#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
2991 ECC_UE_INT_CAUSE_F)
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302992
2993/*
2994 * EDC/MC interrupt handler.
2995 */
2996static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
2997{
2998 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
2999
3000 unsigned int addr, cnt_addr, v;
3001
3002 if (idx <= MEM_EDC1) {
Hariprasad Shenai89c3a862015-01-05 16:30:45 +05303003 addr = EDC_REG(EDC_INT_CAUSE_A, idx);
3004 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05303005 } else {
Hariprasad Shenai89c3a862015-01-05 16:30:45 +05303006 addr = MC_INT_CAUSE_A;
3007 cnt_addr = MC_ECC_STATUS_A;
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05303008 }
3009
3010 v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
Hariprasad Shenai89c3a862015-01-05 16:30:45 +05303011 if (v & PERR_INT_CAUSE_F)
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05303012 csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
Hariprasad Shenai89c3a862015-01-05 16:30:45 +05303013 if (v & ECC_CE_INT_CAUSE_F) {
3014 uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr));
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05303015
Hariprasad Shenai89c3a862015-01-05 16:30:45 +05303016 csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05303017 csio_warn(hw, "%u %s correctable ECC data error%s\n",
3018 cnt, name[idx], cnt > 1 ? "s" : "");
3019 }
Hariprasad Shenai89c3a862015-01-05 16:30:45 +05303020 if (v & ECC_UE_INT_CAUSE_F)
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05303021 csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);
3022
3023 csio_wr_reg32(hw, v, addr);
Hariprasad Shenai89c3a862015-01-05 16:30:45 +05303024 if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05303025 csio_hw_fatal_err(hw);
3026}
3027
3028/*
3029 * MA interrupt handler.
3030 */
static void csio_ma_intr_handler(struct csio_hw *hw)
{
	uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A);

	if (status & MEM_PERR_INT_CAUSE_F)
		csio_fatal(hw, "MA parity error, parity status %#x\n",
			   csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A));
	if (status & MEM_WRAP_INT_CAUSE_F) {
		v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A);
		csio_fatal(hw,
			   "MA address wrap-around error by client %u to address %#x\n",
			   MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4);
	}
	/* Ack all cause bits; any MA interrupt is treated as fatal. */
	csio_wr_reg32(hw, status, MA_INT_CAUSE_A);
	csio_hw_fatal_err(hw);
}
3047
3048/*
3049 * SMB interrupt handler.
3050 */
static void csio_smb_intr_handler(struct csio_hw *hw)
{
	/* All SMB interrupt sources here are parity errors; all are fatal. */
	static struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }	/* terminator */
	};

	if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info))
		csio_hw_fatal_err(hw);
}
3063
3064/*
3065 * NC-SI interrupt handler.
3066 */
static void csio_ncsi_intr_handler(struct csio_hw *hw)
{
	/* NC-SI parity-error sources; any hit is fatal. */
	static struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }	/* terminator */
	};

	if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info))
		csio_hw_fatal_err(hw);
}
3080
3081/*
3082 * XGMAC interrupt handler.
3083 */
3084static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
3085{
Arvind Bhushan7cc16382013-03-14 05:09:08 +00003086 uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port));
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05303087
3088 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
3089 if (!v)
3090 return;
3091
3092 if (v & TXFIFO_PRTY_ERR)
3093 csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
3094 if (v & RXFIFO_PRTY_ERR)
3095 csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
Arvind Bhushan7cc16382013-03-14 05:09:08 +00003096 csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port));
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05303097 csio_hw_fatal_err(hw);
3098}
3099
3100/*
3101 * PL interrupt handler.
3102 */
static void csio_pl_intr_handler(struct csio_hw *hw)
{
	/* PL-level parity errors; any hit is fatal. */
	static struct intr_info pl_intr_info[] = {
		{ FATALPERR, "T4 fatal parity error", -1, 1 },
		{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
		{ 0, NULL, 0, 0 }	/* terminator */
	};

	if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info))
		csio_hw_fatal_err(hw);
}
3114
3115/*
3116 * csio_hw_slow_intr_handler - control path interrupt handler
3117 * @hw: HW module
3118 *
3119 * Interrupt handler for non-data global interrupt events, e.g., errors.
3120 * The designation 'slow' is because it involves register reads, while
3121 * data interrupts typically don't involve any MMIOs.
3122 */
int
csio_hw_slow_intr_handler(struct csio_hw *hw)
{
	uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE);

	/* None of the cause bits we own are set: not our interrupt. */
	if (!(cause & CSIO_GLBL_INTR_MASK)) {
		CSIO_INC_STATS(hw, n_plint_unexp);
		return 0;
	}

	csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause);

	CSIO_INC_STATS(hw, n_plint_cnt);

	/* Fan out to the handler of each module whose cause bit is set. */
	if (cause & CIM)
		csio_cim_intr_handler(hw);

	if (cause & MPS)
		csio_mps_intr_handler(hw);

	if (cause & NCSI)
		csio_ncsi_intr_handler(hw);

	if (cause & PL)
		csio_pl_intr_handler(hw);

	if (cause & SMB)
		csio_smb_intr_handler(hw);

	if (cause & XGMAC0)
		csio_xgmac_intr_handler(hw, 0);

	if (cause & XGMAC1)
		csio_xgmac_intr_handler(hw, 1);

	if (cause & XGMAC_KR0)
		csio_xgmac_intr_handler(hw, 2);

	if (cause & XGMAC_KR1)
		csio_xgmac_intr_handler(hw, 3);

	/* PCIE handling differs between chips, so it goes via chip ops. */
	if (cause & PCIE)
		hw->chip_ops->chip_pcie_intr_handler(hw);

	if (cause & MC)
		csio_mem_intr_handler(hw, MEM_MC);

	if (cause & EDC0)
		csio_mem_intr_handler(hw, MEM_EDC0);

	if (cause & EDC1)
		csio_mem_intr_handler(hw, MEM_EDC1);

	if (cause & LE)
		csio_le_intr_handler(hw);

	if (cause & TP)
		csio_tp_intr_handler(hw);

	if (cause & MA)
		csio_ma_intr_handler(hw);

	if (cause & PM_TX)
		csio_pmtx_intr_handler(hw);

	if (cause & PM_RX)
		csio_pmrx_intr_handler(hw);

	if (cause & ULP_RX)
		csio_ulprx_intr_handler(hw);

	if (cause & CPL_SWITCH)
		csio_cplsw_intr_handler(hw);

	if (cause & SGE)
		csio_sge_intr_handler(hw);

	if (cause & ULP_TX)
		csio_ulptx_intr_handler(hw);

	/* Clear the interrupts just processed for which we are the master. */
	csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE);
	csio_rd_reg32(hw, PL_INT_CAUSE); /* flush */

	return 1;
}
3209
3210/*****************************************************************************
3211 * HW <--> mailbox interfacing routines.
3212 ****************************************************************************/
3213/*
3214 * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions
3215 *
3216 * @data: Private data pointer.
3217 *
3218 * Called from worker thread context.
3219 */
static void
csio_mberr_worker(void *data)
{
	struct csio_hw *hw = (struct csio_hw *)data;
	struct csio_mbm *mbm = &hw->mbm;
	LIST_HEAD(cbfn_q);
	struct csio_mb *mbp_next;
	int rv;

	del_timer_sync(&mbm->timer);

	spin_lock_irq(&hw->lock);
	if (list_empty(&mbm->cbfn_q)) {
		spin_unlock_irq(&hw->lock);
		return;
	}

	/* Move queued completions to a local list so the callbacks can
	 * run below without holding hw->lock. */
	list_splice_tail_init(&mbm->cbfn_q, &cbfn_q);
	mbm->stats.n_cbfnq = 0;

	/* Try to start waiting mailboxes */
	if (!list_empty(&mbm->req_q)) {
		mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list);
		list_del_init(&mbp_next->list);

		rv = csio_mb_issue(hw, mbp_next);
		if (rv != 0)
			/* Issue failed: re-queue the request. */
			list_add_tail(&mbp_next->list, &mbm->req_q);
		else
			CSIO_DEC_STATS(mbm, n_activeq);
	}
	spin_unlock_irq(&hw->lock);

	/* Now callback completions */
	csio_mb_completions(hw, &cbfn_q);
}
3256
3257/*
3258 * csio_hw_mb_timer - Top-level Mailbox timeout handler.
3259 *
3260 * @data: private data pointer
3261 *
3262 **/
3263static void
3264csio_hw_mb_timer(uintptr_t data)
3265{
3266 struct csio_hw *hw = (struct csio_hw *)data;
3267 struct csio_mb *mbp = NULL;
3268
3269 spin_lock_irq(&hw->lock);
3270 mbp = csio_mb_tmo_handler(hw);
3271 spin_unlock_irq(&hw->lock);
3272
3273 /* Call back the function for the timed-out Mailbox */
3274 if (mbp)
3275 mbp->mb_cbfn(hw, mbp);
3276
3277}
3278
3279/*
3280 * csio_hw_mbm_cleanup - Cleanup Mailbox module.
3281 * @hw: HW module
3282 *
3283 * Called with lock held, should exit with lock held.
3284 * Cancels outstanding mailboxes (waiting, in-flight) and gathers them
3285 * into a local queue. Drops lock and calls the completions. Holds
3286 * lock and returns.
3287 */
static void
csio_hw_mbm_cleanup(struct csio_hw *hw)
{
	LIST_HEAD(cbfn_q);

	/* Gather all waiting/in-flight mailboxes onto the local queue. */
	csio_mb_cancel_all(hw, &cbfn_q);

	/* Completions must run without hw->lock held; reacquire before
	 * returning, as the caller expects the lock to still be held. */
	spin_unlock_irq(&hw->lock);
	csio_mb_completions(hw, &cbfn_q);
	spin_lock_irq(&hw->lock);
}
3299
3300/*****************************************************************************
3301 * Event handling
3302 ****************************************************************************/
3303int
3304csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
3305 uint16_t len)
3306{
3307 struct csio_evt_msg *evt_entry = NULL;
3308
3309 if (type >= CSIO_EVT_MAX)
3310 return -EINVAL;
3311
3312 if (len > CSIO_EVT_MSG_SIZE)
3313 return -EINVAL;
3314
3315 if (hw->flags & CSIO_HWF_FWEVT_STOP)
3316 return -EINVAL;
3317
3318 if (list_empty(&hw->evt_free_q)) {
3319 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
3320 type, len);
3321 return -ENOMEM;
3322 }
3323
3324 evt_entry = list_first_entry(&hw->evt_free_q,
3325 struct csio_evt_msg, list);
3326 list_del_init(&evt_entry->list);
3327
3328 /* copy event msg and queue the event */
3329 evt_entry->type = type;
3330 memcpy((void *)evt_entry->data, evt_msg, len);
3331 list_add_tail(&evt_entry->list, &hw->evt_active_q);
3332
3333 CSIO_DEC_STATS(hw, n_evt_freeq);
3334 CSIO_INC_STATS(hw, n_evt_activeq);
3335
3336 return 0;
3337}
3338
/*
 * Locked variant of csio_enqueue_evt(): takes hw->lock with irqsave.
 * When @msg_sg is true, @evt_msg points to a struct csio_fl_dma_buf and
 * the payload is copied piecewise out of its freelist buffers.
 */
static int
csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
			uint16_t len, bool msg_sg)
{
	struct csio_evt_msg *evt_entry = NULL;
	struct csio_fl_dma_buf *fl_sg;
	uint32_t off = 0;
	unsigned long flags;
	int n, ret = 0;

	if (type >= CSIO_EVT_MAX)
		return -EINVAL;

	if (len > CSIO_EVT_MSG_SIZE)
		return -EINVAL;

	spin_lock_irqsave(&hw->lock, flags);
	if (hw->flags & CSIO_HWF_FWEVT_STOP) {
		ret = -EINVAL;
		goto out;
	}

	if (list_empty(&hw->evt_free_q)) {
		csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
			 type, len);
		ret = -ENOMEM;
		goto out;
	}

	evt_entry = list_first_entry(&hw->evt_free_q,
				     struct csio_evt_msg, list);
	list_del_init(&evt_entry->list);

	/* copy event msg and queue the event */
	evt_entry->type = type;

	/* If Payload in SG list*/
	if (msg_sg) {
		fl_sg = (struct csio_fl_dma_buf *) evt_msg;
		/* Gather up to CSIO_MAX_FLBUF_PER_IQWR buffers, stopping
		 * once @len bytes have been copied. */
		for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) {
			memcpy((void *)((uintptr_t)evt_entry->data + off),
				fl_sg->flbufs[n].vaddr,
				fl_sg->flbufs[n].len);
			off += fl_sg->flbufs[n].len;
		}
	} else
		memcpy((void *)evt_entry->data, evt_msg, len);

	list_add_tail(&evt_entry->list, &hw->evt_active_q);
	CSIO_DEC_STATS(hw, n_evt_freeq);
	CSIO_INC_STATS(hw, n_evt_activeq);
out:
	spin_unlock_irqrestore(&hw->lock, flags);
	return ret;
}
3394
3395static void
3396csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry)
3397{
3398 if (evt_entry) {
3399 spin_lock_irq(&hw->lock);
3400 list_del_init(&evt_entry->list);
3401 list_add_tail(&evt_entry->list, &hw->evt_free_q);
3402 CSIO_DEC_STATS(hw, n_evt_activeq);
3403 CSIO_INC_STATS(hw, n_evt_freeq);
3404 spin_unlock_irq(&hw->lock);
3405 }
3406}
3407
3408void
3409csio_evtq_flush(struct csio_hw *hw)
3410{
3411 uint32_t count;
3412 count = 30;
3413 while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) {
3414 spin_unlock_irq(&hw->lock);
3415 msleep(2000);
3416 spin_lock_irq(&hw->lock);
3417 }
3418
3419 CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
3420}
3421
static void
csio_evtq_stop(struct csio_hw *hw)
{
	/* Block further enqueues; the worker drops events while set. */
	hw->flags |= CSIO_HWF_FWEVT_STOP;
}
3427
static void
csio_evtq_start(struct csio_hw *hw)
{
	/* Re-enable event enqueues after a csio_evtq_stop(). */
	hw->flags &= ~CSIO_HWF_FWEVT_STOP;
}
3433
static void
csio_evtq_cleanup(struct csio_hw *hw)
{
	struct list_head *evt_entry, *next_entry;

	/* Release outstanding events from activeq to freeq*/
	if (!list_empty(&hw->evt_active_q))
		list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);

	hw->stats.n_evt_activeq = 0;
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;

	/* Freeup event entry */
	list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
		/* NOTE(review): kfree() on the list_head pointer assumes
		 * the list linkage is the first member of
		 * struct csio_evt_msg — confirm against csio_hw.h. */
		kfree(evt_entry);
		CSIO_DEC_STATS(hw, n_evt_freeq);
	}

	hw->stats.n_evt_freeq = 0;
}
3454
3455
3456static void
3457csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
3458 struct csio_fl_dma_buf *flb, void *priv)
3459{
3460 __u8 op;
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05303461 void *msg = NULL;
3462 uint32_t msg_len = 0;
3463 bool msg_sg = 0;
3464
3465 op = ((struct rss_header *) wr)->opcode;
3466 if (op == CPL_FW6_PLD) {
3467 CSIO_INC_STATS(hw, n_cpl_fw6_pld);
3468 if (!flb || !flb->totlen) {
3469 CSIO_INC_STATS(hw, n_cpl_unexp);
3470 return;
3471 }
3472
3473 msg = (void *) flb;
3474 msg_len = flb->totlen;
3475 msg_sg = 1;
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05303476 } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {
3477
3478 CSIO_INC_STATS(hw, n_cpl_fw6_msg);
3479 /* skip RSS header */
3480 msg = (void *)((uintptr_t)wr + sizeof(__be64));
3481 msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
3482 sizeof(struct cpl_fw4_msg);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05303483 } else {
3484 csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
3485 CSIO_INC_STATS(hw, n_cpl_unexp);
3486 return;
3487 }
3488
3489 /*
3490 * Enqueue event to EventQ. Events processing happens
3491 * in Event worker thread context
3492 */
3493 if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
3494 (uint16_t)msg_len, msg_sg))
3495 CSIO_INC_STATS(hw, n_evt_drop);
3496}
3497
/*
 * csio_evtq_worker - Event worker: drains the active event queue and
 * dispatches each event by type (FW message, mailbox error, dev loss).
 */
void
csio_evtq_worker(struct work_struct *work)
{
	struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
	struct list_head *evt_entry, *next_entry;
	LIST_HEAD(evt_q);
	struct csio_evt_msg *evt_msg;
	struct cpl_fw6_msg *msg;
	struct csio_rnode *rn;
	int rv = 0;
	uint8_t evtq_stop = 0;

	csio_dbg(hw, "event worker thread active evts#%d\n",
		 hw->stats.n_evt_activeq);

	spin_lock_irq(&hw->lock);
	while (!list_empty(&hw->evt_active_q)) {
		/* Move the batch to a local list so processing can run
		 * without holding hw->lock. */
		list_splice_tail_init(&hw->evt_active_q, &evt_q);
		spin_unlock_irq(&hw->lock);

		list_for_each_safe(evt_entry, next_entry, &evt_q) {
			evt_msg = (struct csio_evt_msg *) evt_entry;

			/* Drop events if queue is STOPPED */
			spin_lock_irq(&hw->lock);
			if (hw->flags & CSIO_HWF_FWEVT_STOP)
				evtq_stop = 1;
			spin_unlock_irq(&hw->lock);
			if (evtq_stop) {
				CSIO_INC_STATS(hw, n_evt_drop);
				goto free_evt;
			}

			switch (evt_msg->type) {
			case CSIO_EVT_FW:
				msg = (struct cpl_fw6_msg *)(evt_msg->data);

				if ((msg->opcode == CPL_FW6_MSG ||
				     msg->opcode == CPL_FW4_MSG) &&
				    !msg->type) {
					rv = csio_mb_fwevt_handler(hw,
							msg->data);
					if (!rv)
						break;
					/* Handle any remaining fw events */
					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else if (msg->opcode == CPL_FW6_PLD) {

					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else {
					csio_warn(hw,
					     "Unhandled FW msg op %x type %x\n",
						  msg->opcode, msg->type);
					CSIO_INC_STATS(hw, n_evt_drop);
				}
				break;

			case CSIO_EVT_MBX:
				csio_mberr_worker(hw);
				break;

			case CSIO_EVT_DEV_LOSS:
				/* Payload carries a remote-node pointer. */
				memcpy(&rn, evt_msg->data, sizeof(rn));
				csio_rnode_devloss_handler(rn);
				break;

			default:
				csio_warn(hw, "Unhandled event %x on evtq\n",
					  evt_msg->type);
				CSIO_INC_STATS(hw, n_evt_unexp);
				break;
			}
free_evt:
			csio_free_evt(hw, evt_msg);
		}

		spin_lock_irq(&hw->lock);
	}
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
	spin_unlock_irq(&hw->lock);
}
3581
3582int
3583csio_fwevtq_handler(struct csio_hw *hw)
3584{
3585 int rv;
3586
3587 if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
3588 CSIO_INC_STATS(hw, n_int_stray);
3589 return -EINVAL;
3590 }
3591
3592 rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
3593 csio_process_fwevtq_entry, NULL);
3594 return rv;
3595}
3596
3597/****************************************************************************
3598 * Entry points
3599 ****************************************************************************/
3600
3601/* Management module */
3602/*
 * csio_mgmt_req_lookup - Look up whether the given IO req exists in the Active Q.
3604 * mgmt - mgmt module
3605 * @io_req - io request
3606 *
3607 * Return - 0:if given IO Req exists in active Q.
3608 * -EINVAL :if lookup fails.
3609 */
int
csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
{
	struct list_head *tmp;

	/* Lookup ioreq in the ACTIVEQ */
	list_for_each(tmp, &mgmtm->active_q) {
		/* NOTE(review): this pointer comparison assumes the list
		 * linkage is the first member of struct csio_ioreq —
		 * confirm against the header. */
		if (io_req == (struct csio_ioreq *)tmp)
			return 0;
	}
	return -EINVAL;
}
3622
3623#define ECM_MIN_TMO 1000 /* Minimum timeout value for req */
3624
3625/*
 * csio_mgmt_tmo_handler - MGMT IO Timeout handler.
3627 * @data - Event data.
3628 *
3629 * Return - none.
3630 */
static void
csio_mgmt_tmo_handler(uintptr_t data)
{
	struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data;
	struct list_head *tmp;
	struct csio_ioreq *io_req;

	csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n");

	spin_lock_irq(&mgmtm->hw->lock);

	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		/* Tick down the remaining timeout, clamped at zero. */
		io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO);

		if (!io_req->tmo) {
			/* Dequeue the request from retry Q. */
			/* Step back first so list_for_each() continues
			 * correctly after the current node is unlinked. */
			tmp = csio_list_prev(tmp);
			list_del_init(&io_req->sm.sm_list);
			if (io_req->io_cbfn) {
				/* io_req will be freed by completion handler */
				io_req->wr_status = -ETIMEDOUT;
				io_req->io_cbfn(mgmtm->hw, io_req);
			} else {
				CSIO_DB_ASSERT(0);
			}
		}
	}

	/* If retry queue is not empty, re-arm timer */
	if (!list_empty(&mgmtm->active_q))
		mod_timer(&mgmtm->mgmt_timer,
			  jiffies + msecs_to_jiffies(ECM_MIN_TMO));
	spin_unlock_irq(&mgmtm->hw->lock);
}
3666
static void
csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm)
{
	struct csio_hw *hw = mgmtm->hw;
	struct csio_ioreq *io_req;
	struct list_head *tmp;
	uint32_t count;

	count = 30;
	/* Wait for all outstanding req to complete gracefully */
	/* Poll up to 30 times, 2s apart, dropping hw->lock while sleeping
	 * so completions can make progress. Caller holds hw->lock. */
	while ((!list_empty(&mgmtm->active_q)) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	/* release outstanding req from ACTIVEQ */
	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		/* Step back before unlinking so iteration stays valid. */
		tmp = csio_list_prev(tmp);
		list_del_init(&io_req->sm.sm_list);
		mgmtm->stats.n_active--;
		if (io_req->io_cbfn) {
			/* io_req will be freed by completion handler */
			io_req->wr_status = -ETIMEDOUT;
			io_req->io_cbfn(mgmtm->hw, io_req);
		}
	}
}
3696
3697/*
 * csio_mgmtm_init - Mgmt module init entry point
3699 * @mgmtsm - mgmt module
3700 * @hw - HW module
3701 *
3702 * Initialize mgmt timer, resource wait queue, active queue,
3703 * completion q. Allocate Egress and Ingress
3704 * WR queues and save off the queue index returned by the WR
3705 * module for future use. Allocate and save off mgmt reqs in the
3706 * mgmt_req_freelist for future use. Make sure their SM is initialized
3707 * to uninit state.
3708 * Returns: 0 - on success
3709 * -ENOMEM - on error.
3710 */
3711static int
3712csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
3713{
3714 struct timer_list *timer = &mgmtm->mgmt_timer;
3715
3716 init_timer(timer);
3717 timer->function = csio_mgmt_tmo_handler;
3718 timer->data = (unsigned long)mgmtm;
3719
3720 INIT_LIST_HEAD(&mgmtm->active_q);
3721 INIT_LIST_HEAD(&mgmtm->cbfn_q);
3722
3723 mgmtm->hw = hw;
3724 /*mgmtm->iq_idx = hw->fwevt_iq_idx;*/
3725
3726 return 0;
3727}
3728
3729/*
3730 * csio_mgmtm_exit - MGMT module exit entry point
3731 * @mgmtsm - mgmt module
3732 *
3733 * This function called during MGMT module uninit.
3734 * Stop timers, free ioreqs allocated.
3735 * Returns: None
3736 *
3737 */
static void
csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
{
	/* Stop the mgmt timeout timer and wait for its handler to finish. */
	del_timer_sync(&mgmtm->mgmt_timer);
}
3743
3744
3745/**
3746 * csio_hw_start - Kicks off the HW State machine
3747 * @hw: Pointer to HW module.
3748 *
3749 * It is assumed that the initialization is a synchronous operation.
 * So when we return after posting the event, the HW SM should be in
3751 * the ready state, if there were no errors during init.
3752 */
3753int
3754csio_hw_start(struct csio_hw *hw)
3755{
3756 spin_lock_irq(&hw->lock);
3757 csio_post_event(&hw->sm, CSIO_HWE_CFG);
3758 spin_unlock_irq(&hw->lock);
3759
3760 if (csio_is_hw_ready(hw))
3761 return 0;
3762 else
3763 return -EINVAL;
3764}
3765
3766int
3767csio_hw_stop(struct csio_hw *hw)
3768{
3769 csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);
3770
3771 if (csio_is_hw_removing(hw))
3772 return 0;
3773 else
3774 return -EINVAL;
3775}
3776
3777/* Max reset retries */
3778#define CSIO_MAX_RESET_RETRIES 3
3779
3780/**
3781 * csio_hw_reset - Reset the hardware
3782 * @hw: HW module.
3783 *
3784 * Caller should hold lock across this function.
3785 */
3786int
3787csio_hw_reset(struct csio_hw *hw)
3788{
3789 if (!csio_is_hw_master(hw))
3790 return -EPERM;
3791
3792 if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {
3793 csio_dbg(hw, "Max hw reset attempts reached..");
3794 return -EINVAL;
3795 }
3796
3797 hw->rst_retries++;
3798 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);
3799
3800 if (csio_is_hw_ready(hw)) {
3801 hw->rst_retries = 0;
3802 hw->stats.n_reset_start = jiffies_to_msecs(jiffies);
3803 return 0;
3804 } else
3805 return -EINVAL;
3806}
3807
3808/*
3809 * csio_hw_get_device_id - Caches the Adapter's vendor & device id.
3810 * @hw: HW module.
3811 */
static void
csio_hw_get_device_id(struct csio_hw *hw)
{
	/* Is the adapter device id cached already ?*/
	if (csio_is_dev_id_cached(hw))
		return;

	/* Get the PCI vendor & device id */
	pci_read_config_word(hw->pdev, PCI_VENDOR_ID,
			     &hw->params.pci.vendor_id);
	pci_read_config_word(hw->pdev, PCI_DEVICE_ID,
			     &hw->params.pci.device_id);

	csio_dev_id_cached(hw);
	/* Derive the chip id (T4/T5) from the device id. */
	hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);

} /* csio_hw_get_device_id */
3829
3830/*
3831 * csio_hw_set_description - Set the model, description of the hw.
3832 * @hw: HW module.
3833 * @ven_id: PCI Vendor ID
3834 * @dev_id: PCI Device ID
3835 */
3836static void
3837csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
3838{
3839 uint32_t adap_type, prot_type;
3840
3841 if (ven_id == CSIO_VENDOR_ID) {
3842 prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
3843 adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);
3844
Arvind Bhushan7cc16382013-03-14 05:09:08 +00003845 if (prot_type == CSIO_T4_FCOE_ASIC) {
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05303846 memcpy(hw->hw_ver,
Arvind Bhushan7cc16382013-03-14 05:09:08 +00003847 csio_t4_fcoe_adapters[adap_type].model_no, 16);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05303848 memcpy(hw->model_desc,
Arvind Bhushan7cc16382013-03-14 05:09:08 +00003849 csio_t4_fcoe_adapters[adap_type].description,
3850 32);
3851 } else if (prot_type == CSIO_T5_FCOE_ASIC) {
3852 memcpy(hw->hw_ver,
3853 csio_t5_fcoe_adapters[adap_type].model_no, 16);
3854 memcpy(hw->model_desc,
3855 csio_t5_fcoe_adapters[adap_type].description,
3856 32);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05303857 } else {
3858 char tempName[32] = "Chelsio FCoE Controller";
3859 memcpy(hw->model_desc, tempName, 32);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05303860 }
3861 }
3862} /* csio_hw_set_description */
3863
3864/**
3865 * csio_hw_init - Initialize HW module.
3866 * @hw: Pointer to HW module.
3867 *
3868 * Initialize the members of the HW module.
3869 */
int
csio_hw_init(struct csio_hw *hw)
{
	int rv = -EINVAL;
	uint32_t i;
	uint16_t ven_id, dev_id;
	struct csio_evt_msg *evt_entry;

	INIT_LIST_HEAD(&hw->sm.sm_list);
	csio_init_state(&hw->sm, csio_hws_uninit);
	spin_lock_init(&hw->lock);
	INIT_LIST_HEAD(&hw->sln_head);

	/* Get the PCI vendor & device id */
	csio_hw_get_device_id(hw);

	strcpy(hw->name, CSIO_HW_NAME);

	/* Initialize the HW chip ops with T4/T5 specific ops */
	hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops;

	/* Set the model & its description */

	ven_id = hw->params.pci.vendor_id;
	dev_id = hw->params.pci.device_id;

	csio_hw_set_description(hw, ven_id, dev_id);

	/* Initialize default log level */
	hw->params.log_level = (uint32_t) csio_dbg_level;

	csio_set_fwevt_intr_idx(hw, -1);
	csio_set_nondata_intr_idx(hw, -1);

	/* Init all the modules: Mailbox, WorkRequest and Transport */
	if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))
		goto err;

	rv = csio_wrm_init(csio_hw_to_wrm(hw), hw);
	if (rv)
		goto err_mbm_exit;

	rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);
	if (rv)
		goto err_wrm_exit;

	rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);
	if (rv)
		goto err_scsim_exit;
	/* Pre-allocate evtq and initialize them */
	INIT_LIST_HEAD(&hw->evt_active_q);
	INIT_LIST_HEAD(&hw->evt_free_q);
	for (i = 0; i < csio_evtq_sz; i++) {

		evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
		if (!evt_entry) {
			csio_err(hw, "Failed to initialize eventq");
			/* Entries allocated so far are released below. */
			goto err_evtq_cleanup;
		}

		list_add_tail(&evt_entry->list, &hw->evt_free_q);
		CSIO_INC_STATS(hw, n_evt_freeq);
	}

	hw->dev_num = dev_num;
	dev_num++;

	return 0;

	/* Unwind in the reverse order of the init calls above. */
err_evtq_cleanup:
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
err_scsim_exit:
	csio_scsim_exit(csio_hw_to_scsim(hw));
err_wrm_exit:
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
err_mbm_exit:
	csio_mbm_exit(csio_hw_to_mbm(hw));
err:
	return rv;
}
3951
3952/**
3953 * csio_hw_exit - Un-initialize HW module.
3954 * @hw: Pointer to HW module.
3955 *
3956 */
void
csio_hw_exit(struct csio_hw *hw)
{
	/* Tear down modules in the reverse order of csio_hw_init(). */
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
	csio_scsim_exit(csio_hw_to_scsim(hw));
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
	csio_mbm_exit(csio_hw_to_mbm(hw));
}