blob: c641931d4ae18e9092bd855583a0d9799cc67423 [file] [log] [blame]
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301/*
2 * This file is part of the Chelsio FCoE driver for Linux.
3 *
4 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/pci.h>
36#include <linux/pci_regs.h>
37#include <linux/firmware.h>
38#include <linux/stddef.h>
39#include <linux/delay.h>
40#include <linux/string.h>
41#include <linux/compiler.h>
42#include <linux/jiffies.h>
43#include <linux/kernel.h>
44#include <linux/log2.h>
45
46#include "csio_hw.h"
47#include "csio_lnode.h"
48#include "csio_rnode.h"
49
/* Debug trace level bitmask; default 0xFEFF — exact bit meanings defined
 * by the csio_dbg machinery elsewhere (not visible here). */
int csio_dbg_level = 0xFEFF;
/* Bitmask selecting which ports to use; 0xf — presumably all 4 ports,
 * confirm against csio_hw.h port handling. */
unsigned int csio_port_mask = 0xf;

/* Default FW event queue entries. */
static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;

/* Default MSI param level */
int csio_msi = 2;

/* FCoE function instances */
static int dev_num;
61
/* FCoE Adapter types & its description */
/* Short model name + user-visible description per T4 adapter variant;
 * presumably indexed by adapter/device id — confirm against the chip
 * lookup code that consumes this table. */
static const struct csio_adap_desc csio_t4_fcoe_adapters[] = {
	{"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"},
	{"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"},
	{"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"},
	{"T440-CR 10G", "Chelsio T440-CR 10G [FCoE]"},
	{"T420-BCH 10G", "Chelsio T420-BCH 10G [FCoE]"},
	{"T440-BCH 10G", "Chelsio T440-BCH 10G [FCoE]"},
	{"T440-CH 10G", "Chelsio T440-CH 10G [FCoE]"},
	{"T420-SO 10G", "Chelsio T420-SO 10G [FCoE]"},
	{"T420-CX4 10G", "Chelsio T420-CX4 10G [FCoE]"},
	{"T420-BT 10G", "Chelsio T420-BT 10G [FCoE]"},
	{"T404-BT 1G", "Chelsio T404-BT 1G [FCoE]"},
	{"B420-SR 10G", "Chelsio B420-SR 10G [FCoE]"},
	{"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"},
	{"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"},
	{"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"HUAWEI T480 10G", "Chelsio HUAWEI T480 10G [FCoE]"},
	{"HUAWEI T440 10G", "Chelsio HUAWEI T440 10G [FCoE]"},
	{"HUAWEI STG 10G", "Chelsio HUAWEI STG 10G [FCoE]"},
	{"ACROMAG XAUI 10G", "Chelsio ACROMAG XAUI 10G [FCoE]"},
	{"ACROMAG SFP+ 10G", "Chelsio ACROMAG SFP+ 10G [FCoE]"},
	{"QUANTA SFP+ 10G", "Chelsio QUANTA SFP+ 10G [FCoE]"},
	{"HUAWEI 10Gbase-T", "Chelsio HUAWEI 10Gbase-T [FCoE]"},
	{"HUAWEI T4TOE 10G", "Chelsio HUAWEI T4TOE 10G [FCoE]"}
};
89
90static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
91 {"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
92 {"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
93 {"T522-CR 10G/1G", "Chelsio T452-CR 10G/1G [FCoE]"},
94 {"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
95 {"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
96 {"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
97 {"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
98 {"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
99 {"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
100 {"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
101 {"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
102 {"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
103 {"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
104 {"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
105 {"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
106 {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
107 {"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
108 {"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
109 {"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
110 {"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"}
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530111};
112
113static void csio_mgmtm_cleanup(struct csio_mgmtm *);
114static void csio_hw_mbm_cleanup(struct csio_hw *);
115
116/* State machine forward declarations */
117static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
118static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
119static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
120static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
121static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
122static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
123static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
124static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
125static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);
126
127static void csio_hw_initialize(struct csio_hw *hw);
128static void csio_evtq_stop(struct csio_hw *hw);
129static void csio_evtq_start(struct csio_hw *hw);
130
131int csio_is_hw_ready(struct csio_hw *hw)
132{
133 return csio_match_state(hw, csio_hws_ready);
134}
135
136int csio_is_hw_removing(struct csio_hw *hw)
137{
138 return csio_match_state(hw, csio_hws_removing);
139}
140
141
142/*
143 * csio_hw_wait_op_done_val - wait until an operation is completed
144 * @hw: the HW module
145 * @reg: the register to check for completion
146 * @mask: a single-bit field within @reg that indicates completion
147 * @polarity: the value of the field when the operation is completed
148 * @attempts: number of check iterations
149 * @delay: delay in usecs between iterations
150 * @valp: where to store the value of the register at completion time
151 *
152 * Wait until an operation is completed by checking a bit in a register
153 * up to @attempts times. If @valp is not NULL the value of the register
154 * at the time it indicated completion is stored there. Returns 0 if the
155 * operation completes and -EAGAIN otherwise.
156 */
Arvind Bhushan7cc16382013-03-14 05:09:08 +0000157int
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530158csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
159 int polarity, int attempts, int delay, uint32_t *valp)
160{
161 uint32_t val;
162 while (1) {
163 val = csio_rd_reg32(hw, reg);
164
165 if (!!(val & mask) == polarity) {
166 if (valp)
167 *valp = val;
168 return 0;
169 }
170
171 if (--attempts == 0)
172 return -EAGAIN;
173 if (delay)
174 udelay(delay);
175 }
176}
177
Arvind Bhushan7cc16382013-03-14 05:09:08 +0000178/*
179 * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
180 * @hw: the adapter
181 * @addr: the indirect TP register address
182 * @mask: specifies the field within the register to modify
183 * @val: new value for the field
184 *
185 * Sets a field of an indirect TP register to the given value.
186 */
187void
188csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
189 unsigned int mask, unsigned int val)
190{
Hariprasad Shenai837e4a42015-01-05 16:30:46 +0530191 csio_wr_reg32(hw, addr, TP_PIO_ADDR_A);
192 val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask;
193 csio_wr_reg32(hw, val, TP_PIO_DATA_A);
Arvind Bhushan7cc16382013-03-14 05:09:08 +0000194}
195
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530196void
197csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
198 uint32_t value)
199{
200 uint32_t val = csio_rd_reg32(hw, reg) & ~mask;
201
202 csio_wr_reg32(hw, val | value, reg);
203 /* Flush */
204 csio_rd_reg32(hw, reg);
205
206}
207
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530208static int
Naresh Kumar Inna5036f0a2012-11-20 18:15:40 +0530209csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530210{
Arvind Bhushan7cc16382013-03-14 05:09:08 +0000211 return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
212 addr, len, buf, 0);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530213}
214
215/*
216 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
217 */
Arvind Bhushan7cc16382013-03-14 05:09:08 +0000218#define EEPROM_MAX_RD_POLL 40
219#define EEPROM_MAX_WR_POLL 6
220#define EEPROM_STAT_ADDR 0x7bfc
221#define VPD_BASE 0x400
222#define VPD_BASE_OLD 0
223#define VPD_LEN 1024
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530224#define VPD_INFO_FLD_HDR_SIZE 3
225
226/*
227 * csio_hw_seeprom_read - read a serial EEPROM location
228 * @hw: hw to read
229 * @addr: EEPROM virtual address
230 * @data: where to store the read data
231 *
232 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
233 * VPD capability. Note that this function must be called with a virtual
234 * address.
235 */
236static int
237csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
238{
239 uint16_t val = 0;
240 int attempts = EEPROM_MAX_RD_POLL;
241 uint32_t base = hw->params.pci.vpd_cap_addr;
242
243 if (addr >= EEPROMVSIZE || (addr & 3))
244 return -EINVAL;
245
246 pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);
247
248 do {
249 udelay(10);
250 pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
251 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
252
253 if (!(val & PCI_VPD_ADDR_F)) {
254 csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
255 return -EINVAL;
256 }
257
258 pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
259 *data = le32_to_cpu(*data);
Naresh Kumar Inna5036f0a2012-11-20 18:15:40 +0530260
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530261 return 0;
262}
263
/*
 * Partial EEPROM Vital Product Data structure. Includes only the ID and
 * VPD-R sections.
 */
struct t4_vpd_hdr {
	u8  id_tag;		/* ID string tag byte */
	u8  id_len[2];		/* ID string length (two bytes) */
	u8  id_data[ID_LEN];	/* ID string payload */
	u8  vpdr_tag;		/* VPD-R section tag byte */
	u8  vpdr_len[2];	/* VPD-R length; read little-endian by
				 * csio_hw_get_vpd_keyword_val() */
};
275
276/*
277 * csio_hw_get_vpd_keyword_val - Locates an information field keyword in
278 * the VPD
279 * @v: Pointer to buffered vpd data structure
280 * @kw: The keyword to search for
281 *
282 * Returns the value of the information field keyword or
283 * -EINVAL otherwise.
284 */
285static int
286csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
287{
288 int32_t i;
289 int32_t offset , len;
290 const uint8_t *buf = &v->id_tag;
291 const uint8_t *vpdr_len = &v->vpdr_tag;
292 offset = sizeof(struct t4_vpd_hdr);
293 len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);
294
295 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
296 return -EINVAL;
297
298 for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) {
299 if (memcmp(buf + i , kw, 2) == 0) {
300 i += VPD_INFO_FLD_HDR_SIZE;
301 return i;
302 }
303
304 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
305 }
306
307 return -EINVAL;
308}
309
/* Locate PCI capability @cap on @pdev; store its config-space offset in
 * *@pos (0 when absent) and return 0 on success, -1 if not found. */
static int
csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)
{
	*pos = pci_find_capability(pdev, cap);

	return *pos ? 0 : -1;
}
319
320/*
321 * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM
322 * @hw: HW module
323 * @p: where to store the parameters
324 *
325 * Reads card parameters stored in VPD EEPROM.
326 */
327static int
328csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
329{
330 int i, ret, ec, sn, addr;
331 uint8_t *vpd, csum;
332 const struct t4_vpd_hdr *v;
333 /* To get around compilation warning from strstrip */
334 char *s;
335
336 if (csio_is_valid_vpd(hw))
337 return 0;
338
339 ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
340 &hw->params.pci.vpd_cap_addr);
341 if (ret)
342 return -EINVAL;
343
344 vpd = kzalloc(VPD_LEN, GFP_ATOMIC);
345 if (vpd == NULL)
346 return -ENOMEM;
347
348 /*
349 * Card information normally starts at VPD_BASE but early cards had
350 * it at 0.
351 */
352 ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
353 addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
354
355 for (i = 0; i < VPD_LEN; i += 4) {
356 ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
357 if (ret) {
358 kfree(vpd);
359 return ret;
360 }
361 }
362
363 /* Reset the VPD flag! */
364 hw->flags &= (~CSIO_HWF_VPD_VALID);
365
366 v = (const struct t4_vpd_hdr *)vpd;
367
368#define FIND_VPD_KW(var, name) do { \
369 var = csio_hw_get_vpd_keyword_val(v, name); \
370 if (var < 0) { \
371 csio_err(hw, "missing VPD keyword " name "\n"); \
372 kfree(vpd); \
373 return -EINVAL; \
374 } \
375} while (0)
376
377 FIND_VPD_KW(i, "RV");
378 for (csum = 0; i >= 0; i--)
379 csum += vpd[i];
380
381 if (csum) {
382 csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
383 kfree(vpd);
384 return -EINVAL;
385 }
386 FIND_VPD_KW(ec, "EC");
387 FIND_VPD_KW(sn, "SN");
388#undef FIND_VPD_KW
389
390 memcpy(p->id, v->id_data, ID_LEN);
391 s = strstrip(p->id);
392 memcpy(p->ec, vpd + ec, EC_LEN);
393 s = strstrip(p->ec);
394 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
395 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
396 s = strstrip(p->sn);
397
398 csio_valid_vpd_copied(hw);
399
400 kfree(vpd);
401 return 0;
402}
403
404/*
405 * csio_hw_sf1_read - read data from the serial flash
406 * @hw: the HW module
407 * @byte_cnt: number of bytes to read
408 * @cont: whether another operation will be chained
409 * @lock: whether to lock SF for PL access only
410 * @valp: where to store the read data
411 *
412 * Reads up to 4 bytes of data from the serial flash. The location of
413 * the read needs to be specified prior to calling this by issuing the
414 * appropriate commands to the serial flash.
415 */
416static int
417csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
418 int32_t lock, uint32_t *valp)
419{
420 int ret;
421
422 if (!byte_cnt || byte_cnt > 4)
423 return -EINVAL;
424 if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
425 return -EBUSY;
426
427 cont = cont ? SF_CONT : 0;
428 lock = lock ? SF_LOCK : 0;
429
430 csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP);
431 ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
432 10, NULL);
433 if (!ret)
434 *valp = csio_rd_reg32(hw, SF_DATA);
435 return ret;
436}
437
438/*
439 * csio_hw_sf1_write - write data to the serial flash
440 * @hw: the HW module
441 * @byte_cnt: number of bytes to write
442 * @cont: whether another operation will be chained
443 * @lock: whether to lock SF for PL access only
444 * @val: value to write
445 *
446 * Writes up to 4 bytes of data to the serial flash. The location of
447 * the write needs to be specified prior to calling this by issuing the
448 * appropriate commands to the serial flash.
449 */
450static int
451csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
452 int32_t lock, uint32_t val)
453{
454 if (!byte_cnt || byte_cnt > 4)
455 return -EINVAL;
456 if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
457 return -EBUSY;
458
459 cont = cont ? SF_CONT : 0;
460 lock = lock ? SF_LOCK : 0;
461
462 csio_wr_reg32(hw, val, SF_DATA);
463 csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP);
464
465 return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
466 10, NULL);
467}
468
469/*
470 * csio_hw_flash_wait_op - wait for a flash operation to complete
471 * @hw: the HW module
472 * @attempts: max number of polls of the status register
473 * @delay: delay between polls in ms
474 *
475 * Wait for a flash operation to complete by polling the status register.
476 */
477static int
478csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
479{
480 int ret;
481 uint32_t status;
482
483 while (1) {
484 ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
485 if (ret != 0)
486 return ret;
487
488 ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
489 if (ret != 0)
490 return ret;
491
492 if (!(status & 1))
493 return 0;
494 if (--attempts == 0)
495 return -EAGAIN;
496 if (delay)
497 msleep(delay);
498 }
499}
500
501/*
502 * csio_hw_read_flash - read words from serial flash
503 * @hw: the HW module
504 * @addr: the start address for the read
505 * @nwords: how many 32-bit words to read
506 * @data: where to store the read data
507 * @byte_oriented: whether to store data as bytes or as words
508 *
509 * Read the specified number of 32-bit words from the serial flash.
510 * If @byte_oriented is set the read data is stored as a byte array
511 * (i.e., big-endian), otherwise as 32-bit words in the platform's
512 * natural endianess.
513 */
514static int
515csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
516 uint32_t *data, int32_t byte_oriented)
517{
518 int ret;
519
520 if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
521 return -EINVAL;
522
523 addr = swab32(addr) | SF_RD_DATA_FAST;
524
525 ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
526 if (ret != 0)
527 return ret;
528
529 ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
530 if (ret != 0)
531 return ret;
532
533 for ( ; nwords; nwords--, data++) {
534 ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
535 if (nwords == 1)
536 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
537 if (ret)
538 return ret;
539 if (byte_oriented)
540 *data = htonl(*data);
541 }
542 return 0;
543}
544
/*
 * csio_hw_write_flash - write up to a page of data to the serial flash
 * @hw: the hw
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address. All the data must be written to the same page.
 * The page is read back afterwards and compared to verify the write.
 * Returns 0 on success or a negative errno; unlocks SF on all paths.
 */
static int
csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
		    uint32_t n, const uint8_t *data)
{
	int ret = -EINVAL;
	uint32_t buf[64];
	uint32_t i, c, left, val, offset = addr & 0xff;

	/* Reject writes beyond the flash or crossing a 256-byte page. */
	if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* Page-program command with byte-swapped address. */
	val = swab32(addr) | SF_PROG_PAGE;

	ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
	if (ret != 0)
		goto unlock;

	ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
	if (ret != 0)
		goto unlock;

	/* Stream the payload 4 bytes at a time, packed big-endian. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = csio_hw_flash_wait_op(hw, 8, 1);
	if (ret)
		goto unlock;

	csio_wr_reg32(hw, 0, SF_OP);	    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* @data has advanced past the payload: rewind by n to compare. */
	if (memcmp(data - n, (uint8_t *)buf + offset, n)) {
		csio_err(hw,
			 "failed to correctly write the flash page at %#x\n",
			 addr);
		return -EINVAL;
	}

	return 0;

unlock:
	csio_wr_reg32(hw, 0, SF_OP);	    /* unlock SF */
	return ret;
}
609
610/*
611 * csio_hw_flash_erase_sectors - erase a range of flash sectors
612 * @hw: the HW module
613 * @start: the first sector to erase
614 * @end: the last sector to erase
615 *
616 * Erases the sectors in the given inclusive range.
617 */
618static int
619csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
620{
621 int ret = 0;
622
623 while (start <= end) {
624
625 ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
626 if (ret != 0)
627 goto out;
628
629 ret = csio_hw_sf1_write(hw, 4, 0, 1,
630 SF_ERASE_SECTOR | (start << 8));
631 if (ret != 0)
632 goto out;
633
634 ret = csio_hw_flash_wait_op(hw, 14, 500);
635 if (ret != 0)
636 goto out;
637
638 start++;
639 }
640out:
641 if (ret)
642 csio_err(hw, "erase of flash sector %d failed, error %d\n",
643 start, ret);
644 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
645 return 0;
646}
647
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530648static void
649csio_hw_print_fw_version(struct csio_hw *hw, char *str)
650{
651 csio_info(hw, "%s: %u.%u.%u.%u\n", str,
Hariprasad Shenaib2e1a3f2014-11-21 12:52:05 +0530652 FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
653 FW_HDR_FW_VER_MINOR_G(hw->fwrev),
654 FW_HDR_FW_VER_MICRO_G(hw->fwrev),
655 FW_HDR_FW_VER_BUILD_G(hw->fwrev));
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530656}
657
658/*
659 * csio_hw_get_fw_version - read the firmware version
660 * @hw: HW module
661 * @vers: where to place the version
662 *
663 * Reads the FW version from flash.
664 */
665static int
666csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
667{
668 return csio_hw_read_flash(hw, FW_IMG_START +
669 offsetof(struct fw_hdr, fw_ver), 1,
670 vers, 0);
671}
672
673/*
674 * csio_hw_get_tp_version - read the TP microcode version
675 * @hw: HW module
676 * @vers: where to place the version
677 *
678 * Reads the TP microcode version from flash.
679 */
680static int
681csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
682{
683 return csio_hw_read_flash(hw, FLASH_FW_START +
684 offsetof(struct fw_hdr, tp_microcode_ver), 1,
685 vers, 0);
686}
687
688/*
689 * csio_hw_check_fw_version - check if the FW is compatible with
690 * this driver
691 * @hw: HW module
692 *
693 * Checks if an adapter's FW is compatible with the driver. Returns 0
694 * if there's exact match, a negative error if the version could not be
695 * read or there's a major/minor version mismatch/minor.
696 */
697static int
698csio_hw_check_fw_version(struct csio_hw *hw)
699{
700 int ret, major, minor, micro;
701
702 ret = csio_hw_get_fw_version(hw, &hw->fwrev);
703 if (!ret)
704 ret = csio_hw_get_tp_version(hw, &hw->tp_vers);
705 if (ret)
706 return ret;
707
Hariprasad Shenaib2e1a3f2014-11-21 12:52:05 +0530708 major = FW_HDR_FW_VER_MAJOR_G(hw->fwrev);
709 minor = FW_HDR_FW_VER_MINOR_G(hw->fwrev);
710 micro = FW_HDR_FW_VER_MICRO_G(hw->fwrev);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530711
Arvind Bhushan7cc16382013-03-14 05:09:08 +0000712 if (major != FW_VERSION_MAJOR(hw)) { /* major mismatch - fail */
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530713 csio_err(hw, "card FW has major version %u, driver wants %u\n",
Arvind Bhushan7cc16382013-03-14 05:09:08 +0000714 major, FW_VERSION_MAJOR(hw));
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530715 return -EINVAL;
716 }
717
Arvind Bhushan7cc16382013-03-14 05:09:08 +0000718 if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw))
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530719 return 0; /* perfect match */
720
721 /* Minor/micro version mismatch */
722 return -EINVAL;
723}
724
/*
 * csio_hw_fw_dload - download firmware.
 * @hw: HW module
 * @fw_data: firmware image to write.
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash after
 * validating its size, header length field and word checksum. The image
 * version field is written last so a partially written image is left
 * with an invalid version. Returns 0 on success or a negative errno.
 */
static int
csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
{
	uint32_t csum;
	int32_t addr;
	int ret;
	uint32_t i;
	uint8_t first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
	uint32_t sf_sec_size;

	/* Flash geometry must have been discovered first. */
	if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
		csio_err(hw, "Serial Flash data invalid\n");
		return -EINVAL;
	}

	if (!size) {
		csio_err(hw, "FW image has no data\n");
		return -EINVAL;
	}

	if (size & 511) {
		csio_err(hw, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}

	/* Header carries the image length in 512-byte units. */
	if (ntohs(hdr->len512) * 512 != size) {
		csio_err(hw, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}

	if (size > FW_MAX_SIZE) {
		csio_err(hw, "FW image too large, max is %u bytes\n",
			 FW_MAX_SIZE);
		return -EINVAL;
	}

	/* Big-endian word sum of a valid image is all-ones. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
	i = DIV_ROUND_UP(size, sf_sec_size);	    /* # of sectors spanned */

	csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
		 FW_START_SEC, FW_START_SEC + i - 1);

	ret = csio_hw_flash_erase_sectors(hw, FW_START_SEC,
					  FW_START_SEC + i - 1);
	if (ret) {
		csio_err(hw, "Flash Erase failed\n");
		goto out;
	}

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails. Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = csio_hw_write_flash(hw, FW_IMG_START, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
		 FW_IMG_START, FW_IMG_START + size);

	/* Write the remaining pages (first page already written above). */
	addr = FW_IMG_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	/* Finally patch in the real version word to mark the image valid. */
	ret = csio_hw_write_flash(hw,
				  FW_IMG_START +
					offsetof(struct fw_hdr, fw_ver),
				  sizeof(hdr->fw_ver),
				  (const uint8_t *)&hdr->fw_ver);

out:
	if (ret)
		csio_err(hw, "firmware download failed, error %d\n", ret);
	return ret;
}
826
827static int
828csio_hw_get_flash_params(struct csio_hw *hw)
829{
830 int ret;
831 uint32_t info = 0;
832
833 ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
834 if (!ret)
835 ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
836 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
837 if (ret != 0)
838 return ret;
839
840 if ((info & 0xff) != 0x20) /* not a Numonix flash */
841 return -EINVAL;
842 info >>= 16; /* log2 of size */
843 if (info >= 0x14 && info < 0x18)
844 hw->params.sf_nsec = 1 << (info - 16);
845 else if (info == 0x18)
846 hw->params.sf_nsec = 64;
847 else
848 return -EINVAL;
849 hw->params.sf_size = 1 << info;
850
851 return 0;
852}
853
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +0530854/*****************************************************************************/
855/* HW State machine assists */
856/*****************************************************************************/
857
858static int
859csio_hw_dev_ready(struct csio_hw *hw)
860{
861 uint32_t reg;
862 int cnt = 6;
863
864 while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) &&
865 (--cnt != 0))
866 mdelay(100);
867
868 if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) ||
869 (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) {
870 csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
871 return -EIO;
872 }
873
874 hw->pfn = SOURCEPF_GET(reg);
875
876 return 0;
877}
878
/*
 * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
 * @hw: HW module
 * @state: Device state
 *
 * FW_HELLO_CMD has to be polled for completion. On success the firmware
 * tells us which PF is master; if it is not us, we wait (dropping hw->lock
 * around the sleeps) for the master to finish bringing the adapter up,
 * retrying the HELLO up to FW_CMD_HELLO_RETRIES times. *@state is updated
 * from the firmware's reported device state. Returns 0 or negative errno.
 */
static int
csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
{
	struct csio_mb *mbp;
	int rv = 0;
	enum fw_retval retval;
	uint8_t mpfn;
	char state_str[16];
	int retries = FW_CMD_HELLO_RETRIES;

	memset(state_str, 0, sizeof(state_str));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		rv = -ENOMEM;
		CSIO_INC_STATS(hw, n_err_nomem);
		goto out;
	}

retry:
	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
		      hw->pfn, CSIO_MASTER_MAY, NULL);

	rv = csio_mb_issue(hw, mbp);
	if (rv) {
		csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
		goto out_free_mb;
	}

	csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
		rv = -EINVAL;
		goto out_free_mb;
	}

	/* Firmware has designated us to be master */
	if (hw->pfn == mpfn) {
		hw->flags |= CSIO_HWF_MASTER;
	} else if (*state == CSIO_DEV_STATE_UNINIT) {
		/*
		 * If we're not the Master PF then we need to wait around for
		 * the Master PF Driver to finish setting up the adapter.
		 *
		 * Note that we also do this wait if we're a non-Master-capable
		 * PF and there is no current Master PF; a Master PF may show up
		 * momentarily and we wouldn't want to fail pointlessly. (This
		 * can happen when an OS loads lots of different drivers rapidly
		 * at the same time). In this case, the Master PF returned by
		 * the firmware will be PCIE_FW_MASTER_MASK so the test below
		 * will work ...
		 */

		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state. If we see either of these we bail out
		 * and report the issue to the caller. If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again. Otherwise bail with a timeout error.
		 */
		for (;;) {
			uint32_t pcie_fw;

			/* Drop the HW lock while sleeping: other contexts
			 * may need it during the wait. */
			spin_unlock_irq(&hw->lock);
			msleep(50);
			spin_lock_irq(&hw->lock);
			waiting -= 50;

			/*
			 * If neither Error nor Initialialized are indicated
			 * by the firmware keep waiting till we exaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = csio_rd_reg32(hw, PCIE_FW_A);
			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					rv = -ETIMEDOUT;
					break;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR_F) {
					*state = CSIO_DEV_STATE_ERR;
					rv = -ETIMEDOUT;
				} else if (pcie_fw & PCIE_FW_INIT_F)
					*state = CSIO_DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (mpfn == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
				mpfn = PCIE_FW_MASTER_G(pcie_fw);
			break;
		}
		hw->flags &= ~CSIO_HWF_MASTER;
	}

	/* Translate the device state into a log-friendly string. */
	switch (*state) {
	case CSIO_DEV_STATE_UNINIT:
		strcpy(state_str, "Initializing");
		break;
	case CSIO_DEV_STATE_INIT:
		strcpy(state_str, "Initialized");
		break;
	case CSIO_DEV_STATE_ERR:
		strcpy(state_str, "Error");
		break;
	default:
		strcpy(state_str, "Unknown");
		break;
	}

	if (hw->pfn == mpfn)
		csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
			  hw->pfn, state_str);
	else
		csio_info(hw,
			  "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
			  hw->pfn, mpfn, state_str);

out_free_mb:
	mempool_free(mbp, hw->mb_mempool);
out:
	return rv;
}
1027
1028/*
1029 * csio_do_bye - Perform the BYE FW Mailbox command and process response.
1030 * @hw: HW module
1031 *
1032 */
1033static int
1034csio_do_bye(struct csio_hw *hw)
1035{
1036 struct csio_mb *mbp;
1037 enum fw_retval retval;
1038
1039 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1040 if (!mbp) {
1041 CSIO_INC_STATS(hw, n_err_nomem);
1042 return -ENOMEM;
1043 }
1044
1045 csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
1046
1047 if (csio_mb_issue(hw, mbp)) {
1048 csio_err(hw, "Issue of BYE command failed\n");
1049 mempool_free(mbp, hw->mb_mempool);
1050 return -EINVAL;
1051 }
1052
1053 retval = csio_mb_fw_retval(mbp);
1054 if (retval != FW_SUCCESS) {
1055 mempool_free(mbp, hw->mb_mempool);
1056 return -EINVAL;
1057 }
1058
1059 mempool_free(mbp, hw->mb_mempool);
1060
1061 return 0;
1062}
1063
1064/*
1065 * csio_do_reset- Perform the device reset.
1066 * @hw: HW module
1067 * @fw_rst: FW reset
1068 *
1069 * If fw_rst is set, issues FW reset mbox cmd otherwise
1070 * does PIO reset.
1071 * Performs reset of the function.
1072 */
1073static int
1074csio_do_reset(struct csio_hw *hw, bool fw_rst)
1075{
1076 struct csio_mb *mbp;
1077 enum fw_retval retval;
1078
1079 if (!fw_rst) {
1080 /* PIO reset */
1081 csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
1082 mdelay(2000);
1083 return 0;
1084 }
1085
1086 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1087 if (!mbp) {
1088 CSIO_INC_STATS(hw, n_err_nomem);
1089 return -ENOMEM;
1090 }
1091
1092 csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
1093 PIORSTMODE | PIORST, 0, NULL);
1094
1095 if (csio_mb_issue(hw, mbp)) {
1096 csio_err(hw, "Issue of RESET command failed.n");
1097 mempool_free(mbp, hw->mb_mempool);
1098 return -EINVAL;
1099 }
1100
1101 retval = csio_mb_fw_retval(mbp);
1102 if (retval != FW_SUCCESS) {
1103 csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
1104 mempool_free(mbp, hw->mb_mempool);
1105 return -EINVAL;
1106 }
1107
1108 mempool_free(mbp, hw->mb_mempool);
1109
1110 return 0;
1111}
1112
1113static int
1114csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
1115{
1116 struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;
1117 uint16_t caps;
1118
1119 caps = ntohs(rsp->fcoecaps);
1120
1121 if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {
1122 csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
1123 return -EINVAL;
1124 }
1125
1126 if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {
1127 csio_err(hw, "No FCoE Control Offload capability\n");
1128 return -EINVAL;
1129 }
1130
1131 return 0;
1132}
1133
/*
 * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET
 * @hw: the HW module
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @force: force uP into RESET even if FW RESET command fails
 *
 * Issues a RESET command to firmware (if desired) with a HALT indication
 * and then puts the microprocessor into RESET state. The RESET command
 * will only be issued if a legitimate mailbox is provided (mbox <=
 * PCIE_FW_MASTER_M).
 *
 * This is generally used in order for the host to safely manipulate the
 * adapter without fear of conflicting with whatever the firmware might
 * be doing. The only way out of this state is to RESTART the firmware
 * ...
 *
 * Returns 0 on success, -ENOMEM if no mailbox could be allocated, and
 * -EINVAL if issuing the RESET command failed or firmware rejected it.
 */
static int
csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
{
	enum fw_retval retval = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= PCIE_FW_MASTER_M) {
		struct csio_mb *mbp;

		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp) {
			CSIO_INC_STATS(hw, n_err_nomem);
			return -ENOMEM;
		}

		/* RESET with the HALT flag set: halt rather than restart. */
		csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
			      PIORSTMODE | PIORST, FW_RESET_CMD_HALT_F,
			      NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of RESET command failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		mempool_free(mbp, hw->mb_mempool);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET. This can be useful if the firmware is hung or even
	 * missing ... We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability. This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (retval == 0 || force) {
		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F,
				   PCIE_FW_HALT_F);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return retval ? -EINVAL : 0;
}
1207
/*
 * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
 * @hw: the HW module
 * @mbox: mailbox used to halt the firmware; determines whether we may ask
 *	the firmware itself to perform the RESET (mbox <= PCIE_FW_MASTER_M)
 * @reset: if we want to do a RESET to restart things
 *
 * Restart firmware previously halted by csio_hw_fw_halt(). On successful
 * return the previous PF Master remains as the new PF Master and there
 * is no need to issue a new HELLO command, etc.
 *
 * We do this in two ways:
 *
 * 1. If we're dealing with newer firmware we'll simply want to take
 * the chip's microprocessor out of RESET. This will cause the
 * firmware to start up from its start vector. And then we'll loop
 * until the firmware indicates it's started again (PCIE_FW.HALT
 * reset to 0) or we timeout.
 *
 * 2. If we're dealing with older firmware then we'll need to RESET
 * the chip since older firmware won't recognize the PCIE_FW.HALT
 * flag and automatically RESET itself on startup.
 */
static int
csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET. If that works, great and we can
		 * return success. Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= PCIE_FW_MASTER_M) {
			csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
			msleep(100);
			if (csio_do_reset(hw, true) == 0)
				return 0;
		}

		/* Fallback: whack the whole chip via the PL_RST register. */
		csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
		msleep(2000);
	} else {
		int ms;

		/*
		 * Take the uP out of RESET, then poll for the firmware to
		 * clear its HALT flag, indicating it has started again.
		 */
		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}
1270
1271/*
1272 * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
1273 * @hw: the HW module
1274 * @mbox: mailbox to use for the FW RESET command (if desired)
1275 * @fw_data: the firmware image to write
1276 * @size: image size
1277 * @force: force upgrade even if firmware doesn't cooperate
1278 *
1279 * Perform all of the steps necessary for upgrading an adapter's
1280 * firmware image. Normally this requires the cooperation of the
1281 * existing firmware in order to halt all existing activities
1282 * but if an invalid mailbox token is passed in we skip that step
1283 * (though we'll still put the adapter microprocessor into RESET in
1284 * that case).
1285 *
1286 * On successful return the new firmware will have been loaded and
1287 * the adapter will have been fully RESET losing all previous setup
1288 * state. On unsuccessful return the adapter may be completely hosed ...
1289 * positive errno indicates that the adapter is ~probably~ intact, a
1290 * negative errno indicates that things are looking bad ...
1291 */
1292static int
1293csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
1294 const u8 *fw_data, uint32_t size, int32_t force)
1295{
1296 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
1297 int reset, ret;
1298
1299 ret = csio_hw_fw_halt(hw, mbox, force);
1300 if (ret != 0 && !force)
1301 return ret;
1302
1303 ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
1304 if (ret != 0)
1305 return ret;
1306
1307 /*
1308 * Older versions of the firmware don't understand the new
1309 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
1310 * restart. So for newly loaded older firmware we'll have to do the
1311 * RESET for it so it starts up on a clean slate. We can tell if
1312 * the newly loaded firmware will handle this right by checking
1313 * its header flags to see if it advertises the capability.
1314 */
1315 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
1316 return csio_hw_fw_restart(hw, mbox, reset);
1317}
1318
1319
1320/*
1321 * csio_hw_fw_config_file - setup an adapter via a Configuration File
1322 * @hw: the HW module
1323 * @mbox: mailbox to use for the FW command
1324 * @mtype: the memory type where the Configuration File is located
1325 * @maddr: the memory address where the Configuration File is located
1326 * @finiver: return value for CF [fini] version
1327 * @finicsum: return value for CF [fini] checksum
1328 * @cfcsum: return value for CF computed checksum
1329 *
1330 * Issue a command to get the firmware to process the Configuration
1331 * File located at the specified mtype/maddress. If the Configuration
1332 * File is processed successfully and return value pointers are
1333 * provided, the Configuration File "[fini] section version and
1334 * checksum values will be returned along with the computed checksum.
1335 * It's up to the caller to decide how it wants to respond to the
1336 * checksums not matching but it recommended that a prominant warning
1337 * be emitted in order to help people rapidly identify changed or
1338 * corrupted Configuration Files.
1339 *
1340 * Also note that it's possible to modify things like "niccaps",
1341 * "toecaps",etc. between processing the Configuration File and telling
1342 * the firmware to use the new configuration. Callers which want to
1343 * do this will need to "hand-roll" their own CAPS_CONFIGS commands for
1344 * Configuration Files if they want to do this.
1345 */
1346static int
1347csio_hw_fw_config_file(struct csio_hw *hw,
1348 unsigned int mtype, unsigned int maddr,
1349 uint32_t *finiver, uint32_t *finicsum, uint32_t *cfcsum)
1350{
1351 struct csio_mb *mbp;
1352 struct fw_caps_config_cmd *caps_cmd;
1353 int rv = -EINVAL;
1354 enum fw_retval ret;
1355
1356 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1357 if (!mbp) {
1358 CSIO_INC_STATS(hw, n_err_nomem);
1359 return -ENOMEM;
1360 }
1361 /*
1362 * Tell the firmware to process the indicated Configuration File.
1363 * If there are no errors and the caller has provided return value
1364 * pointers for the [fini] section version, checksum and computed
1365 * checksum, pass those back to the caller.
1366 */
1367 caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
1368 CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
1369 caps_cmd->op_to_write =
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05301370 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
1371 FW_CMD_REQUEST_F |
1372 FW_CMD_READ_F);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301373 caps_cmd->cfvalid_to_len16 =
Hariprasad Shenai51678652014-11-21 12:52:02 +05301374 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
1375 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
1376 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301377 FW_LEN16(*caps_cmd));
1378
1379 if (csio_mb_issue(hw, mbp)) {
1380 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
1381 goto out;
1382 }
1383
1384 ret = csio_mb_fw_retval(mbp);
1385 if (ret != FW_SUCCESS) {
1386 csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
1387 goto out;
1388 }
1389
1390 if (finiver)
1391 *finiver = ntohl(caps_cmd->finiver);
1392 if (finicsum)
1393 *finicsum = ntohl(caps_cmd->finicsum);
1394 if (cfcsum)
1395 *cfcsum = ntohl(caps_cmd->cfcsum);
1396
1397 /* Validate device capabilities */
1398 if (csio_hw_validate_caps(hw, mbp)) {
1399 rv = -ENOENT;
1400 goto out;
1401 }
1402
1403 /*
1404 * And now tell the firmware to use the configuration we just loaded.
1405 */
1406 caps_cmd->op_to_write =
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05301407 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
1408 FW_CMD_REQUEST_F |
1409 FW_CMD_WRITE_F);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301410 caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));
1411
1412 if (csio_mb_issue(hw, mbp)) {
1413 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
1414 goto out;
1415 }
1416
1417 ret = csio_mb_fw_retval(mbp);
1418 if (ret != FW_SUCCESS) {
1419 csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
1420 goto out;
1421 }
1422
1423 rv = 0;
1424out:
1425 mempool_free(mbp, hw->mb_mempool);
1426 return rv;
1427}
1428
/*
 * csio_get_device_params - Get device parameters.
 * @hw: HW module
 *
 * Reads the port vector, core clock and the EQ/IQ id ranges from the
 * firmware via a single FW_PARAMS_CMD and caches them in @hw and its
 * work-request module. Also assigns port ids from the port vector.
 *
 * Returns 0 on success, -ENOMEM if no mailbox could be allocated, or
 * -EINVAL if the FW_PARAMS_CMD could not be issued or failed.
 */
static int
csio_get_device_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_mb *mbp;
	enum fw_retval retval;
	u32 param[6];
	int i, j = 0;

	/* Initialize portids to -1 */
	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		hw->pport[i].portid = -1;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get port vec information. */
	param[0] = FW_PARAM_DEV(PORTVEC);

	/* Get Core clock. */
	param[1] = FW_PARAM_DEV(CCLK);

	/* Get EQ id start and end. */
	param[2] = FW_PARAM_PFVF(EQ_START);
	param[3] = FW_PARAM_PFVF(EQ_END);

	/* Get IQ id start and end. */
	param[4] = FW_PARAM_PFVF(IQFLINT_START);
	param[5] = FW_PARAM_PFVF(IQFLINT_END);

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(param), param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
			ARRAY_SIZE(param), param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
				retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* cache the information. */
	hw->port_vec = param[0];
	hw->vpd.cclk = param[1];
	wrm->fw_eq_start = param[2];
	wrm->fw_iq_start = param[4];

	/* Using FW configured max iqs & eqs */
	if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
		!csio_is_hw_master(hw)) {
		/* id ranges are inclusive, hence the +1 */
		hw->cfg_niq = param[5] - param[4] + 1;
		hw->cfg_neq = param[3] - param[2] + 1;
		csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
			 hw->cfg_niq, hw->cfg_neq);
	}

	hw->port_vec &= csio_port_mask;

	hw->num_pports	= hweight32(hw->port_vec);

	csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
		 hw->port_vec, hw->num_pports);

	/* Assign the i-th discovered port the i-th set bit's position. */
	for (i = 0; i < hw->num_pports; i++) {
		while ((hw->port_vec & (1 << j)) == 0)
			j++;
		hw->pport[i].portid = j++;
		csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
	}
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
1516
1517
1518/*
1519 * csio_config_device_caps - Get and set device capabilities.
1520 * @hw: HW module
1521 *
1522 */
1523static int
1524csio_config_device_caps(struct csio_hw *hw)
1525{
1526 struct csio_mb *mbp;
1527 enum fw_retval retval;
1528 int rv = -EINVAL;
1529
1530 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1531 if (!mbp) {
1532 CSIO_INC_STATS(hw, n_err_nomem);
1533 return -ENOMEM;
1534 }
1535
1536 /* Get device capabilities */
1537 csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);
1538
1539 if (csio_mb_issue(hw, mbp)) {
1540 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
1541 goto out;
1542 }
1543
1544 retval = csio_mb_fw_retval(mbp);
1545 if (retval != FW_SUCCESS) {
1546 csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
1547 goto out;
1548 }
1549
1550 /* Validate device capabilities */
1551 if (csio_hw_validate_caps(hw, mbp))
1552 goto out;
1553
1554 /* Don't config device capabilities if already configured */
1555 if (hw->fw_state == CSIO_DEV_STATE_INIT) {
1556 rv = 0;
1557 goto out;
1558 }
1559
1560 /* Write back desired device capabilities */
1561 csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
1562 false, true, NULL);
1563
1564 if (csio_mb_issue(hw, mbp)) {
1565 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
1566 goto out;
1567 }
1568
1569 retval = csio_mb_fw_retval(mbp);
1570 if (retval != FW_SUCCESS) {
1571 csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
1572 goto out;
1573 }
1574
1575 rv = 0;
1576out:
1577 mempool_free(mbp, hw->mb_mempool);
1578 return rv;
1579}
1580
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301581/*
1582 * csio_enable_ports - Bring up all available ports.
1583 * @hw: HW module.
1584 *
1585 */
1586static int
1587csio_enable_ports(struct csio_hw *hw)
1588{
1589 struct csio_mb *mbp;
1590 enum fw_retval retval;
1591 uint8_t portid;
1592 int i;
1593
1594 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1595 if (!mbp) {
1596 CSIO_INC_STATS(hw, n_err_nomem);
1597 return -ENOMEM;
1598 }
1599
1600 for (i = 0; i < hw->num_pports; i++) {
1601 portid = hw->pport[i].portid;
1602
1603 /* Read PORT information */
1604 csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
1605 false, 0, 0, NULL);
1606
1607 if (csio_mb_issue(hw, mbp)) {
1608 csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
1609 portid);
1610 mempool_free(mbp, hw->mb_mempool);
1611 return -EINVAL;
1612 }
1613
1614 csio_mb_process_read_port_rsp(hw, mbp, &retval,
1615 &hw->pport[i].pcap);
1616 if (retval != FW_SUCCESS) {
1617 csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
1618 portid, retval);
1619 mempool_free(mbp, hw->mb_mempool);
1620 return -EINVAL;
1621 }
1622
1623 /* Write back PORT information */
1624 csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true,
1625 (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL);
1626
1627 if (csio_mb_issue(hw, mbp)) {
1628 csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
1629 portid);
1630 mempool_free(mbp, hw->mb_mempool);
1631 return -EINVAL;
1632 }
1633
1634 retval = csio_mb_fw_retval(mbp);
1635 if (retval != FW_SUCCESS) {
1636 csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n",
1637 portid, retval);
1638 mempool_free(mbp, hw->mb_mempool);
1639 return -EINVAL;
1640 }
1641
1642 } /* For all ports */
1643
1644 mempool_free(mbp, hw->mb_mempool);
1645
1646 return 0;
1647}
1648
/*
 * csio_get_fcoe_resinfo - Read fcoe fw resource info.
 * @hw: HW module
 * Issued with lock held.
 *
 * Queries the firmware with FW_FCOE_RES_INFO_CMD and caches the FCoE
 * resource limits and current usage (timeout values, exchanges,
 * sessions, FCFs, vNPs) in hw->fres_info.
 *
 * Returns 0 on success, -ENOMEM on mailbox allocation failure, or
 * -EINVAL if the command could not be issued or failed in firmware.
 */
static int
csio_get_fcoe_resinfo(struct csio_hw *hw)
{
	struct csio_fcoe_res_info *res_info = &hw->fres_info;
	struct fw_fcoe_res_info_cmd *rsp;
	struct csio_mb *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get FCoE FW resource information */
	csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
	retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* Copy out the resource limits/usage; 16-bit timeouts, 32-bit
	 * counters, all in network byte order in the response.
	 */
	res_info->e_d_tov = ntohs(rsp->e_d_tov);
	res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);
	res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);
	res_info->r_r_tov = ntohs(rsp->r_r_tov);
	res_info->max_xchgs = ntohl(rsp->max_xchgs);
	res_info->max_ssns = ntohl(rsp->max_ssns);
	res_info->used_xchgs = ntohl(rsp->used_xchgs);
	res_info->used_ssns = ntohl(rsp->used_ssns);
	res_info->max_fcfs = ntohl(rsp->max_fcfs);
	res_info->max_vnps = ntohl(rsp->max_vnps);
	res_info->used_fcfs = ntohl(rsp->used_fcfs);
	res_info->used_vnps = ntohl(rsp->used_vnps);

	csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
		 res_info->max_xchgs);
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
1705
1706static int
1707csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
1708{
1709 struct csio_mb *mbp;
1710 enum fw_retval retval;
1711 u32 _param[1];
1712
1713 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1714 if (!mbp) {
1715 CSIO_INC_STATS(hw, n_err_nomem);
1716 return -ENOMEM;
1717 }
1718
1719 /*
1720 * Find out whether we're dealing with a version of
1721 * the firmware which has configuration file support.
1722 */
Hariprasad Shenai51678652014-11-21 12:52:02 +05301723 _param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
1724 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301725
1726 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
1727 ARRAY_SIZE(_param), _param, NULL, false, NULL);
1728 if (csio_mb_issue(hw, mbp)) {
1729 csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
1730 mempool_free(mbp, hw->mb_mempool);
1731 return -EINVAL;
1732 }
1733
1734 csio_mb_process_read_params_rsp(hw, mbp, &retval,
1735 ARRAY_SIZE(_param), _param);
1736 if (retval != FW_SUCCESS) {
1737 csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
1738 retval);
1739 mempool_free(mbp, hw->mb_mempool);
1740 return -EINVAL;
1741 }
1742
1743 mempool_free(mbp, hw->mb_mempool);
1744 *param = _param[0];
1745
1746 return 0;
1747}
1748
1749static int
1750csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
1751{
1752 int ret = 0;
1753 const struct firmware *cf;
1754 struct pci_dev *pci_dev = hw->pdev;
1755 struct device *dev = &pci_dev->dev;
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301756 unsigned int mtype = 0, maddr = 0;
1757 uint32_t *cfg_data;
1758 int value_to_add = 0;
1759
Arvind Bhushan7cc16382013-03-14 05:09:08 +00001760 if (request_firmware(&cf, CSIO_CF_FNAME(hw), dev) < 0) {
1761 csio_err(hw, "could not find config file %s, err: %d\n",
1762 CSIO_CF_FNAME(hw), ret);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301763 return -ENOENT;
1764 }
1765
1766 if (cf->size%4 != 0)
1767 value_to_add = 4 - (cf->size % 4);
1768
1769 cfg_data = kzalloc(cf->size+value_to_add, GFP_KERNEL);
Jesper Juhl02db3db2012-12-26 21:31:51 +01001770 if (cfg_data == NULL) {
1771 ret = -ENOMEM;
1772 goto leave;
1773 }
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301774
1775 memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
Jesper Juhl02db3db2012-12-26 21:31:51 +01001776 if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
1777 ret = -EINVAL;
1778 goto leave;
1779 }
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301780
Hariprasad Shenai51678652014-11-21 12:52:02 +05301781 mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
1782 maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301783
1784 ret = csio_memory_write(hw, mtype, maddr,
1785 cf->size + value_to_add, cfg_data);
Arvind Bhushan7cc16382013-03-14 05:09:08 +00001786
1787 if ((ret == 0) && (value_to_add != 0)) {
1788 union {
1789 u32 word;
1790 char buf[4];
1791 } last;
1792 size_t size = cf->size & ~0x3;
1793 int i;
1794
1795 last.word = cfg_data[size >> 2];
1796 for (i = value_to_add; i < 4; i++)
1797 last.buf[i] = 0;
1798 ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
1799 }
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301800 if (ret == 0) {
Arvind Bhushan7cc16382013-03-14 05:09:08 +00001801 csio_info(hw, "config file upgraded to %s\n",
1802 CSIO_CF_FNAME(hw));
1803 snprintf(path, 64, "%s%s", "/lib/firmware/", CSIO_CF_FNAME(hw));
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301804 }
1805
Jesper Juhl02db3db2012-12-26 21:31:51 +01001806leave:
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301807 kfree(cfg_data);
1808 release_firmware(cf);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301809 return ret;
1810}
1811
1812/*
1813 * HW initialization: contact FW, obtain config, perform basic init.
1814 *
1815 * If the firmware we're dealing with has Configuration File support, then
1816 * we use that to perform all configuration -- either using the configuration
1817 * file stored in flash on the adapter or using a filesystem-local file
1818 * if available.
1819 *
1820 * If we don't have configuration file support in the firmware, then we'll
1821 * have to set things up the old fashioned way with hard-coded register
1822 * writes and firmware commands ...
1823 */
1824
/*
 * Attempt to initialize the HW via a Firmware Configuration File.
 *
 * Called with hw->lock held (the lock is dropped and re-acquired around
 * the host-filesystem config-file flash). On success the HW init event
 * has been posted; on failure the soft-params flag is cleared so the
 * caller can fall back to hard-wired configuration.
 */
static int
csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
{
	unsigned int mtype, maddr;
	int rv;
	uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
	int using_flash;
	char path[64];

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		rv = csio_do_reset(hw, true);
		if (rv != 0)
			goto bye;
	}

	/*
	 * If we have a configuration file on the host, then use that.
	 * Otherwise, use the configuration file stored in the HW flash ...
	 * The flash helper sleeps (request_firmware), so drop the lock.
	 */
	spin_unlock_irq(&hw->lock);
	rv = csio_hw_flash_config(hw, fw_cfg_param, path);
	spin_lock_irq(&hw->lock);
	if (rv != 0) {
		if (rv == -ENOENT) {
			/*
			 * config file was not found. Use default
			 * config file from flash.
			 */
			mtype = FW_MEMTYPE_CF_FLASH;
			maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
			using_flash = 1;
		} else {
			/*
			 * we revert back to the hardwired config if
			 * flashing failed.
			 */
			goto bye;
		}
	} else {
		mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
		maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
		using_flash = 0;
	}

	hw->cfg_store = (uint8_t)mtype;

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.
	 */
	rv = csio_hw_fw_config_file(hw, mtype, maddr, &finiver,
		&finicsum, &cfcsum);
	if (rv != 0)
		goto bye;

	hw->cfg_finiver = finiver;
	hw->cfg_finicsum = finicsum;
	hw->cfg_cfcsum = cfcsum;
	hw->cfg_csum_status = true;

	if (finicsum != cfcsum) {
		/* Warn but continue: a mismatch usually means the config
		 * file was edited; it is not necessarily fatal.
		 */
		csio_warn(hw,
			  "Config File checksum mismatch: csum=%#x, computed=%#x\n",
			  finicsum, cfcsum);

		hw->cfg_csum_status = false;
	}

	/*
	 * Note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

	/* device parameters */
	rv = csio_get_device_params(hw);
	if (rv != 0)
		goto bye;

	/* Configure SGE */
	csio_wr_sge_init(hw);

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	/* Post event to notify completion of configuration */
	csio_post_event(&hw->sm, CSIO_HWE_INIT);

	csio_info(hw,
	 "Firmware Configuration File %s, version %#x, computed checksum %#x\n",
		  (using_flash ? "in device FLASH" : path), finiver, cfcsum);

	return 0;

	/*
	 * Something bad happened. Return the error ...
	 */
bye:
	hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
	csio_dbg(hw, "Configuration file error %d\n", rv);
	return rv;
}
1936
1937/*
1938 * Attempt to initialize the adapter via hard-coded, driver supplied
1939 * parameters ...
1940 */
1941static int
1942csio_hw_no_fwconfig(struct csio_hw *hw, int reset)
1943{
1944 int rv;
1945 /*
1946 * Reset device if necessary
1947 */
1948 if (reset) {
1949 rv = csio_do_reset(hw, true);
1950 if (rv != 0)
1951 goto out;
1952 }
1953
1954 /* Get and set device capabilities */
1955 rv = csio_config_device_caps(hw);
1956 if (rv != 0)
1957 goto out;
1958
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301959 /* device parameters */
1960 rv = csio_get_device_params(hw);
1961 if (rv != 0)
1962 goto out;
1963
1964 /* Configure SGE */
1965 csio_wr_sge_init(hw);
1966
1967 /* Post event to notify completion of configuration */
1968 csio_post_event(&hw->sm, CSIO_HWE_INIT);
1969
1970out:
1971 return rv;
1972}
1973
1974/*
1975 * Returns -EINVAL if attempts to flash the firmware failed
1976 * else returns 0,
1977 * if flashing was not attempted because the card had the
1978 * latest firmware ECANCELED is returned
1979 */
1980static int
1981csio_hw_flash_fw(struct csio_hw *hw)
1982{
1983 int ret = -ECANCELED;
1984 const struct firmware *fw;
1985 const struct fw_hdr *hdr;
1986 u32 fw_ver;
1987 struct pci_dev *pci_dev = hw->pdev;
1988 struct device *dev = &pci_dev->dev ;
1989
Arvind Bhushan7cc16382013-03-14 05:09:08 +00001990 if (request_firmware(&fw, CSIO_FW_FNAME(hw), dev) < 0) {
1991 csio_err(hw, "could not find firmware image %s, err: %d\n",
1992 CSIO_FW_FNAME(hw), ret);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301993 return -EINVAL;
1994 }
1995
1996 hdr = (const struct fw_hdr *)fw->data;
1997 fw_ver = ntohl(hdr->fw_ver);
Hariprasad Shenaib2e1a3f2014-11-21 12:52:05 +05301998 if (FW_HDR_FW_VER_MAJOR_G(fw_ver) != FW_VERSION_MAJOR(hw))
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05301999 return -EINVAL; /* wrong major version, won't do */
2000
2001 /*
2002 * If the flash FW is unusable or we found something newer, load it.
2003 */
Hariprasad Shenaib2e1a3f2014-11-21 12:52:05 +05302004 if (FW_HDR_FW_VER_MAJOR_G(hw->fwrev) != FW_VERSION_MAJOR(hw) ||
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302005 fw_ver > hw->fwrev) {
2006 ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
2007 /*force=*/false);
2008 if (!ret)
Arvind Bhushan7cc16382013-03-14 05:09:08 +00002009 csio_info(hw,
2010 "firmware upgraded to version %pI4 from %s\n",
2011 &hdr->fw_ver, CSIO_FW_FNAME(hw));
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302012 else
2013 csio_err(hw, "firmware upgrade failed! err=%d\n", ret);
Arvind Bhushan7cc16382013-03-14 05:09:08 +00002014 } else
2015 ret = -EINVAL;
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302016
2017 release_firmware(fw);
2018
2019 return ret;
2020}
2021
2022
/*
 * csio_hw_configure - Configure HW
 * @hw - HW module
 *
 * Brings the adapter from reset to a configured state: waits for the
 * device to become ready, reads chip revision / flash / VPD parameters,
 * exchanges HELLO with the firmware, optionally upgrades the firmware
 * (master PF only, when the running major version mismatches), and then
 * applies either the Firmware Configuration File or the hard-wired
 * driver configuration.  Fatal failures post CSIO_HWE_FATAL to the HW
 * state machine; success paths post CSIO_HWE_INIT (directly or via the
 * config helpers).  Called with hw->lock held (it is dropped around the
 * firmware flash, which sleeps).
 */
static void
csio_hw_configure(struct csio_hw *hw)
{
	int reset = 1;
	int rv;
	u32 param[1];

	rv = csio_hw_dev_ready(hw);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* HW version */
	hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV);

	/* Needed for FW download */
	rv = csio_hw_get_flash_params(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to get serial flash params rv:%d\n", rv);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Set PCIe completion timeout to 4 seconds */
	if (pci_is_pcie(hw->pdev))
		pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2,
				PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);

	/* Map a PCIe memory window for this function's use */
	hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);

	rv = csio_hw_get_fw_version(hw, &hw->fwrev);
	if (rv != 0)
		goto out;

	csio_hw_print_fw_version(hw, "Firmware revision");

	/* HELLO also tells us whether we are the master PF (hw->fw_state) */
	rv = csio_do_hello(hw, &hw->fw_state);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Read vpd */
	rv = csio_hw_get_vpd_params(hw, &hw->vpd);
	if (rv != 0)
		goto out;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		rv = csio_hw_check_fw_version(hw);
		if (rv == -EINVAL) {

			/* Do firmware update */
			/* csio_hw_flash_fw() sleeps; drop the lock around it */
			spin_unlock_irq(&hw->lock);
			rv = csio_hw_flash_fw(hw);
			spin_lock_irq(&hw->lock);

			if (rv == 0) {
				reset = 0;
				/*
				 * Note that the chip was reset as part of the
				 * firmware upgrade so we don't reset it again
				 * below and grab the new firmware version.
				 */
				rv = csio_hw_check_fw_version(hw);
			}
		}
		/*
		 * If the firmware doesn't support Configuration
		 * Files, use the old Driver-based, hard-wired
		 * initialization.  Otherwise, try using the
		 * Configuration File support and fall back to the
		 * Driver-based initialization if there's no
		 * Configuration File found.
		 */
		if (csio_hw_check_fwconfig(hw, param) == 0) {
			rv = csio_hw_use_fwconfig(hw, reset, param);
			if (rv == -ENOENT)
				goto out;
			if (rv != 0) {
				csio_info(hw,
				    "No Configuration File present "
				    "on adapter. Using hard-wired "
				    "configuration parameters.\n");
				rv = csio_hw_no_fwconfig(hw, reset);
			}
		} else {
			rv = csio_hw_no_fwconfig(hw, reset);
		}

		if (rv != 0)
			goto out;

	} else {
		/* Slave PF, or FW already initialized by another function */
		if (hw->fw_state == CSIO_DEV_STATE_INIT) {

			hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

			/* device parameters */
			rv = csio_get_device_params(hw);
			if (rv != 0)
				goto out;

			/* Get device capabilities */
			rv = csio_config_device_caps(hw);
			if (rv != 0)
				goto out;

			/* Configure SGE */
			csio_wr_sge_init(hw);

			/* Post event to notify completion of configuration */
			csio_post_event(&hw->sm, CSIO_HWE_INIT);
			goto out;
		}
	} /* if not master */

out:
	return;
}
2150
/*
 * csio_hw_initialize - Initialize HW
 * @hw - HW module
 *
 * Runs after configuration: a master PF whose firmware is not already
 * initialized issues FW_INITIALIZE via a mailbox, then the function
 * reads FCoE resource limits, sets up the work-request queues (with
 * hw->lock dropped, as queue setup sleeps), marks all port module
 * types unknown, enables the ports (master only), and finally posts
 * CSIO_HWE_INIT_DONE.  On any failure it simply returns, leaving the
 * SM in the initializing state.
 */
static void
csio_hw_initialize(struct csio_hw *hw)
{
	struct csio_mb  *mbp;
	enum fw_retval retval;
	int rv;
	int i;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp)
			goto out;

		csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");
			goto free_and_out;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",
				 retval);
			goto free_and_out;
		}

		mempool_free(mbp, hw->mb_mempool);
	}

	rv = csio_get_fcoe_resinfo(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);
		goto out;
	}

	/* Queue configuration allocates memory and may sleep: drop the lock */
	spin_unlock_irq(&hw->lock);
	rv = csio_config_queues(hw);
	spin_lock_irq(&hw->lock);

	if (rv != 0) {
		csio_err(hw, "Config of queues failed!: %d\n", rv);
		goto out;
	}

	/* Module type is unknown until the first port event arrives */
	for (i = 0; i < hw->num_pports; i++)
		hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		rv = csio_enable_ports(hw);
		if (rv != 0) {
			csio_err(hw, "Failed to enable ports: %d\n", rv);
			goto out;
		}
	}

	csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);
	return;

free_and_out:
	mempool_free(mbp, hw->mb_mempool);
out:
	return;
}
2220
2221#define PF_INTR_MASK (PFSW | PFCIM)
2222
/*
 * csio_hw_intr_enable - Enable HW interrupts
 * @hw: Pointer to HW module.
 *
 * Enable interrupts in HW registers: programs the forwarded-interrupt
 * vector (AIVEC) for MSI/MSI-X, unmasks this PF's PL interrupts
 * (PF_INTR_MASK), turns on mailbox interrupts, and - on the master PF
 * only - disables the serial-flash interrupt, enables the SGE error
 * interrupt set, and sets this PF's bit in PL_INT_MAP0.  Finishes by
 * recording CSIO_HWF_HW_INTR_ENABLED in hw->flags.
 */
static void
csio_hw_intr_enable(struct csio_hw *hw)
{
	uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
	uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
	uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE);

	/*
	 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
	 * by FW, so do nothing for INTX.
	 */
	if (hw->intr_mode == CSIO_IM_MSIX)
		csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
				   AIVEC_V(AIVEC_M), vec);
	else if (hw->intr_mode == CSIO_IM_MSI)
		csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
				   AIVEC_V(AIVEC_M), 0);

	/* Unmask this PF's software/CIM interrupt sources */
	csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE));

	/* Turn on MB interrupts - this will internally flush PIO as well */
	csio_mb_intr_enable(hw);

	/* These are common registers - only a master can modify them */
	if (csio_is_hw_master(hw)) {
		/*
		 * Disable the Serial FLASH interrupt, if enabled!
		 */
		pl &= (~SF);
		csio_wr_reg32(hw, pl, PL_INT_ENABLE);

		/* Enable reporting of the SGE error conditions we handle */
		csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F |
			      EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F |
			      ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F |
			      ERR_DATA_CPL_ON_HIGH_QID1_F |
			      ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
			      ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
			      ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
			      ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F,
			      SGE_INT_ENABLE3_A);
		csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf);
	}

	hw->flags |= CSIO_HWF_HW_INTR_ENABLED;

}
2275
2276/*
2277 * csio_hw_intr_disable - Disable HW interrupts
2278 * @hw: Pointer to HW module.
2279 *
2280 * Turn off Mailbox and PCI_PF_CFG interrupts.
2281 */
2282void
2283csio_hw_intr_disable(struct csio_hw *hw)
2284{
2285 uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
2286
2287 if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
2288 return;
2289
2290 hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;
2291
2292 csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE));
2293 if (csio_is_hw_master(hw))
2294 csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0);
2295
2296 /* Turn off MB interrupts */
2297 csio_mb_intr_disable(hw);
2298
2299}
2300
/*
 * csio_hw_fatal_err - React to an unrecoverable HW error.
 * @hw: HW module
 *
 * Clears GLOBALENABLE in SGE_CONTROL (quiescing the SGE), disables all
 * HW interrupts and logs the condition.  The chip is deliberately not
 * reset so that firmware state remains available for debugging.
 */
void
csio_hw_fatal_err(struct csio_hw *hw)
{
	csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0);
	csio_hw_intr_disable(hw);

	/* Do not reset HW, we may need FW state for debugging */
	csio_fatal(hw, "HW Fatal error encountered!\n");
}
2310
2311/*****************************************************************************/
2312/* START: HW SM */
2313/*****************************************************************************/
2314/*
2315 * csio_hws_uninit - Uninit state
2316 * @hw - HW module
2317 * @evt - Event
2318 *
2319 */
2320static void
2321csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)
2322{
2323 hw->prev_evt = hw->cur_evt;
2324 hw->cur_evt = evt;
2325 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2326
2327 switch (evt) {
2328 case CSIO_HWE_CFG:
2329 csio_set_state(&hw->sm, csio_hws_configuring);
2330 csio_hw_configure(hw);
2331 break;
2332
2333 default:
2334 CSIO_INC_STATS(hw, n_evt_unexp);
2335 break;
2336 }
2337}
2338
/*
 * csio_hws_configuring - Configuring state
 * @hw - HW module
 * @evt - Event
 *
 * CSIO_HWE_INIT advances to the initializing state; CSIO_HWE_INIT_DONE
 * (configuration completed without a separate init step) jumps straight
 * to ready and notifies the lnodes; CSIO_HWE_FATAL falls back to
 * uninit; CSIO_HWE_PCI_REMOVE sends the firmware a BYE.
 */
static void
csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_INIT:
		csio_set_state(&hw->sm, csio_hws_initializing);
		csio_hw_initialize(hw);
		break;

	case CSIO_HWE_INIT_DONE:
		csio_set_state(&hw->sm, csio_hws_ready);
		/* Fan out event to all lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;
	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
2376
/*
 * csio_hws_initializing - Initializing state
 * @hw - HW module
 * @evt - Event
 *
 * CSIO_HWE_INIT_DONE completes bring-up: enter the ready state, notify
 * the lnodes and enable HW interrupts.  CSIO_HWE_FATAL drops back to
 * uninit; CSIO_HWE_PCI_REMOVE sends the firmware a BYE.
 */
static void
csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_INIT_DONE:
		csio_set_state(&hw->sm, csio_hws_ready);

		/* Fan out event to all lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);

		/* Enable interrupts */
		csio_hw_intr_enable(hw);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
2414
/*
 * csio_hws_ready - Ready state
 * @hw - HW module
 * @evt - Event
 *
 * The normal operating state.  Any shutdown-class event (HBA reset,
 * firmware download, suspend, PCI removal, PCI error) is remembered in
 * hw->evtflag - csio_hws_quiescing() dispatches on it later - and
 * starts quiescing: outstanding SCSI I/O is cleaned up (aborted at the
 * target for orderly stops, dropped locally for resets/PCI errors),
 * interrupts, mailboxes, the event queue and the mgmt module are shut
 * down, and CSIO_HWE_QUIESCED is posted to drive the next transition.
 */
static void
csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
{
	/* Remember the event */
	hw->evtflag = evt;

	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
	case CSIO_HWE_FW_DLOAD:
	case CSIO_HWE_SUSPEND:
	case CSIO_HWE_PCI_REMOVE:
	case CSIO_HWE_PCIERR_DETECTED:
		csio_set_state(&hw->sm, csio_hws_quiescing);
		/* cleanup all outstanding cmds */
		if (evt == CSIO_HWE_HBA_RESET ||
		    evt == CSIO_HWE_PCIERR_DETECTED)
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);
		else
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);

		csio_hw_intr_disable(hw);
		csio_hw_mbm_cleanup(hw);
		csio_evtq_stop(hw);
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);
		csio_evtq_flush(hw);
		csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));
		csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
2463
/*
 * csio_hws_quiescing - Quiescing state
 * @hw - HW module
 * @evt - Event
 *
 * On CSIO_HWE_QUIESCED, dispatches on hw->evtflag - the event that
 * started the shutdown, saved by csio_hws_ready() - to decide whether
 * to reset the HBA, remove the device, suspend, or park in the PCI
 * error state.  Note the deliberate fall-through from FW_DLOAD into
 * HBA_RESET: a firmware download is completed by resetting the HBA.
 */
static void
csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_QUIESCED:
		switch (hw->evtflag) {
		case CSIO_HWE_FW_DLOAD:
			csio_set_state(&hw->sm, csio_hws_resetting);
			/* Download firmware */
			/* Fall through */

		case CSIO_HWE_HBA_RESET:
			csio_set_state(&hw->sm, csio_hws_resetting);
			/* Start reset of the HBA */
			csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);
			csio_wr_destroy_queues(hw, false);
			csio_do_reset(hw, false);
			csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);
			break;

		case CSIO_HWE_PCI_REMOVE:
			csio_set_state(&hw->sm, csio_hws_removing);
			csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);
			/* Free queues, telling FW since the device is going */
			csio_wr_destroy_queues(hw, true);
			/* Now send the bye command */
			csio_do_bye(hw);
			break;

		case CSIO_HWE_SUSPEND:
			csio_set_state(&hw->sm, csio_hws_quiesced);
			break;

		case CSIO_HWE_PCIERR_DETECTED:
			csio_set_state(&hw->sm, csio_hws_pcierr);
			csio_wr_destroy_queues(hw, false);
			break;

		default:
			CSIO_INC_STATS(hw, n_evt_unexp);
			break;

		}
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
2523
2524/*
2525 * csio_hws_quiesced - Quiesced state
2526 * @hw - HW module
2527 * @evt - Event
2528 *
2529 */
2530static void
2531csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
2532{
2533 hw->prev_evt = hw->cur_evt;
2534 hw->cur_evt = evt;
2535 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2536
2537 switch (evt) {
2538 case CSIO_HWE_RESUME:
2539 csio_set_state(&hw->sm, csio_hws_configuring);
2540 csio_hw_configure(hw);
2541 break;
2542
2543 default:
2544 CSIO_INC_STATS(hw, n_evt_unexp);
2545 break;
2546 }
2547}
2548
2549/*
2550 * csio_hws_resetting - HW Resetting state
2551 * @hw - HW module
2552 * @evt - Event
2553 *
2554 */
2555static void
2556csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
2557{
2558 hw->prev_evt = hw->cur_evt;
2559 hw->cur_evt = evt;
2560 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2561
2562 switch (evt) {
2563 case CSIO_HWE_HBA_RESET_DONE:
2564 csio_evtq_start(hw);
2565 csio_set_state(&hw->sm, csio_hws_configuring);
2566 csio_hw_configure(hw);
2567 break;
2568
2569 default:
2570 CSIO_INC_STATS(hw, n_evt_unexp);
2571 break;
2572 }
2573}
2574
/*
 * csio_hws_removing - PCI Hotplug removing state
 * @hw - HW module
 * @evt - Event
 *
 * Terminal state during device removal.  The only event acted upon is
 * CSIO_HWE_HBA_RESET, and only on the master PF: the chip is reset by
 * writing PL_RST directly (the mailbox interface is unusable because
 * BYE has already been sent), followed by a 2 second settle delay.
 */
static void
csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
		if (!csio_is_hw_master(hw))
			break;
		/*
		 * The BYE should have already been issued, so we can't
		 * use the mailbox interface. Hence we use the PL_RST
		 * register directly.
		 */
		csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
		csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
		mdelay(2000);
		break;

	/* Should never receive any new events */
	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;

	}
}
2609
2610/*
2611 * csio_hws_pcierr - PCI Error state
2612 * @hw - HW module
2613 * @evt - Event
2614 *
2615 */
2616static void
2617csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
2618{
2619 hw->prev_evt = hw->cur_evt;
2620 hw->cur_evt = evt;
2621 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2622
2623 switch (evt) {
2624 case CSIO_HWE_PCIERR_SLOT_RESET:
2625 csio_evtq_start(hw);
2626 csio_set_state(&hw->sm, csio_hws_configuring);
2627 csio_hw_configure(hw);
2628 break;
2629
2630 default:
2631 CSIO_INC_STATS(hw, n_evt_unexp);
2632 break;
2633 }
2634}
2635
2636/*****************************************************************************/
2637/* END: HW SM */
2638/*****************************************************************************/
2639
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302640/*
2641 * csio_handle_intr_status - table driven interrupt handler
2642 * @hw: HW instance
2643 * @reg: the interrupt status register to process
2644 * @acts: table of interrupt actions
2645 *
2646 * A table driven interrupt handler that applies a set of masks to an
2647 * interrupt status word and performs the corresponding actions if the
2648 * interrupts described by the mask have occured. The actions include
2649 * optionally emitting a warning or alert message. The table is terminated
2650 * by an entry specifying mask 0. Returns the number of fatal interrupt
2651 * conditions.
2652 */
Arvind Bhushan7cc16382013-03-14 05:09:08 +00002653int
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302654csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
2655 const struct intr_info *acts)
2656{
2657 int fatal = 0;
2658 unsigned int mask = 0;
2659 unsigned int status = csio_rd_reg32(hw, reg);
2660
2661 for ( ; acts->mask; ++acts) {
2662 if (!(status & acts->mask))
2663 continue;
2664 if (acts->fatal) {
2665 fatal++;
2666 csio_fatal(hw, "Fatal %s (0x%x)\n",
2667 acts->msg, status & acts->mask);
2668 } else if (acts->msg)
2669 csio_info(hw, "%s (0x%x)\n",
2670 acts->msg, status & acts->mask);
2671 mask |= acts->mask;
2672 }
2673 status &= mask;
2674 if (status) /* clear processed interrupts */
2675 csio_wr_reg32(hw, status, reg);
2676 return fatal;
2677}
2678
/*
 * TP interrupt handler.
 *
 * Processes TP_INT_CAUSE through the table below and escalates to
 * csio_hw_fatal_err() if any fatal condition was reported.
 */
static void csio_tp_intr_handler(struct csio_hw *hw)
{
	static struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info))
		csio_hw_fatal_err(hw);
}
2693
2694/*
2695 * SGE interrupt handler.
2696 */
2697static void csio_sge_intr_handler(struct csio_hw *hw)
2698{
2699 uint64_t v;
2700
2701 static struct intr_info sge_intr_info[] = {
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302702 { ERR_CPL_EXCEED_IQE_SIZE_F,
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302703 "SGE received CPL exceeding IQE size", -1, 1 },
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302704 { ERR_INVALID_CIDX_INC_F,
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302705 "SGE GTS CIDX increment too large", -1, 0 },
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302706 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
2707 { ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 },
2708 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302709 "SGE IQID > 1023 received CPL for FL", -1, 0 },
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302710 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302711 0 },
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302712 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302713 0 },
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302714 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302715 0 },
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302716 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302717 0 },
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302718 { ERR_ING_CTXT_PRIO_F,
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302719 "SGE too many priority ingress contexts", -1, 0 },
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302720 { ERR_EGR_CTXT_PRIO_F,
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302721 "SGE too many priority egress contexts", -1, 0 },
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302722 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
2723 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302724 { 0, NULL, 0, 0 }
2725 };
2726
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302727 v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) |
2728 ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302729 if (v) {
2730 csio_fatal(hw, "SGE parity error (%#llx)\n",
2731 (unsigned long long)v);
2732 csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302733 SGE_INT_CAUSE1_A);
2734 csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302735 }
2736
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302737 v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info);
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302738
Hariprasad Shenaif612b812015-01-05 16:30:43 +05302739 if (csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info) ||
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302740 v != 0)
2741 csio_hw_fatal_err(hw);
2742}
2743
Hariprasad Shenai89c3a862015-01-05 16:30:45 +05302744#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
2745 OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
2746#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
2747 IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05302748
/*
 * CIM interrupt handler.
 *
 * Decodes both the CIM host interrupt cause and the uP accessor cause
 * registers; any fatal condition from either escalates to
 * csio_hw_fatal_err().
 */
static void csio_cim_intr_handler(struct csio_hw *hw)
{
	static struct intr_info cim_intr_info[] = {
		{ PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT_F, "CIM illegal write", -1, 1 },
		{ ILLRDINT_F, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	int fat;

	/* Sum fatal counts from both CIM cause registers */
	fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A,
				      cim_intr_info) +
	      csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A,
				      cim_upintr_info);
	if (fat)
		csio_hw_fatal_err(hw);
}
2805
/*
 * ULP RX interrupt handler.
 *
 * Both conditions (context error, parity error) are treated as fatal.
 */
static void csio_ulprx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ulprx_intr_info[] = {
		{ 0x1800000, "ULPRX context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info))
		csio_hw_fatal_err(hw);
}
2820
/*
 * ULP TX interrupt handler.
 *
 * Per-channel PBL bound errors are logged but non-fatal; a ULPTX
 * parity error is fatal.
 */
static void csio_ulptx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
		csio_hw_fatal_err(hw);
}
2842
/*
 * PM TX interrupt handler.
 *
 * All decoded PM TX conditions are fatal.
 */
static void csio_pmtx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
		{ 0xffffff0, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1,
		  1 },
		{ ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
		{ PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info))
		csio_hw_fatal_err(hw);
}
2865
/*
 * PM RX interrupt handler.
 *
 * All decoded PM RX conditions are fatal.
 */
static void csio_pmrx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
		{ 0x3ffff0, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1,
		  1 },
		{ IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
		{ PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info))
		csio_hw_fatal_err(hw);
}
2885
/*
 * CPL switch interrupt handler.
 *
 * All decoded CPL switch conditions are fatal.
 */
static void csio_cplsw_intr_handler(struct csio_hw *hw)
{
	static struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
		{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info))
		csio_hw_fatal_err(hw);
}
2904
/*
 * LE (Lookup Engine) interrupt handler.
 *
 * LIP conditions are logged but non-fatal; parity, unknown-command and
 * request-queue parity errors are fatal.
 */
static void csio_le_intr_handler(struct csio_hw *hw)
{
	static struct intr_info le_intr_info[] = {
		{ LIPMISS, "LE LIP miss", -1, 0 },
		{ LIP0, "LE 0 LIP error", -1, 0 },
		{ PARITYERR, "LE parity error", -1, 1 },
		{ UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info))
		csio_hw_fatal_err(hw);
}
2922
/*
 * MPS interrupt handler.
 *
 * Decodes all seven MPS interrupt cause registers (Rx, Tx, TRC,
 * statistics SRAM / Tx FIFO / Rx FIFO, classification), clears the
 * top-level MPS_INT_CAUSE (with a read-back to flush the write), and
 * escalates to csio_hw_fatal_err() if any fatal condition was seen.
 */
static void csio_mps_intr_handler(struct csio_hw *hw)
{
	static struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ BUBBLE_F, "MPS Tx underflow", -1, 1 },
		{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR_F, "MPS Tx framing error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
		  -1, 1 },
		{ MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	int fat;

	/* Accumulate fatal counts across all MPS cause registers */
	fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A,
				      mps_rx_intr_info) +
	      csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A,
				      mps_tx_intr_info) +
	      csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A,
				      mps_trc_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
				      mps_stat_sram_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
				      mps_stat_tx_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
				      mps_stat_rx_intr_info) +
	      csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A,
				      mps_cls_intr_info);

	csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A);
	csio_rd_reg32(hw, MPS_INT_CAUSE_A);                    /* flush */
	if (fat)
		csio_hw_fatal_err(hw);
}
2992
#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
		      ECC_UE_INT_CAUSE_F)

/*
 * EDC/MC interrupt handler.
 * @idx: memory controller index (MEM_EDC0, MEM_EDC1, or the MC).
 *
 * Parity and uncorrectable-ECC errors are fatal; correctable ECC
 * errors are counted and logged as warnings only.
 */
static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
{
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	/* Select the cause / ECC-status registers for this controller. */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE_A, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
	} else {
		addr = MC_INT_CAUSE_A;
		cnt_addr = MC_ECC_STATUS_A;
	}

	v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE_F)
		csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
	if (v & ECC_CE_INT_CAUSE_F) {
		uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr));

		/* Presumably resets the CE counter by writing its max
		 * value -- NOTE(review): confirm against T4/T5 docs. */
		csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr);
		csio_warn(hw, "%u %s correctable ECC data error%s\n",
			  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE_F)
		csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);

	/* Ack the handled causes. */
	csio_wr_reg32(hw, v, addr);
	if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
		csio_hw_fatal_err(hw);
}
3030
/*
 * MA (Memory Arbiter) interrupt handler.
 *
 * Logs parity and address wrap-around causes, acks everything that was
 * raised, and unconditionally escalates to a fatal HW error.
 */
static void csio_ma_intr_handler(struct csio_hw *hw)
{
	uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A);

	if (status & MEM_PERR_INT_CAUSE_F)
		csio_fatal(hw, "MA parity error, parity status %#x\n",
			   csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A));
	if (status & MEM_WRAP_INT_CAUSE_F) {
		v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A);
		/* Wrap address field is in 16-byte units, hence << 4. */
		csio_fatal(hw,
			   "MA address wrap-around error by client %u to address %#x\n",
			   MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4);
	}
	/* Ack all raised causes; any MA interrupt is treated as fatal. */
	csio_wr_reg32(hw, status, MA_INT_CAUSE_A);
	csio_hw_fatal_err(hw);
}
3050
/*
 * SMB interrupt handler.
 *
 * All SMB FIFO parity errors are fatal.
 */
static void csio_smb_intr_handler(struct csio_hw *hw)
{
	/* { mask, msg, stat index, fatal } (struct intr_info) */
	static struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }	/* sentinel */
	};

	if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info))
		csio_hw_fatal_err(hw);
}
3066
/*
 * NC-SI interrupt handler.
 *
 * All NC-SI parity errors are fatal.
 */
static void csio_ncsi_intr_handler(struct csio_hw *hw)
{
	/* { mask, msg, stat index, fatal } (struct intr_info) */
	static struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }	/* sentinel */
	};

	if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info))
		csio_hw_fatal_err(hw);
}
3083
/*
 * XGMAC interrupt handler.
 * @port: port index whose MAC raised the interrupt.
 *
 * Only Tx/Rx FIFO parity errors are handled here; both are fatal.
 */
static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
{
	uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port));

	/* Mask down to the causes this handler cares about. */
	v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
	if (!v)
		return;

	if (v & TXFIFO_PRTY_ERR)
		csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
	if (v & RXFIFO_PRTY_ERR)
		csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
	/* Ack the handled bits, then bring the HW down. */
	csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port));
	csio_hw_fatal_err(hw);
}
3102
/*
 * PL interrupt handler.
 *
 * Both listed PL causes are fatal parity errors.
 */
static void csio_pl_intr_handler(struct csio_hw *hw)
{
	/* { mask, msg, stat index, fatal } (struct intr_info) */
	static struct intr_info pl_intr_info[] = {
		{ FATALPERR, "T4 fatal parity error", -1, 1 },
		{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
		{ 0, NULL, 0, 0 }	/* sentinel */
	};

	if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info))
		csio_hw_fatal_err(hw);
}
3117
/*
 * csio_hw_slow_intr_handler - control path interrupt handler
 * @hw: HW module
 *
 * Interrupt handler for non-data global interrupt events, e.g., errors.
 * The designation 'slow' is because it involves register reads, while
 * data interrupts typically don't involve any MMIOs.
 *
 * Returns 1 if a global interrupt was pending and dispatched, 0 if the
 * interrupt was not ours (stray).
 */
int
csio_hw_slow_intr_handler(struct csio_hw *hw)
{
	uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE);

	/* Nothing we own is pending -- count it as unexpected and bail. */
	if (!(cause & CSIO_GLBL_INTR_MASK)) {
		CSIO_INC_STATS(hw, n_plint_unexp);
		return 0;
	}

	csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause);

	CSIO_INC_STATS(hw, n_plint_cnt);

	/* Dispatch each raised cause bit to its per-module handler. */
	if (cause & CIM)
		csio_cim_intr_handler(hw);

	if (cause & MPS)
		csio_mps_intr_handler(hw);

	if (cause & NCSI)
		csio_ncsi_intr_handler(hw);

	if (cause & PL)
		csio_pl_intr_handler(hw);

	if (cause & SMB)
		csio_smb_intr_handler(hw);

	if (cause & XGMAC0)
		csio_xgmac_intr_handler(hw, 0);

	if (cause & XGMAC1)
		csio_xgmac_intr_handler(hw, 1);

	if (cause & XGMAC_KR0)
		csio_xgmac_intr_handler(hw, 2);

	if (cause & XGMAC_KR1)
		csio_xgmac_intr_handler(hw, 3);

	if (cause & PCIE)
		/* PCIe handling is chip (T4/T5) specific. */
		hw->chip_ops->chip_pcie_intr_handler(hw);

	if (cause & MC)
		csio_mem_intr_handler(hw, MEM_MC);

	if (cause & EDC0)
		csio_mem_intr_handler(hw, MEM_EDC0);

	if (cause & EDC1)
		csio_mem_intr_handler(hw, MEM_EDC1);

	if (cause & LE)
		csio_le_intr_handler(hw);

	if (cause & TP)
		csio_tp_intr_handler(hw);

	if (cause & MA)
		csio_ma_intr_handler(hw);

	if (cause & PM_TX)
		csio_pmtx_intr_handler(hw);

	if (cause & PM_RX)
		csio_pmrx_intr_handler(hw);

	if (cause & ULP_RX)
		csio_ulprx_intr_handler(hw);

	if (cause & CPL_SWITCH)
		csio_cplsw_intr_handler(hw);

	if (cause & SGE)
		csio_sge_intr_handler(hw);

	if (cause & ULP_TX)
		csio_ulptx_intr_handler(hw);

	/* Clear the interrupts just processed for which we are the master. */
	csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE);
	csio_rd_reg32(hw, PL_INT_CAUSE); /* flush */

	return 1;
}
3212
/*****************************************************************************
 * HW <--> mailbox interfacing routines.
 ****************************************************************************/
/*
 * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions
 *
 * @data: Private data pointer (the owning csio_hw).
 *
 * Called from worker thread context. Drains the mailbox completion
 * queue into a local list under the HW lock, opportunistically issues
 * the next waiting mailbox, then runs the completion callbacks with
 * the lock released.
 */
static void
csio_mberr_worker(void *data)
{
	struct csio_hw *hw = (struct csio_hw *)data;
	struct csio_mbm *mbm = &hw->mbm;
	LIST_HEAD(cbfn_q);
	struct csio_mb *mbp_next;
	int rv;

	/* Stop the mailbox timer before touching the queues. */
	del_timer_sync(&mbm->timer);

	spin_lock_irq(&hw->lock);
	if (list_empty(&mbm->cbfn_q)) {
		spin_unlock_irq(&hw->lock);
		return;
	}

	/* Move all pending completions to a local list under the lock. */
	list_splice_tail_init(&mbm->cbfn_q, &cbfn_q);
	mbm->stats.n_cbfnq = 0;

	/* Try to start waiting mailboxes */
	if (!list_empty(&mbm->req_q)) {
		mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list);
		list_del_init(&mbp_next->list);

		rv = csio_mb_issue(hw, mbp_next);
		if (rv != 0)
			/* Issue failed: requeue the request. */
			list_add_tail(&mbp_next->list, &mbm->req_q);
		else
			CSIO_DEC_STATS(mbm, n_activeq);
	}
	spin_unlock_irq(&hw->lock);

	/* Now callback completions (outside the lock). */
	csio_mb_completions(hw, &cbfn_q);
}
3259
3260/*
3261 * csio_hw_mb_timer - Top-level Mailbox timeout handler.
3262 *
3263 * @data: private data pointer
3264 *
3265 **/
3266static void
3267csio_hw_mb_timer(uintptr_t data)
3268{
3269 struct csio_hw *hw = (struct csio_hw *)data;
3270 struct csio_mb *mbp = NULL;
3271
3272 spin_lock_irq(&hw->lock);
3273 mbp = csio_mb_tmo_handler(hw);
3274 spin_unlock_irq(&hw->lock);
3275
3276 /* Call back the function for the timed-out Mailbox */
3277 if (mbp)
3278 mbp->mb_cbfn(hw, mbp);
3279
3280}
3281
/*
 * csio_hw_mbm_cleanup - Cleanup Mailbox module.
 * @hw: HW module
 *
 * Called with lock held, should exit with lock held.
 * Cancels outstanding mailboxes (waiting, in-flight) and gathers them
 * into a local queue. Drops lock and calls the completions. Holds
 * lock and returns.
 */
static void
csio_hw_mbm_cleanup(struct csio_hw *hw)
{
	LIST_HEAD(cbfn_q);

	/* Gather cancelled mailboxes under the held lock... */
	csio_mb_cancel_all(hw, &cbfn_q);

	/* ...then run their completion callbacks without the lock. */
	spin_unlock_irq(&hw->lock);
	csio_mb_completions(hw, &cbfn_q);
	spin_lock_irq(&hw->lock);
}
3302
3303/*****************************************************************************
3304 * Event handling
3305 ****************************************************************************/
3306int
3307csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
3308 uint16_t len)
3309{
3310 struct csio_evt_msg *evt_entry = NULL;
3311
3312 if (type >= CSIO_EVT_MAX)
3313 return -EINVAL;
3314
3315 if (len > CSIO_EVT_MSG_SIZE)
3316 return -EINVAL;
3317
3318 if (hw->flags & CSIO_HWF_FWEVT_STOP)
3319 return -EINVAL;
3320
3321 if (list_empty(&hw->evt_free_q)) {
3322 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
3323 type, len);
3324 return -ENOMEM;
3325 }
3326
3327 evt_entry = list_first_entry(&hw->evt_free_q,
3328 struct csio_evt_msg, list);
3329 list_del_init(&evt_entry->list);
3330
3331 /* copy event msg and queue the event */
3332 evt_entry->type = type;
3333 memcpy((void *)evt_entry->data, evt_msg, len);
3334 list_add_tail(&evt_entry->list, &hw->evt_active_q);
3335
3336 CSIO_DEC_STATS(hw, n_evt_freeq);
3337 CSIO_INC_STATS(hw, n_evt_activeq);
3338
3339 return 0;
3340}
3341
/*
 * csio_enqueue_evt_lock - Locked variant of csio_enqueue_evt().
 * @hw: HW module.
 * @type: event type (< CSIO_EVT_MAX).
 * @evt_msg: payload -- either a flat buffer or, when @msg_sg is set, a
 *           struct csio_fl_dma_buf whose freelist buffers are gathered.
 * @len: total payload length (<= CSIO_EVT_MSG_SIZE).
 * @msg_sg: true when @evt_msg is a scatter/gather freelist chain.
 *
 * Takes the HW lock itself (irqsave), so it is callable from any
 * context. Returns 0 on success, -EINVAL/-ENOMEM on failure.
 */
static int
csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
		      uint16_t len, bool msg_sg)
{
	struct csio_evt_msg *evt_entry = NULL;
	struct csio_fl_dma_buf *fl_sg;
	uint32_t off = 0;
	unsigned long flags;
	int n, ret = 0;

	if (type >= CSIO_EVT_MAX)
		return -EINVAL;

	if (len > CSIO_EVT_MSG_SIZE)
		return -EINVAL;

	spin_lock_irqsave(&hw->lock, flags);
	if (hw->flags & CSIO_HWF_FWEVT_STOP) {
		ret = -EINVAL;
		goto out;
	}

	if (list_empty(&hw->evt_free_q)) {
		csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
			 type, len);
		ret = -ENOMEM;
		goto out;
	}

	evt_entry = list_first_entry(&hw->evt_free_q,
				     struct csio_evt_msg, list);
	list_del_init(&evt_entry->list);

	/* copy event msg and queue the event */
	evt_entry->type = type;

	/* If Payload in SG list: gather the freelist buffers into the
	 * entry's flat data area, bounded by both the buffer count and
	 * the declared total length. */
	if (msg_sg) {
		fl_sg = (struct csio_fl_dma_buf *) evt_msg;
		for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) {
			memcpy((void *)((uintptr_t)evt_entry->data + off),
			       fl_sg->flbufs[n].vaddr,
			       fl_sg->flbufs[n].len);
			off += fl_sg->flbufs[n].len;
		}
	} else
		memcpy((void *)evt_entry->data, evt_msg, len);

	list_add_tail(&evt_entry->list, &hw->evt_active_q);
	CSIO_DEC_STATS(hw, n_evt_freeq);
	CSIO_INC_STATS(hw, n_evt_activeq);
out:
	spin_unlock_irqrestore(&hw->lock, flags);
	return ret;
}
3397
3398static void
3399csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry)
3400{
3401 if (evt_entry) {
3402 spin_lock_irq(&hw->lock);
3403 list_del_init(&evt_entry->list);
3404 list_add_tail(&evt_entry->list, &hw->evt_free_q);
3405 CSIO_DEC_STATS(hw, n_evt_activeq);
3406 CSIO_INC_STATS(hw, n_evt_freeq);
3407 spin_unlock_irq(&hw->lock);
3408 }
3409}
3410
3411void
3412csio_evtq_flush(struct csio_hw *hw)
3413{
3414 uint32_t count;
3415 count = 30;
3416 while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) {
3417 spin_unlock_irq(&hw->lock);
3418 msleep(2000);
3419 spin_lock_irq(&hw->lock);
3420 }
3421
3422 CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
3423}
3424
/* Stop accepting new events: csio_enqueue_evt*() will fail with -EINVAL. */
static void
csio_evtq_stop(struct csio_hw *hw)
{
	hw->flags |= CSIO_HWF_FWEVT_STOP;
}
3430
/* Re-enable event enqueueing by clearing the STOP flag. */
static void
csio_evtq_start(struct csio_hw *hw)
{
	hw->flags &= ~CSIO_HWF_FWEVT_STOP;
}
3436
/*
 * csio_evtq_cleanup - Release all event queue entries.
 * @hw: HW module.
 *
 * Moves any outstanding active events onto the free queue, then frees
 * every entry and zeroes the queue statistics.
 */
static void
csio_evtq_cleanup(struct csio_hw *hw)
{
	struct list_head *evt_entry, *next_entry;

	/* Release outstanding events from activeq to freeq*/
	if (!list_empty(&hw->evt_active_q))
		list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);

	hw->stats.n_evt_activeq = 0;
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;

	/* Freeup event entry. kfree() on the list_head pointer is valid
	 * only if 'list' is the first member of struct csio_evt_msg --
	 * NOTE(review): confirm member ordering in the header. */
	list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
		kfree(evt_entry);
		CSIO_DEC_STATS(hw, n_evt_freeq);
	}

	hw->stats.n_evt_freeq = 0;
}
3457
3458
/*
 * csio_process_fwevtq_entry - Parse one FW event queue entry.
 * @hw: HW module.
 * @wr: work request; begins with an RSS header carrying the opcode.
 * @len: length of the entry (unused here).
 * @flb: freelist buffer chain holding the payload for CPL_FW6_PLD.
 * @priv: opaque callback data (unused).
 *
 * Validates the CPL opcode and hands the message off to the event
 * worker via csio_enqueue_evt_lock(); actual processing happens in
 * csio_evtq_worker().
 */
static void
csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
			  struct csio_fl_dma_buf *flb, void *priv)
{
	__u8 op;
	void *msg = NULL;
	uint32_t msg_len = 0;
	bool msg_sg = 0;

	op = ((struct rss_header *) wr)->opcode;
	if (op == CPL_FW6_PLD) {
		CSIO_INC_STATS(hw, n_cpl_fw6_pld);
		/* Payload lives in freelist buffers; must be non-empty. */
		if (!flb || !flb->totlen) {
			CSIO_INC_STATS(hw, n_cpl_unexp);
			return;
		}

		/* Pass the SG chain itself; the enqueue gathers it. */
		msg = (void *) flb;
		msg_len = flb->totlen;
		msg_sg = 1;
	} else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {

		CSIO_INC_STATS(hw, n_cpl_fw6_msg);
		/* skip RSS header */
		msg = (void *)((uintptr_t)wr + sizeof(__be64));
		msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
			   sizeof(struct cpl_fw4_msg);
	} else {
		csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
		CSIO_INC_STATS(hw, n_cpl_unexp);
		return;
	}

	/*
	 * Enqueue event to EventQ. Events processing happens
	 * in Event worker thread context
	 */
	if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
				  (uint16_t)msg_len, msg_sg))
		CSIO_INC_STATS(hw, n_evt_drop);
}
3500
/*
 * csio_evtq_worker - Event queue worker.
 * @work: embedded work_struct inside csio_hw (evtq_work).
 *
 * Repeatedly splices the active event queue onto a local list under
 * the HW lock, then processes each event with the lock released,
 * rechecking the STOP flag per event. Entries are recycled via
 * csio_free_evt(). Clears CSIO_HWF_FWEVT_PENDING when drained.
 */
void
csio_evtq_worker(struct work_struct *work)
{
	struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
	struct list_head *evt_entry, *next_entry;
	LIST_HEAD(evt_q);
	struct csio_evt_msg *evt_msg;
	struct cpl_fw6_msg *msg;
	struct csio_rnode *rn;
	int rv = 0;
	uint8_t evtq_stop = 0;

	csio_dbg(hw, "event worker thread active evts#%d\n",
		 hw->stats.n_evt_activeq);

	spin_lock_irq(&hw->lock);
	while (!list_empty(&hw->evt_active_q)) {
		/* Drain the active queue to a local list, drop the lock. */
		list_splice_tail_init(&hw->evt_active_q, &evt_q);
		spin_unlock_irq(&hw->lock);

		list_for_each_safe(evt_entry, next_entry, &evt_q) {
			evt_msg = (struct csio_evt_msg *) evt_entry;

			/* Drop events if queue is STOPPED */
			spin_lock_irq(&hw->lock);
			if (hw->flags & CSIO_HWF_FWEVT_STOP)
				evtq_stop = 1;
			spin_unlock_irq(&hw->lock);
			if (evtq_stop) {
				CSIO_INC_STATS(hw, n_evt_drop);
				goto free_evt;
			}

			switch (evt_msg->type) {
			case CSIO_EVT_FW:
				msg = (struct cpl_fw6_msg *)(evt_msg->data);

				if ((msg->opcode == CPL_FW6_MSG ||
				     msg->opcode == CPL_FW4_MSG) &&
				    !msg->type) {
					rv = csio_mb_fwevt_handler(hw,
							msg->data);
					if (!rv)
						break;
					/* Handle any remaining fw events */
					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else if (msg->opcode == CPL_FW6_PLD) {

					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else {
					csio_warn(hw,
					     "Unhandled FW msg op %x type %x\n",
						  msg->opcode, msg->type);
					CSIO_INC_STATS(hw, n_evt_drop);
				}
				break;

			case CSIO_EVT_MBX:
				csio_mberr_worker(hw);
				break;

			case CSIO_EVT_DEV_LOSS:
				/* Payload is a csio_rnode pointer by value. */
				memcpy(&rn, evt_msg->data, sizeof(rn));
				csio_rnode_devloss_handler(rn);
				break;

			default:
				csio_warn(hw, "Unhandled event %x on evtq\n",
					  evt_msg->type);
				CSIO_INC_STATS(hw, n_evt_unexp);
				break;
			}
free_evt:
			csio_free_evt(hw, evt_msg);
		}

		spin_lock_irq(&hw->lock);
	}
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
	spin_unlock_irq(&hw->lock);
}
3584
3585int
3586csio_fwevtq_handler(struct csio_hw *hw)
3587{
3588 int rv;
3589
3590 if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
3591 CSIO_INC_STATS(hw, n_int_stray);
3592 return -EINVAL;
3593 }
3594
3595 rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
3596 csio_process_fwevtq_entry, NULL);
3597 return rv;
3598}
3599
3600/****************************************************************************
3601 * Entry points
3602 ****************************************************************************/
3603
3604/* Management module */
3605/*
3606 * csio_mgmt_req_lookup - Lookup the given IO req exist in Active Q.
3607 * mgmt - mgmt module
3608 * @io_req - io request
3609 *
3610 * Return - 0:if given IO Req exists in active Q.
3611 * -EINVAL :if lookup fails.
3612 */
3613int
3614csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
3615{
3616 struct list_head *tmp;
3617
3618 /* Lookup ioreq in the ACTIVEQ */
3619 list_for_each(tmp, &mgmtm->active_q) {
3620 if (io_req == (struct csio_ioreq *)tmp)
3621 return 0;
3622 }
3623 return -EINVAL;
3624}
3625
#define ECM_MIN_TMO 1000 /* Minimum timeout value for req */

/*
 * csio_mgmts_tmo_handler - MGMT IO Timeout handler.
 * @data - Event data (the csio_mgmtm).
 *
 * Decrements each active request's remaining timeout by ECM_MIN_TMO,
 * completing (with -ETIMEDOUT) any request that reaches zero, and
 * re-arms the timer while requests remain. Return - none.
 */
static void
csio_mgmt_tmo_handler(uintptr_t data)
{
	struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data;
	struct list_head *tmp;
	struct csio_ioreq *io_req;

	csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n");

	spin_lock_irq(&mgmtm->hw->lock);

	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		/* Clamp so the counter never underflows. */
		io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO);

		if (!io_req->tmo) {
			/* Dequeue the request from retry Q. Step the cursor
			 * back first so list_for_each() survives the
			 * deletion of the current node. */
			tmp = csio_list_prev(tmp);
			list_del_init(&io_req->sm.sm_list);
			if (io_req->io_cbfn) {
				/* io_req will be freed by completion handler */
				io_req->wr_status = -ETIMEDOUT;
				io_req->io_cbfn(mgmtm->hw, io_req);
			} else {
				CSIO_DB_ASSERT(0);
			}
		}
	}

	/* If retry queue is not empty, re-arm timer */
	if (!list_empty(&mgmtm->active_q))
		mod_timer(&mgmtm->mgmt_timer,
			  jiffies + msecs_to_jiffies(ECM_MIN_TMO));
	spin_unlock_irq(&mgmtm->hw->lock);
}
3669
/*
 * csio_mgmtm_cleanup - Cancel all outstanding mgmt requests.
 * @mgmtm: mgmt module.
 *
 * Called with the HW lock held; the lock is dropped across each sleep
 * while waiting (up to 30 x 2s) for requests to finish gracefully,
 * after which any stragglers are force-completed with -ETIMEDOUT.
 */
static void
csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm)
{
	struct csio_hw *hw = mgmtm->hw;
	struct csio_ioreq *io_req;
	struct list_head *tmp;
	uint32_t count;

	count = 30;
	/* Wait for all outstanding req to complete gracefully */
	while ((!list_empty(&mgmtm->active_q)) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	/* release outstanding req from ACTIVEQ */
	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		/* Step the cursor back so list_for_each() survives the
		 * deletion of the current node. */
		tmp = csio_list_prev(tmp);
		list_del_init(&io_req->sm.sm_list);
		mgmtm->stats.n_active--;
		if (io_req->io_cbfn) {
			/* io_req will be freed by completion handler */
			io_req->wr_status = -ETIMEDOUT;
			io_req->io_cbfn(mgmtm->hw, io_req);
		}
	}
}
3699
/*
 * csio_mgmt_init - Mgmt module init entry point
 * @mgmtsm - mgmt module
 * @hw - HW module
 *
 * Sets up the mgmt timeout timer and the active/completion queues,
 * and records the owning HW module. (The WR-queue and request-pool
 * allocation described by earlier comments is not performed here --
 * note the commented-out iq_idx assignment below.)
 * Returns: 0 always.
 */
static int
csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
{
	struct timer_list *timer = &mgmtm->mgmt_timer;

	/* Legacy timer API: the handler receives 'data' and casts it
	 * back to the mgmtm. NOTE(review): csio_mgmt_tmo_handler takes
	 * uintptr_t while timer callbacks take unsigned long -- relies
	 * on the two types matching; confirm on all targets. */
	init_timer(timer);
	timer->function = csio_mgmt_tmo_handler;
	timer->data = (unsigned long)mgmtm;

	INIT_LIST_HEAD(&mgmtm->active_q);
	INIT_LIST_HEAD(&mgmtm->cbfn_q);

	mgmtm->hw = hw;
	/*mgmtm->iq_idx = hw->fwevt_iq_idx;*/

	return 0;
}
3731
/*
 * csio_mgmtm_exit - MGMT module exit entry point
 * @mgmtsm - mgmt module
 *
 * This function called during MGMT module uninit.
 * Stops the mgmt timeout timer, waiting for a running handler to
 * finish. (No ioreqs are freed here -- none are allocated in
 * csio_mgmtm_init().)
 * Returns: None
 *
 */
static void
csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
{
	del_timer_sync(&mgmtm->mgmt_timer);
}
3746
3747
3748/**
3749 * csio_hw_start - Kicks off the HW State machine
3750 * @hw: Pointer to HW module.
3751 *
3752 * It is assumed that the initialization is a synchronous operation.
3753 * So when we return afer posting the event, the HW SM should be in
3754 * the ready state, if there were no errors during init.
3755 */
3756int
3757csio_hw_start(struct csio_hw *hw)
3758{
3759 spin_lock_irq(&hw->lock);
3760 csio_post_event(&hw->sm, CSIO_HWE_CFG);
3761 spin_unlock_irq(&hw->lock);
3762
3763 if (csio_is_hw_ready(hw))
3764 return 0;
3765 else
3766 return -EINVAL;
3767}
3768
3769int
3770csio_hw_stop(struct csio_hw *hw)
3771{
3772 csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);
3773
3774 if (csio_is_hw_removing(hw))
3775 return 0;
3776 else
3777 return -EINVAL;
3778}
3779
3780/* Max reset retries */
3781#define CSIO_MAX_RESET_RETRIES 3
3782
3783/**
3784 * csio_hw_reset - Reset the hardware
3785 * @hw: HW module.
3786 *
3787 * Caller should hold lock across this function.
3788 */
3789int
3790csio_hw_reset(struct csio_hw *hw)
3791{
3792 if (!csio_is_hw_master(hw))
3793 return -EPERM;
3794
3795 if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {
3796 csio_dbg(hw, "Max hw reset attempts reached..");
3797 return -EINVAL;
3798 }
3799
3800 hw->rst_retries++;
3801 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);
3802
3803 if (csio_is_hw_ready(hw)) {
3804 hw->rst_retries = 0;
3805 hw->stats.n_reset_start = jiffies_to_msecs(jiffies);
3806 return 0;
3807 } else
3808 return -EINVAL;
3809}
3810
/*
 * csio_hw_get_device_id - Caches the Adapter's vendor & device id.
 * @hw: HW module.
 *
 * Reads the PCI vendor/device ids once and derives the chip id
 * (T4/T5 generation) from the device id.
 */
static void
csio_hw_get_device_id(struct csio_hw *hw)
{
	/* Is the adapter device id cached already ?*/
	if (csio_is_dev_id_cached(hw))
		return;

	/* Get the PCI vendor & device id */
	pci_read_config_word(hw->pdev, PCI_VENDOR_ID,
			     &hw->params.pci.vendor_id);
	pci_read_config_word(hw->pdev, PCI_DEVICE_ID,
			     &hw->params.pci.device_id);

	csio_dev_id_cached(hw);
	/* Chip generation is encoded in the masked device-id bits. */
	hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);

} /* csio_hw_get_device_id */
3832
/*
 * csio_hw_set_description - Set the model, description of the hw.
 * @hw: HW module.
 * @ven_id: PCI Vendor ID
 * @dev_id: PCI Device ID
 *
 * Looks up the T4/T5 adapter table by the type bits of the device id
 * and copies the model number / description into the HW module.
 * NOTE(review): the fixed 16/32-byte copies assume hw_ver and
 * model_desc are at least that large, and that adap_type is a valid
 * table index -- confirm against csio_hw.h.
 */
static void
csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
{
	uint32_t adap_type, prot_type;

	if (ven_id == CSIO_VENDOR_ID) {
		prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
		adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);

		if (prot_type == CSIO_T4_FCOE_ASIC) {
			memcpy(hw->hw_ver,
			       csio_t4_fcoe_adapters[adap_type].model_no, 16);
			memcpy(hw->model_desc,
			       csio_t4_fcoe_adapters[adap_type].description,
			       32);
		} else if (prot_type == CSIO_T5_FCOE_ASIC) {
			memcpy(hw->hw_ver,
			       csio_t5_fcoe_adapters[adap_type].model_no, 16);
			memcpy(hw->model_desc,
			       csio_t5_fcoe_adapters[adap_type].description,
			       32);
		} else {
			/* Unknown protocol type: fall back to generic name. */
			char tempName[32] = "Chelsio FCoE Controller";
			memcpy(hw->model_desc, tempName, 32);
		}
	}
} /* csio_hw_set_description */
3866
3867/**
3868 * csio_hw_init - Initialize HW module.
3869 * @hw: Pointer to HW module.
3870 *
3871 * Initialize the members of the HW module.
3872 */
3873int
3874csio_hw_init(struct csio_hw *hw)
3875{
3876 int rv = -EINVAL;
3877 uint32_t i;
3878 uint16_t ven_id, dev_id;
3879 struct csio_evt_msg *evt_entry;
3880
3881 INIT_LIST_HEAD(&hw->sm.sm_list);
3882 csio_init_state(&hw->sm, csio_hws_uninit);
3883 spin_lock_init(&hw->lock);
3884 INIT_LIST_HEAD(&hw->sln_head);
3885
3886 /* Get the PCI vendor & device id */
3887 csio_hw_get_device_id(hw);
3888
3889 strcpy(hw->name, CSIO_HW_NAME);
3890
Arvind Bhushan7cc16382013-03-14 05:09:08 +00003891 /* Initialize the HW chip ops with T4/T5 specific ops */
3892 hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops;
3893
Naresh Kumar Innaa3667aa2012-11-15 22:41:18 +05303894 /* Set the model & its description */
3895
3896 ven_id = hw->params.pci.vendor_id;
3897 dev_id = hw->params.pci.device_id;
3898
3899 csio_hw_set_description(hw, ven_id, dev_id);
3900
3901 /* Initialize default log level */
3902 hw->params.log_level = (uint32_t) csio_dbg_level;
3903
3904 csio_set_fwevt_intr_idx(hw, -1);
3905 csio_set_nondata_intr_idx(hw, -1);
3906
3907 /* Init all the modules: Mailbox, WorkRequest and Transport */
3908 if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))
3909 goto err;
3910
3911 rv = csio_wrm_init(csio_hw_to_wrm(hw), hw);
3912 if (rv)
3913 goto err_mbm_exit;
3914
3915 rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);
3916 if (rv)
3917 goto err_wrm_exit;
3918
3919 rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);
3920 if (rv)
3921 goto err_scsim_exit;
3922 /* Pre-allocate evtq and initialize them */
3923 INIT_LIST_HEAD(&hw->evt_active_q);
3924 INIT_LIST_HEAD(&hw->evt_free_q);
3925 for (i = 0; i < csio_evtq_sz; i++) {
3926
3927 evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
3928 if (!evt_entry) {
3929 csio_err(hw, "Failed to initialize eventq");
3930 goto err_evtq_cleanup;
3931 }
3932
3933 list_add_tail(&evt_entry->list, &hw->evt_free_q);
3934 CSIO_INC_STATS(hw, n_evt_freeq);
3935 }
3936
3937 hw->dev_num = dev_num;
3938 dev_num++;
3939
3940 return 0;
3941
3942err_evtq_cleanup:
3943 csio_evtq_cleanup(hw);
3944 csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
3945err_scsim_exit:
3946 csio_scsim_exit(csio_hw_to_scsim(hw));
3947err_wrm_exit:
3948 csio_wrm_exit(csio_hw_to_wrm(hw), hw);
3949err_mbm_exit:
3950 csio_mbm_exit(csio_hw_to_mbm(hw));
3951err:
3952 return rv;
3953}
3954
/**
 * csio_hw_exit - Un-initialize HW module.
 * @hw: Pointer to HW module.
 *
 * Tears down the sub-modules in the reverse order of csio_hw_init():
 * event queue, mgmt, SCSI, WR, mailbox.
 */
void
csio_hw_exit(struct csio_hw *hw)
{
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
	csio_scsim_exit(csio_hw_to_scsim(hw));
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
	csio_mbm_exit(csio_hw_to_mbm(hw));
}