/**
 * Copyright (C) 2005 - 2016 Broadcom
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@broadcom.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
#include <scsi/iscsi_proto.h>

#include "be_main.h"
#include "be.h"
#include "be_mgmt.h"
Jayamohan Kallickal6733b392009-09-05 07:36:35 +053023
/* Bit names for the UE Status Low CSR; index == bit position. */
static const char * const desc_ue_status_low[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
59
/* Bit names for the UE Status High CSR; index == bit position. */
static const char * const desc_ue_status_hi[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
95
Jitendra Bhivare090e2182016-02-04 15:49:17 +053096struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
97 unsigned int *ref_tag)
Jitendra Bhivare69fd6d72016-02-04 15:49:14 +053098{
Jitendra Bhivare090e2182016-02-04 15:49:17 +053099 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
100 struct be_mcc_wrb *wrb = NULL;
101 unsigned int tag;
102
Jitendra Bhivare96eb8d42016-08-19 15:19:59 +0530103 spin_lock(&phba->ctrl.mcc_lock);
Jitendra Bhivare090e2182016-02-04 15:49:17 +0530104 if (mccq->used == mccq->len) {
105 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
106 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
107 "BC_%d : MCC queue full: WRB used %u tag avail %u\n",
108 mccq->used, phba->ctrl.mcc_tag_available);
109 goto alloc_failed;
110 }
111
112 if (!phba->ctrl.mcc_tag_available)
113 goto alloc_failed;
114
115 tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
116 if (!tag) {
117 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
118 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
119 "BC_%d : MCC tag 0 allocated: tag avail %u alloc index %u\n",
120 phba->ctrl.mcc_tag_available,
121 phba->ctrl.mcc_alloc_index);
122 goto alloc_failed;
123 }
124
125 /* return this tag for further reference */
126 *ref_tag = tag;
127 phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
128 phba->ctrl.mcc_tag_status[tag] = 0;
129 phba->ctrl.ptag_state[tag].tag_state = 0;
Jitendra Bhivare50a4b822016-08-19 15:20:12 +0530130 phba->ctrl.ptag_state[tag].cbfn = NULL;
Jitendra Bhivare090e2182016-02-04 15:49:17 +0530131 phba->ctrl.mcc_tag_available--;
132 if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
133 phba->ctrl.mcc_alloc_index = 0;
134 else
135 phba->ctrl.mcc_alloc_index++;
136
137 wrb = queue_head_node(mccq);
138 memset(wrb, 0, sizeof(*wrb));
139 wrb->tag0 = tag;
140 wrb->tag0 |= (mccq->head << MCC_Q_WRB_IDX_SHIFT) & MCC_Q_WRB_IDX_MASK;
141 queue_head_inc(mccq);
142 mccq->used++;
143
144alloc_failed:
Jitendra Bhivare96eb8d42016-08-19 15:19:59 +0530145 spin_unlock(&phba->ctrl.mcc_lock);
Jitendra Bhivare090e2182016-02-04 15:49:17 +0530146 return wrb;
147}
148
149void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
150{
151 struct be_queue_info *mccq = &ctrl->mcc_obj.q;
152
Jitendra Bhivare96eb8d42016-08-19 15:19:59 +0530153 spin_lock(&ctrl->mcc_lock);
Jitendra Bhivare69fd6d72016-02-04 15:49:14 +0530154 tag = tag & MCC_Q_CMD_TAG_MASK;
155 ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
156 if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
157 ctrl->mcc_free_index = 0;
158 else
159 ctrl->mcc_free_index++;
160 ctrl->mcc_tag_available++;
Jitendra Bhivare090e2182016-02-04 15:49:17 +0530161 mccq->used--;
Jitendra Bhivare96eb8d42016-08-19 15:19:59 +0530162 spin_unlock(&ctrl->mcc_lock);
Jitendra Bhivare69fd6d72016-02-04 15:49:14 +0530163}
164
John Soni Josee175def2012-10-20 04:45:40 +0530165/*
Jitendra Bhivare50a4b822016-08-19 15:20:12 +0530166 * beiscsi_mcc_compl_status - Return the status of MCC completion
167 * @phba: Driver private structure
168 * @tag: Tag for the MBX Command
169 * @wrb: the WRB used for the MBX Command
170 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
171 *
172 * return
173 * Success: 0
174 * Failure: Non-Zero
175 */
176int __beiscsi_mcc_compl_status(struct beiscsi_hba *phba,
177 unsigned int tag,
178 struct be_mcc_wrb **wrb,
179 struct be_dma_mem *mbx_cmd_mem)
180{
181 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
182 uint16_t status = 0, addl_status = 0, wrb_num = 0;
183 struct be_cmd_resp_hdr *mbx_resp_hdr;
184 struct be_cmd_req_hdr *mbx_hdr;
185 struct be_mcc_wrb *temp_wrb;
186 uint32_t mcc_tag_status;
187 int rc = 0;
188
189 mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
190 status = (mcc_tag_status & CQE_STATUS_MASK);
191 addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
192 CQE_STATUS_ADDL_SHIFT);
193
194 if (mbx_cmd_mem) {
195 mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
196 } else {
197 wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
198 CQE_STATUS_WRB_SHIFT;
199 temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
200 mbx_hdr = embedded_payload(temp_wrb);
201
202 if (wrb)
203 *wrb = temp_wrb;
204 }
205
206 if (status || addl_status) {
207 beiscsi_log(phba, KERN_WARNING,
208 BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
209 BEISCSI_LOG_CONFIG,
210 "BC_%d : MBX Cmd Failed for Subsys : %d Opcode : %d with Status : %d and Extd_Status : %d\n",
211 mbx_hdr->subsystem, mbx_hdr->opcode,
212 status, addl_status);
213 rc = -EIO;
214 if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
215 mbx_resp_hdr = (struct be_cmd_resp_hdr *)mbx_hdr;
216 beiscsi_log(phba, KERN_WARNING,
217 BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
218 BEISCSI_LOG_CONFIG,
219 "BC_%d : Insufficient Buffer Error Resp_Len : %d Actual_Resp_Len : %d\n",
220 mbx_resp_hdr->response_length,
221 mbx_resp_hdr->actual_resp_len);
222 rc = -EAGAIN;
223 }
224 }
225
226 return rc;
227}
228
229/*
Jitendra Bhivare88840332016-02-04 15:49:12 +0530230 * beiscsi_mccq_compl_wait()- Process completion in MCC CQ
John Soni Josee175def2012-10-20 04:45:40 +0530231 * @phba: Driver private structure
232 * @tag: Tag for the MBX Command
233 * @wrb: the WRB used for the MBX Command
Jayamohan Kallickal1957aa72014-01-29 02:16:39 -0500234 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
John Soni Josee175def2012-10-20 04:45:40 +0530235 *
236 * Waits for MBX completion with the passed TAG.
237 *
238 * return
239 * Success: 0
240 * Failure: Non-Zero
241 **/
Jitendra Bhivare88840332016-02-04 15:49:12 +0530242int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
Jitendra Bhivare50a4b822016-08-19 15:20:12 +0530243 unsigned int tag,
244 struct be_mcc_wrb **wrb,
Jitendra Bhivare88840332016-02-04 15:49:12 +0530245 struct be_dma_mem *mbx_cmd_mem)
John Soni Josee175def2012-10-20 04:45:40 +0530246{
247 int rc = 0;
John Soni Josee175def2012-10-20 04:45:40 +0530248
Jitendra Bhivare9122e992016-08-19 15:20:11 +0530249 if (beiscsi_hba_in_error(phba)) {
250 clear_bit(MCC_TAG_STATE_RUNNING,
251 &phba->ctrl.ptag_state[tag].tag_state);
252 return -EIO;
253 }
John Soni Jose7a158002012-10-20 04:45:51 +0530254
John Soni Josee175def2012-10-20 04:45:40 +0530255 /* wait for the mccq completion */
Jitendra Bhivare50a4b822016-08-19 15:20:12 +0530256 rc = wait_event_interruptible_timeout(phba->ctrl.mcc_wait[tag],
257 phba->ctrl.mcc_tag_status[tag],
258 msecs_to_jiffies(
259 BEISCSI_HOST_MBX_TIMEOUT));
Jitendra Bhivared1d5ca82016-08-19 15:20:18 +0530260 /**
261 * Return EIO if port is being disabled. Associated DMA memory, if any,
262 * is freed by the caller. When port goes offline, MCCQ is cleaned up
263 * so does WRB.
264 */
265 if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
266 clear_bit(MCC_TAG_STATE_RUNNING,
267 &phba->ctrl.ptag_state[tag].tag_state);
268 return -EIO;
269 }
Jitendra Bhivare50a4b822016-08-19 15:20:12 +0530270
Jitendra Bhivarecdde6682016-01-20 14:10:47 +0530271 /**
272 * If MBOX cmd timeout expired, tag and resource allocated
273 * for cmd is not freed until FW returns completion.
274 */
John Soni Josee175def2012-10-20 04:45:40 +0530275 if (rc <= 0) {
Jayamohan Kallickal1957aa72014-01-29 02:16:39 -0500276 struct be_dma_mem *tag_mem;
Jayamohan Kallickal1957aa72014-01-29 02:16:39 -0500277
Jitendra Bhivarecdde6682016-01-20 14:10:47 +0530278 /**
279 * PCI/DMA memory allocated and posted in non-embedded mode
280 * will have mbx_cmd_mem != NULL.
281 * Save virtual and bus addresses for the command so that it
282 * can be freed later.
283 **/
Jayamohan Kallickal1957aa72014-01-29 02:16:39 -0500284 tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
285 if (mbx_cmd_mem) {
286 tag_mem->size = mbx_cmd_mem->size;
287 tag_mem->va = mbx_cmd_mem->va;
288 tag_mem->dma = mbx_cmd_mem->dma;
289 } else
290 tag_mem->size = 0;
291
Jitendra Bhivarecdde6682016-01-20 14:10:47 +0530292 /* first make tag_mem_state visible to all */
293 wmb();
294 set_bit(MCC_TAG_STATE_TIMEOUT,
295 &phba->ctrl.ptag_state[tag].tag_state);
296
John Soni Josee175def2012-10-20 04:45:40 +0530297 beiscsi_log(phba, KERN_ERR,
298 BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
299 BEISCSI_LOG_CONFIG,
300 "BC_%d : MBX Cmd Completion timed out\n");
Jayamohan Kallickal1957aa72014-01-29 02:16:39 -0500301 return -EBUSY;
Jayamohan Kallickal1957aa72014-01-29 02:16:39 -0500302 }
John Soni Josee175def2012-10-20 04:45:40 +0530303
Jitendra Bhivare50a4b822016-08-19 15:20:12 +0530304 rc = __beiscsi_mcc_compl_status(phba, tag, wrb, mbx_cmd_mem);
John Soni Josee175def2012-10-20 04:45:40 +0530305
Jitendra Bhivare090e2182016-02-04 15:49:17 +0530306 free_mcc_wrb(&phba->ctrl, tag);
John Soni Josee175def2012-10-20 04:45:40 +0530307 return rc;
308}
309
John Soni Josee175def2012-10-20 04:45:40 +0530310/*
Jitendra Bhivare88840332016-02-04 15:49:12 +0530311 * beiscsi_process_mbox_compl()- Check the MBX completion status
John Soni Josee175def2012-10-20 04:45:40 +0530312 * @ctrl: Function specific MBX data structure
313 * @compl: Completion status of MBX Command
314 *
315 * Check for the MBX completion status when BMBX method used
316 *
317 * return
318 * Success: Zero
319 * Failure: Non-Zero
320 **/
Jitendra Bhivare88840332016-02-04 15:49:12 +0530321static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
322 struct be_mcc_compl *compl)
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530323{
John Soni Josee175def2012-10-20 04:45:40 +0530324 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
John Soni Jose99bc5d52012-08-20 23:00:18 +0530325 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
John Soni Josee175def2012-10-20 04:45:40 +0530326 struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
Jitendra Bhivare66940952016-08-19 15:20:14 +0530327 u16 compl_status, extd_status;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530328
Jitendra Bhivarec4484272016-02-04 15:49:15 +0530329 /**
330 * To check if valid bit is set, check the entire word as we don't know
331 * the endianness of the data (old entry is host endian while a new
332 * entry is little endian)
333 */
334 if (!compl->flags) {
John Soni Jose99bc5d52012-08-20 23:00:18 +0530335 beiscsi_log(phba, KERN_ERR,
Jitendra Bhivarec4484272016-02-04 15:49:15 +0530336 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
337 "BC_%d : BMBX busy, no completion\n");
338 return -EBUSY;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530339 }
Jitendra Bhivarec4484272016-02-04 15:49:15 +0530340 compl->flags = le32_to_cpu(compl->flags);
341 WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
342
343 /**
344 * Just swap the status to host endian;
345 * mcc tag is opaquely copied from mcc_wrb.
346 */
347 be_dws_le_to_cpu(compl, 4);
348 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
349 CQE_STATUS_COMPL_MASK;
350 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
351 CQE_STATUS_EXTD_MASK;
352 /* Need to reset the entire word that houses the valid bit */
353 compl->flags = 0;
354
355 if (compl_status == MCC_STATUS_SUCCESS)
356 return 0;
357
358 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
359 "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n",
360 hdr->subsystem, hdr->opcode, compl_status, extd_status);
Jitendra Bhivare66940952016-08-19 15:20:14 +0530361 return compl_status;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530362}
363
Jitendra Bhivare9c4f8b02016-01-20 14:10:59 +0530364static void beiscsi_process_async_link(struct beiscsi_hba *phba,
365 struct be_mcc_compl *compl)
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530366{
Jitendra Bhivare9c4f8b02016-01-20 14:10:59 +0530367 struct be_async_event_link_state *evt;
368
369 evt = (struct be_async_event_link_state *)compl;
370
Jitendra Bhivare048084c2016-01-20 14:10:58 +0530371 phba->port_speed = evt->port_speed;
Jitendra Bhivare9c4f8b02016-01-20 14:10:59 +0530372 /**
373 * Check logical link status in ASYNC event.
374 * This has been newly introduced in SKH-R Firmware 10.0.338.45.
375 **/
376 if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) {
Jitendra Bhivare50a4b822016-08-19 15:20:12 +0530377 set_bit(BEISCSI_HBA_LINK_UP, &phba->state);
378 if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
379 beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
Jitendra Bhivare9c4f8b02016-01-20 14:10:59 +0530380 __beiscsi_log(phba, KERN_ERR,
381 "BC_%d : Link Up on Port %d tag 0x%x\n",
382 evt->physical_port, evt->event_tag);
383 } else {
Jitendra Bhivare9122e992016-08-19 15:20:11 +0530384 clear_bit(BEISCSI_HBA_LINK_UP, &phba->state);
Jitendra Bhivare9c4f8b02016-01-20 14:10:59 +0530385 __beiscsi_log(phba, KERN_ERR,
386 "BC_%d : Link Down on Port %d tag 0x%x\n",
387 evt->physical_port, evt->event_tag);
388 iscsi_host_for_each_session(phba->shost,
Jitendra Bhivare480195c2016-08-19 15:20:15 +0530389 beiscsi_session_fail);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530390 }
391}
392
/* Messages for SLI MISCONFIGURED-port events, indexed by reported state. */
static char *beiscsi_port_misconf_event_msg[] = {
	"Physical Link is functional.",
	"Optics faulted/incorrectly installed/not installed - Reseat optics, if issue not resolved, replace.",
	"Optics of two types installed - Remove one optic or install matching pair of optics.",
	"Incompatible optics - Replace with compatible optics for card to function.",
	"Unqualified optics - Replace with Avago optics for Warranty and Technical Support.",
	"Uncertified optics - Replace with Avago Certified optics to enable link operation."
};
401
402static void beiscsi_process_async_sli(struct beiscsi_hba *phba,
403 struct be_mcc_compl *compl)
404{
405 struct be_async_event_sli *async_sli;
406 u8 evt_type, state, old_state, le;
407 char *sev = KERN_WARNING;
408 char *msg = NULL;
409
410 evt_type = compl->flags >> ASYNC_TRAILER_EVENT_TYPE_SHIFT;
411 evt_type &= ASYNC_TRAILER_EVENT_TYPE_MASK;
412
413 /* processing only MISCONFIGURED physical port event */
414 if (evt_type != ASYNC_SLI_EVENT_TYPE_MISCONFIGURED)
415 return;
416
417 async_sli = (struct be_async_event_sli *)compl;
418 state = async_sli->event_data1 >>
419 (phba->fw_config.phys_port * 8) & 0xff;
420 le = async_sli->event_data2 >>
421 (phba->fw_config.phys_port * 8) & 0xff;
422
423 old_state = phba->optic_state;
424 phba->optic_state = state;
425
426 if (state >= ARRAY_SIZE(beiscsi_port_misconf_event_msg)) {
427 /* fw is reporting a state we don't know, log and return */
428 __beiscsi_log(phba, KERN_ERR,
429 "BC_%d : Port %c: Unrecognized optic state 0x%x\n",
430 phba->port_name, async_sli->event_data1);
431 return;
432 }
433
434 if (ASYNC_SLI_LINK_EFFECT_VALID(le)) {
435 /* log link effect for unqualified-4, uncertified-5 optics */
436 if (state > 3)
437 msg = (ASYNC_SLI_LINK_EFFECT_STATE(le)) ?
438 " Link is non-operational." :
439 " Link is operational.";
440 /* 1 - info */
441 if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 1)
442 sev = KERN_INFO;
443 /* 2 - error */
444 if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 2)
445 sev = KERN_ERR;
446 }
447
448 if (old_state != phba->optic_state)
449 __beiscsi_log(phba, sev, "BC_%d : Port %c: %s%s\n",
450 phba->port_name,
451 beiscsi_port_misconf_event_msg[state],
452 !msg ? "" : msg);
453}
454
455void beiscsi_process_async_event(struct beiscsi_hba *phba,
456 struct be_mcc_compl *compl)
457{
458 char *sev = KERN_INFO;
459 u8 evt_code;
460
461 /* interpret flags as an async trailer */
462 evt_code = compl->flags >> ASYNC_TRAILER_EVENT_CODE_SHIFT;
463 evt_code &= ASYNC_TRAILER_EVENT_CODE_MASK;
464 switch (evt_code) {
465 case ASYNC_EVENT_CODE_LINK_STATE:
Jitendra Bhivare9c4f8b02016-01-20 14:10:59 +0530466 beiscsi_process_async_link(phba, compl);
Jitendra Bhivare53aefe22016-01-20 14:10:53 +0530467 break;
468 case ASYNC_EVENT_CODE_ISCSI:
Jitendra Bhivare50a4b822016-08-19 15:20:12 +0530469 if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
470 beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
Jitendra Bhivare53aefe22016-01-20 14:10:53 +0530471 sev = KERN_ERR;
472 break;
473 case ASYNC_EVENT_CODE_SLI:
474 beiscsi_process_async_sli(phba, compl);
475 break;
476 default:
477 /* event not registered */
478 sev = KERN_ERR;
479 }
480
481 beiscsi_log(phba, sev, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
Jitendra Bhivare9c4f8b02016-01-20 14:10:59 +0530482 "BC_%d : ASYNC Event %x: status 0x%08x flags 0x%08x\n",
483 evt_code, compl->status, compl->flags);
Jitendra Bhivare53aefe22016-01-20 14:10:53 +0530484}
485
Jitendra Bhivare2e4e8f62016-02-04 15:49:11 +0530486int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
487 struct be_mcc_compl *compl)
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530488{
Jitendra Bhivare2e4e8f62016-02-04 15:49:11 +0530489 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
490 u16 compl_status, extd_status;
491 struct be_dma_mem *tag_mem;
492 unsigned int tag, wrb_idx;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530493
Jitendra Bhivare2e4e8f62016-02-04 15:49:11 +0530494 be_dws_le_to_cpu(compl, 4);
495 tag = (compl->tag0 & MCC_Q_CMD_TAG_MASK);
496 wrb_idx = (compl->tag0 & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT;
497
498 if (!test_bit(MCC_TAG_STATE_RUNNING,
499 &ctrl->ptag_state[tag].tag_state)) {
500 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX |
501 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
502 "BC_%d : MBX cmd completed but not posted\n");
503 return 0;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530504 }
505
Jitendra Bhivare50a4b822016-08-19 15:20:12 +0530506 /* end MCC with this tag */
507 clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);
508
Jitendra Bhivare2e4e8f62016-02-04 15:49:11 +0530509 if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) {
510 beiscsi_log(phba, KERN_WARNING,
511 BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
512 BEISCSI_LOG_CONFIG,
513 "BC_%d : MBX Completion for timeout Command from FW\n");
514 /**
515 * Check for the size before freeing resource.
516 * Only for non-embedded cmd, PCI resource is allocated.
517 **/
518 tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
Jitendra Bhivare50a4b822016-08-19 15:20:12 +0530519 if (tag_mem->size) {
Jitendra Bhivare2e4e8f62016-02-04 15:49:11 +0530520 pci_free_consistent(ctrl->pdev, tag_mem->size,
521 tag_mem->va, tag_mem->dma);
Jitendra Bhivare50a4b822016-08-19 15:20:12 +0530522 tag_mem->size = 0;
523 }
Jitendra Bhivare090e2182016-02-04 15:49:17 +0530524 free_mcc_wrb(ctrl, tag);
Jitendra Bhivare2e4e8f62016-02-04 15:49:11 +0530525 return 0;
526 }
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530527
Jitendra Bhivare2e4e8f62016-02-04 15:49:11 +0530528 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
529 CQE_STATUS_COMPL_MASK;
530 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
531 CQE_STATUS_EXTD_MASK;
532 /* The ctrl.mcc_tag_status[tag] is filled with
533 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
534 * [7:0] = compl_status
535 */
536 ctrl->mcc_tag_status[tag] = CQE_VALID_MASK;
537 ctrl->mcc_tag_status[tag] |= (wrb_idx << CQE_STATUS_WRB_SHIFT);
538 ctrl->mcc_tag_status[tag] |= (extd_status << CQE_STATUS_ADDL_SHIFT) &
539 CQE_STATUS_ADDL_MASK;
540 ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK);
541
Jitendra Bhivare50a4b822016-08-19 15:20:12 +0530542 if (test_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state)) {
543 if (ctrl->ptag_state[tag].cbfn)
544 ctrl->ptag_state[tag].cbfn(phba, tag);
545 else
Jitendra Bhivare66940952016-08-19 15:20:14 +0530546 __beiscsi_log(phba, KERN_ERR,
547 "BC_%d : MBX ASYNC command with no callback\n");
Jitendra Bhivare50a4b822016-08-19 15:20:12 +0530548 free_mcc_wrb(ctrl, tag);
549 return 0;
550 }
551
Jitendra Bhivare10bcd472016-08-19 15:20:13 +0530552 if (test_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state)) {
553 /* just check completion status and free wrb */
554 __beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
555 free_mcc_wrb(ctrl, tag);
556 return 0;
557 }
558
Jitendra Bhivare2e4e8f62016-02-04 15:49:11 +0530559 wake_up_interruptible(&ctrl->mcc_wait[tag]);
560 return 0;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530561}
562
Jitendra Bhivare69fd6d72016-02-04 15:49:14 +0530563void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag)
564{
565 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
566 u32 val = 0;
567
568 set_bit(MCC_TAG_STATE_RUNNING, &phba->ctrl.ptag_state[tag].tag_state);
569 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
570 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
571 /* make request available for DMA */
572 wmb();
573 iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530574}
575
John Soni Josee175def2012-10-20 04:45:40 +0530576/*
Jitendra Bhivare88840332016-02-04 15:49:12 +0530577 * be_mbox_db_ready_poll()- Check ready status
John Soni Josee175def2012-10-20 04:45:40 +0530578 * @ctrl: Function specific MBX data structure
579 *
580 * Check for the ready status of FW to send BMBX
581 * commands to adapter.
582 *
583 * return
584 * Success: 0
585 * Failure: Non-Zero
586 **/
Jitendra Bhivare88840332016-02-04 15:49:12 +0530587static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530588{
Jitendra Bhivare9ec6f6b2016-01-20 14:10:49 +0530589 /* wait 30s for generic non-flash MBOX operation */
590#define BEISCSI_MBX_RDY_BIT_TIMEOUT 30000
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530591 void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
John Soni Josee175def2012-10-20 04:45:40 +0530592 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
Jayamohan Kallickal92665a62013-09-28 15:35:43 -0700593 unsigned long timeout;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530594 u32 ready;
Jayamohan Kallickal92665a62013-09-28 15:35:43 -0700595
Jitendra Bhivare6ac999e2016-01-20 14:10:45 +0530596 /*
597 * This BMBX busy wait path is used during init only.
598 * For the commands executed during init, 5s should suffice.
599 */
600 timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530601 do {
Jitendra Bhivare9122e992016-08-19 15:20:11 +0530602 if (beiscsi_hba_in_error(phba))
Jitendra Bhivare6ac999e2016-01-20 14:10:45 +0530603 return -EIO;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530604
Jitendra Bhivare6ac999e2016-01-20 14:10:45 +0530605 ready = ioread32(db);
606 if (ready == 0xffffffff)
607 return -EIO;
Jayamohan Kallickal92665a62013-09-28 15:35:43 -0700608
Jitendra Bhivare6ac999e2016-01-20 14:10:45 +0530609 ready &= MPU_MAILBOX_DB_RDY_MASK;
610 if (ready)
611 return 0;
Jayamohan Kallickal92665a62013-09-28 15:35:43 -0700612
Jitendra Bhivare6ac999e2016-01-20 14:10:45 +0530613 if (time_after(jiffies, timeout))
614 break;
Jitendra Bhivare3c9e36a2016-08-19 15:20:00 +0530615 /* 1ms sleep is enough in most cases */
616 schedule_timeout_uninterruptible(msecs_to_jiffies(1));
Jitendra Bhivare6ac999e2016-01-20 14:10:45 +0530617 } while (!ready);
618
619 beiscsi_log(phba, KERN_ERR,
620 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
621 "BC_%d : FW Timed Out\n");
Jitendra Bhivare9122e992016-08-19 15:20:11 +0530622 set_bit(BEISCSI_HBA_FW_TIMEOUT, &phba->state);
Jitendra Bhivare6ac999e2016-01-20 14:10:45 +0530623 return -EBUSY;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530624}
625
John Soni Josee175def2012-10-20 04:45:40 +0530626/*
627 * be_mbox_notify: Notify adapter of new BMBX command
628 * @ctrl: Function specific MBX data structure
629 *
630 * Ring doorbell to inform adapter of a BMBX command
631 * to process
632 *
633 * return
634 * Success: 0
635 * Failure: Non-Zero
636 **/
Jitendra Bhivare480195c2016-08-19 15:20:15 +0530637static int be_mbox_notify(struct be_ctrl_info *ctrl)
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530638{
639 int status;
640 u32 val = 0;
641 void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
642 struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
643 struct be_mcc_mailbox *mbox = mbox_mem->va;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530644
Jitendra Bhivare88840332016-02-04 15:49:12 +0530645 status = be_mbox_db_ready_poll(ctrl);
Jayamohan Kallickal1e234bb2013-04-05 20:38:23 -0700646 if (status)
647 return status;
648
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530649 val &= ~MPU_MAILBOX_DB_RDY_MASK;
650 val |= MPU_MAILBOX_DB_HI_MASK;
651 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
652 iowrite32(val, db);
653
Jitendra Bhivare88840332016-02-04 15:49:12 +0530654 status = be_mbox_db_ready_poll(ctrl);
John Soni Josee175def2012-10-20 04:45:40 +0530655 if (status)
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530656 return status;
John Soni Josee175def2012-10-20 04:45:40 +0530657
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530658 val = 0;
659 val &= ~MPU_MAILBOX_DB_RDY_MASK;
660 val &= ~MPU_MAILBOX_DB_HI_MASK;
661 val |= (u32) (mbox_mem->dma >> 4) << 2;
662 iowrite32(val, db);
663
Jitendra Bhivare88840332016-02-04 15:49:12 +0530664 status = be_mbox_db_ready_poll(ctrl);
John Soni Josee175def2012-10-20 04:45:40 +0530665 if (status)
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530666 return status;
John Soni Josee175def2012-10-20 04:45:40 +0530667
Jitendra Bhivare6ac999e2016-01-20 14:10:45 +0530668 /* RDY is set; small delay before CQE read. */
669 udelay(1);
670
Jitendra Bhivarea264f5e2016-02-04 15:49:13 +0530671 status = beiscsi_process_mbox_compl(ctrl, &mbox->compl);
672 return status;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530673}
674
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530675void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
676 bool embedded, u8 sge_cnt)
677{
678 if (embedded)
Jitendra Bhivarefa1261c2016-12-13 15:56:01 +0530679 wrb->emb_sgecnt_special |= MCC_WRB_EMBEDDED_MASK;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530680 else
Jitendra Bhivarefa1261c2016-12-13 15:56:01 +0530681 wrb->emb_sgecnt_special |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
682 MCC_WRB_SGE_CNT_SHIFT;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530683 wrb->payload_length = payload_len;
684 be_dws_cpu_to_le(wrb, 8);
685}
686
687void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
688 u8 subsystem, u8 opcode, int cmd_len)
689{
690 req_hdr->opcode = opcode;
691 req_hdr->subsystem = subsystem;
692 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
John Soni Josee175def2012-10-20 04:45:40 +0530693 req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530694}
695
696static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
697 struct be_dma_mem *mem)
698{
699 int i, buf_pages;
700 u64 dma = (u64) mem->dma;
701
702 buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
703 for (i = 0; i < buf_pages; i++) {
704 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
705 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
706 dma += PAGE_SIZE_4K;
707 }
708}
709
710static u32 eq_delay_to_mult(u32 usec_delay)
711{
712#define MAX_INTR_RATE 651042
713 const u32 round = 10;
714 u32 multiplier;
715
716 if (usec_delay == 0)
717 multiplier = 0;
718 else {
719 u32 interrupt_rate = 1000000 / usec_delay;
720 if (interrupt_rate == 0)
721 multiplier = 1023;
722 else {
723 multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
724 multiplier /= interrupt_rate;
725 multiplier = (multiplier + round / 2) / round;
726 multiplier = min(multiplier, (u32) 1023);
727 }
728 }
729 return multiplier;
730}
731
732struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
733{
734 return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
735}
736
737int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
738 struct be_queue_info *eq, int eq_delay)
739{
740 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
741 struct be_cmd_req_eq_create *req = embedded_payload(wrb);
742 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
743 struct be_dma_mem *q_mem = &eq->dma_mem;
744 int status;
745
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +0530746 mutex_lock(&ctrl->mbox_lock);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530747 memset(wrb, 0, sizeof(*wrb));
748
749 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
750
751 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
752 OPCODE_COMMON_EQ_CREATE, sizeof(*req));
753
754 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
755
756 AMAP_SET_BITS(struct amap_eq_context, func, req->context,
757 PCI_FUNC(ctrl->pdev->devfn));
758 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
759 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
760 AMAP_SET_BITS(struct amap_eq_context, count, req->context,
761 __ilog2_u32(eq->len / 256));
762 AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
763 eq_delay_to_mult(eq_delay));
764 be_dws_cpu_to_le(req->context, sizeof(req->context));
765
766 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
767
768 status = be_mbox_notify(ctrl);
769 if (!status) {
770 eq->id = le16_to_cpu(resp->eq_id);
771 eq->created = true;
772 }
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +0530773 mutex_unlock(&ctrl->mbox_lock);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530774 return status;
775}
776
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530777int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
778 struct be_queue_info *cq, struct be_queue_info *eq,
779 bool sol_evts, bool no_delay, int coalesce_wm)
780{
781 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
782 struct be_cmd_req_cq_create *req = embedded_payload(wrb);
783 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
John Soni Jose99bc5d52012-08-20 23:00:18 +0530784 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530785 struct be_dma_mem *q_mem = &cq->dma_mem;
786 void *ctxt = &req->context;
787 int status;
788
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +0530789 mutex_lock(&ctrl->mbox_lock);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530790 memset(wrb, 0, sizeof(*wrb));
791
792 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
793
794 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
795 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530796
797 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
Jayamohan Kallickal2c9dfd32013-04-05 20:38:26 -0700798 if (is_chip_be2_be3r(phba)) {
John Soni Joseeaae5262012-10-20 04:43:44 +0530799 AMAP_SET_BITS(struct amap_cq_context, coalescwm,
800 ctxt, coalesce_wm);
801 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
802 AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
803 __ilog2_u32(cq->len / 256));
804 AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
805 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
806 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
807 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
808 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
809 AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
810 PCI_FUNC(ctrl->pdev->devfn));
Jayamohan Kallickal2c9dfd32013-04-05 20:38:26 -0700811 } else {
812 req->hdr.version = MBX_CMD_VER2;
813 req->page_size = 1;
814 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
815 ctxt, coalesce_wm);
816 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
817 ctxt, no_delay);
818 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
819 __ilog2_u32(cq->len / 256));
820 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
821 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
822 AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
823 AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
John Soni Joseeaae5262012-10-20 04:43:44 +0530824 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530825
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530826 be_dws_cpu_to_le(ctxt, sizeof(req->context));
827
828 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
829
830 status = be_mbox_notify(ctrl);
831 if (!status) {
832 cq->id = le16_to_cpu(resp->cq_id);
833 cq->created = true;
834 } else
John Soni Jose99bc5d52012-08-20 23:00:18 +0530835 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
836 "BC_%d : In be_cmd_cq_create, status=ox%08x\n",
837 status);
838
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +0530839 mutex_unlock(&ctrl->mbox_lock);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530840
841 return status;
842}
843
844static u32 be_encoded_q_len(int q_len)
845{
846 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
847 if (len_encoded == 16)
848 len_encoded = 0;
849 return len_encoded;
850}
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530851
Jayamohan Kallickal35e66012009-10-23 11:53:49 +0530852int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530853 struct be_queue_info *mccq,
854 struct be_queue_info *cq)
855{
856 struct be_mcc_wrb *wrb;
Jitendra Bhivare53aefe22016-01-20 14:10:53 +0530857 struct be_cmd_req_mcc_create_ext *req;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530858 struct be_dma_mem *q_mem = &mccq->dma_mem;
859 struct be_ctrl_info *ctrl;
860 void *ctxt;
861 int status;
862
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +0530863 mutex_lock(&phba->ctrl.mbox_lock);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530864 ctrl = &phba->ctrl;
865 wrb = wrb_from_mbox(&ctrl->mbox_mem);
Jayamohan Kallickal37609762011-10-07 19:31:11 -0500866 memset(wrb, 0, sizeof(*wrb));
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530867 req = embedded_payload(wrb);
868 ctxt = &req->context;
869
870 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
871
872 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Jitendra Bhivare53aefe22016-01-20 14:10:53 +0530873 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530874
875 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
Jitendra Bhivare53aefe22016-01-20 14:10:53 +0530876 req->async_evt_bitmap = 1 << ASYNC_EVENT_CODE_LINK_STATE;
877 req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_ISCSI;
878 req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_SLI;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530879
880 AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
881 PCI_FUNC(phba->pcidev->devfn));
882 AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
883 AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
884 be_encoded_q_len(mccq->len));
885 AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
886
887 be_dws_cpu_to_le(ctxt, sizeof(req->context));
888
889 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
890
Jitendra Bhivarea264f5e2016-02-04 15:49:13 +0530891 status = be_mbox_notify(ctrl);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530892 if (!status) {
893 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
894 mccq->id = le16_to_cpu(resp->id);
895 mccq->created = true;
896 }
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +0530897 mutex_unlock(&phba->ctrl.mbox_lock);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530898
899 return status;
900}
901
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530902int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
903 int queue_type)
904{
905 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
906 struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
John Soni Jose99bc5d52012-08-20 23:00:18 +0530907 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530908 u8 subsys = 0, opcode = 0;
909 int status;
910
John Soni Jose99bc5d52012-08-20 23:00:18 +0530911 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
912 "BC_%d : In beiscsi_cmd_q_destroy "
913 "queue_type : %d\n", queue_type);
914
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +0530915 mutex_lock(&ctrl->mbox_lock);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530916 memset(wrb, 0, sizeof(*wrb));
917 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
918
919 switch (queue_type) {
920 case QTYPE_EQ:
921 subsys = CMD_SUBSYSTEM_COMMON;
922 opcode = OPCODE_COMMON_EQ_DESTROY;
923 break;
924 case QTYPE_CQ:
925 subsys = CMD_SUBSYSTEM_COMMON;
926 opcode = OPCODE_COMMON_CQ_DESTROY;
927 break;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530928 case QTYPE_MCCQ:
929 subsys = CMD_SUBSYSTEM_COMMON;
930 opcode = OPCODE_COMMON_MCC_DESTROY;
931 break;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530932 case QTYPE_WRBQ:
933 subsys = CMD_SUBSYSTEM_ISCSI;
934 opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
935 break;
936 case QTYPE_DPDUQ:
937 subsys = CMD_SUBSYSTEM_ISCSI;
938 opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
939 break;
940 case QTYPE_SGL:
941 subsys = CMD_SUBSYSTEM_ISCSI;
942 opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
943 break;
944 default:
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +0530945 mutex_unlock(&ctrl->mbox_lock);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530946 BUG();
Jayamohan Kallickald3ad2bb2010-07-22 04:16:38 +0530947 return -ENXIO;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530948 }
949 be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
950 if (queue_type != QTYPE_SGL)
951 req->id = cpu_to_le16(q->id);
952
953 status = be_mbox_notify(ctrl);
954
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +0530955 mutex_unlock(&ctrl->mbox_lock);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530956 return status;
957}
958
Jayamohan Kallickal8a86e832013-09-28 15:35:45 -0700959/**
960 * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
961 * @ctrl: ptr to ctrl_info
962 * @cq: Completion Queue
963 * @dq: Default Queue
964 * @lenght: ring size
965 * @entry_size: size of each entry in DEFQ
966 * @is_header: Header or Data DEFQ
967 * @ulp_num: Bind to which ULP
968 *
969 * Create HDR/Data DEFQ for the passed ULP. Unsol PDU are posted
970 * on this queue by the FW
971 *
972 * return
973 * Success: 0
974 * Failure: Non-Zero Value
975 *
976 **/
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530977int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
978 struct be_queue_info *cq,
979 struct be_queue_info *dq, int length,
Jayamohan Kallickal8a86e832013-09-28 15:35:45 -0700980 int entry_size, uint8_t is_header,
981 uint8_t ulp_num)
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530982{
983 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
984 struct be_defq_create_req *req = embedded_payload(wrb);
985 struct be_dma_mem *q_mem = &dq->dma_mem;
Jayamohan Kallickalef9e1b92013-04-05 20:38:27 -0700986 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530987 void *ctxt = &req->context;
988 int status;
989
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +0530990 mutex_lock(&ctrl->mbox_lock);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530991 memset(wrb, 0, sizeof(*wrb));
992
993 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
994
995 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
996 OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
997
998 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
Jayamohan Kallickal8a86e832013-09-28 15:35:45 -0700999 if (phba->fw_config.dual_ulp_aware) {
1000 req->ulp_num = ulp_num;
1001 req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
1002 req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
1003 }
Jayamohan Kallickalef9e1b92013-04-05 20:38:27 -07001004
1005 if (is_chip_be2_be3r(phba)) {
1006 AMAP_SET_BITS(struct amap_be_default_pdu_context,
1007 rx_pdid, ctxt, 0);
1008 AMAP_SET_BITS(struct amap_be_default_pdu_context,
1009 rx_pdid_valid, ctxt, 1);
1010 AMAP_SET_BITS(struct amap_be_default_pdu_context,
1011 pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
1012 AMAP_SET_BITS(struct amap_be_default_pdu_context,
1013 ring_size, ctxt,
1014 be_encoded_q_len(length /
1015 sizeof(struct phys_addr)));
1016 AMAP_SET_BITS(struct amap_be_default_pdu_context,
1017 default_buffer_size, ctxt, entry_size);
1018 AMAP_SET_BITS(struct amap_be_default_pdu_context,
1019 cq_id_recv, ctxt, cq->id);
1020 } else {
1021 AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1022 rx_pdid, ctxt, 0);
1023 AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1024 rx_pdid_valid, ctxt, 1);
1025 AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1026 ring_size, ctxt,
1027 be_encoded_q_len(length /
1028 sizeof(struct phys_addr)));
1029 AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1030 default_buffer_size, ctxt, entry_size);
1031 AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1032 cq_id_recv, ctxt, cq->id);
1033 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301034
1035 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1036
1037 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1038
1039 status = be_mbox_notify(ctrl);
1040 if (!status) {
Jayamohan Kallickal8a86e832013-09-28 15:35:45 -07001041 struct be_ring *defq_ring;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301042 struct be_defq_create_resp *resp = embedded_payload(wrb);
1043
1044 dq->id = le16_to_cpu(resp->id);
1045 dq->created = true;
Jayamohan Kallickal8a86e832013-09-28 15:35:45 -07001046 if (is_header)
1047 defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
1048 else
1049 defq_ring = &phba->phwi_ctrlr->
1050 default_pdu_data[ulp_num];
1051
1052 defq_ring->id = dq->id;
1053
1054 if (!phba->fw_config.dual_ulp_aware) {
1055 defq_ring->ulp_num = BEISCSI_ULP0;
1056 defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
1057 } else {
1058 defq_ring->ulp_num = resp->ulp_num;
1059 defq_ring->doorbell_offset = resp->doorbell_offset;
1060 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301061 }
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +05301062 mutex_unlock(&ctrl->mbox_lock);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301063
1064 return status;
1065}
1066
Jayamohan Kallickal4eea99d2013-09-28 15:35:48 -07001067/**
1068 * be_cmd_wrbq_create()- Create WRBQ
1069 * @ctrl: ptr to ctrl_info
1070 * @q_mem: memory details for the queue
1071 * @wrbq: queue info
1072 * @pwrb_context: ptr to wrb_context
1073 * @ulp_num: ULP on which the WRBQ is to be created
1074 *
1075 * Create WRBQ on the passed ULP_NUM.
1076 *
1077 **/
1078int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
1079 struct be_dma_mem *q_mem,
1080 struct be_queue_info *wrbq,
1081 struct hwi_wrb_context *pwrb_context,
1082 uint8_t ulp_num)
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301083{
1084 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1085 struct be_wrbq_create_req *req = embedded_payload(wrb);
1086 struct be_wrbq_create_resp *resp = embedded_payload(wrb);
Jayamohan Kallickal4eea99d2013-09-28 15:35:48 -07001087 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301088 int status;
1089
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +05301090 mutex_lock(&ctrl->mbox_lock);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301091 memset(wrb, 0, sizeof(*wrb));
1092
1093 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1094
1095 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
1096 OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
1097 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
Jayamohan Kallickal4eea99d2013-09-28 15:35:48 -07001098
1099 if (phba->fw_config.dual_ulp_aware) {
1100 req->ulp_num = ulp_num;
1101 req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
1102 req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
1103 }
1104
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301105 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1106
1107 status = be_mbox_notify(ctrl);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301108 if (!status) {
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301109 wrbq->id = le16_to_cpu(resp->cid);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301110 wrbq->created = true;
Jayamohan Kallickal4eea99d2013-09-28 15:35:48 -07001111
1112 pwrb_context->cid = wrbq->id;
1113 if (!phba->fw_config.dual_ulp_aware) {
1114 pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
1115 pwrb_context->ulp_num = BEISCSI_ULP0;
1116 } else {
1117 pwrb_context->ulp_num = resp->ulp_num;
1118 pwrb_context->doorbell_offset = resp->doorbell_offset;
1119 }
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301120 }
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +05301121 mutex_unlock(&ctrl->mbox_lock);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301122 return status;
1123}
1124
Jayamohan Kallickal15a90fe2013-09-28 15:35:38 -07001125int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
1126 struct be_dma_mem *q_mem)
1127{
1128 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1129 struct be_post_template_pages_req *req = embedded_payload(wrb);
1130 int status;
1131
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +05301132 mutex_lock(&ctrl->mbox_lock);
Jayamohan Kallickal15a90fe2013-09-28 15:35:38 -07001133
1134 memset(wrb, 0, sizeof(*wrb));
1135 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1136 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1137 OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
1138 sizeof(*req));
1139
1140 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1141 req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
1142 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1143
1144 status = be_mbox_notify(ctrl);
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +05301145 mutex_unlock(&ctrl->mbox_lock);
Jayamohan Kallickal15a90fe2013-09-28 15:35:38 -07001146 return status;
1147}
1148
1149int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
1150{
1151 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1152 struct be_remove_template_pages_req *req = embedded_payload(wrb);
1153 int status;
1154
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +05301155 mutex_lock(&ctrl->mbox_lock);
Jayamohan Kallickal15a90fe2013-09-28 15:35:38 -07001156
1157 memset(wrb, 0, sizeof(*wrb));
1158 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1159 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1160 OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
1161 sizeof(*req));
1162
1163 req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
1164
1165 status = be_mbox_notify(ctrl);
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +05301166 mutex_unlock(&ctrl->mbox_lock);
Jayamohan Kallickal15a90fe2013-09-28 15:35:38 -07001167 return status;
1168}
1169
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301170int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
1171 struct be_dma_mem *q_mem,
1172 u32 page_offset, u32 num_pages)
1173{
1174 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1175 struct be_post_sgl_pages_req *req = embedded_payload(wrb);
John Soni Jose99bc5d52012-08-20 23:00:18 +05301176 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301177 int status;
1178 unsigned int curr_pages;
1179 u32 internal_page_offset = 0;
1180 u32 temp_num_pages = num_pages;
1181
1182 if (num_pages == 0xff)
1183 num_pages = 1;
1184
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +05301185 mutex_lock(&ctrl->mbox_lock);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301186 do {
1187 memset(wrb, 0, sizeof(*wrb));
1188 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1189 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
1190 OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
1191 sizeof(*req));
1192 curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
1193 pages);
1194 req->num_pages = min(num_pages, curr_pages);
1195 req->page_offset = page_offset;
1196 be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
1197 q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
1198 internal_page_offset += req->num_pages;
1199 page_offset += req->num_pages;
1200 num_pages -= req->num_pages;
1201
1202 if (temp_num_pages == 0xff)
1203 req->num_pages = temp_num_pages;
1204
1205 status = be_mbox_notify(ctrl);
1206 if (status) {
John Soni Jose99bc5d52012-08-20 23:00:18 +05301207 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1208 "BC_%d : FW CMD to map iscsi frags failed.\n");
1209
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301210 goto error;
1211 }
1212 } while (num_pages > 0);
1213error:
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +05301214 mutex_unlock(&ctrl->mbox_lock);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301215 if (status != 0)
1216 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
1217 return status;
1218}
Jayamohan Kallickale5285862011-10-07 19:31:08 -05001219
John Soni Jose6f722382012-08-20 23:00:43 +05301220/**
1221 * be_cmd_set_vlan()- Configure VLAN paramters on the adapter
1222 * @phba: device priv structure instance
1223 * @vlan_tag: TAG to be set
1224 *
1225 * Set the VLAN_TAG for the adapter or Disable VLAN on adapter
1226 *
1227 * returns
1228 * TAG for the MBX Cmd
1229 * **/
1230int be_cmd_set_vlan(struct beiscsi_hba *phba,
1231 uint16_t vlan_tag)
1232{
Jitendra Bhivare090e2182016-02-04 15:49:17 +05301233 unsigned int tag;
John Soni Jose6f722382012-08-20 23:00:43 +05301234 struct be_mcc_wrb *wrb;
1235 struct be_cmd_set_vlan_req *req;
1236 struct be_ctrl_info *ctrl = &phba->ctrl;
1237
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +05301238 if (mutex_lock_interruptible(&ctrl->mbox_lock))
1239 return 0;
Jitendra Bhivare090e2182016-02-04 15:49:17 +05301240 wrb = alloc_mcc_wrb(phba, &tag);
1241 if (!wrb) {
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +05301242 mutex_unlock(&ctrl->mbox_lock);
Jitendra Bhivare090e2182016-02-04 15:49:17 +05301243 return 0;
John Soni Jose6f722382012-08-20 23:00:43 +05301244 }
1245
John Soni Jose6f722382012-08-20 23:00:43 +05301246 req = embedded_payload(wrb);
John Soni Jose6f722382012-08-20 23:00:43 +05301247 be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
1248 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
1249 OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
1250 sizeof(*req));
1251
1252 req->interface_hndl = phba->interface_handle;
1253 req->vlan_priority = vlan_tag;
1254
Jitendra Bhivarecdde6682016-01-20 14:10:47 +05301255 be_mcc_notify(phba, tag);
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +05301256 mutex_unlock(&ctrl->mbox_lock);
John Soni Jose6f722382012-08-20 23:00:43 +05301257
1258 return tag;
1259}
Jitendra Bhivare66940952016-08-19 15:20:14 +05301260
Jitendra Bhivare480195c2016-08-19 15:20:15 +05301261int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
1262 struct beiscsi_hba *phba)
1263{
1264 struct be_dma_mem nonemb_cmd;
1265 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1266 struct be_mgmt_controller_attributes *req;
1267 struct be_sge *sge = nonembedded_sgl(wrb);
1268 int status = 0;
1269
1270 nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
1271 sizeof(struct be_mgmt_controller_attributes),
1272 &nonemb_cmd.dma);
1273 if (nonemb_cmd.va == NULL) {
1274 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1275 "BG_%d : pci_alloc_consistent failed in %s\n",
1276 __func__);
1277 return -ENOMEM;
1278 }
1279 nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
1280 req = nonemb_cmd.va;
1281 memset(req, 0, sizeof(*req));
1282 mutex_lock(&ctrl->mbox_lock);
1283 memset(wrb, 0, sizeof(*wrb));
1284 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
1285 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1286 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
1287 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
1288 sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
1289 sge->len = cpu_to_le32(nonemb_cmd.size);
1290 status = be_mbox_notify(ctrl);
1291 if (!status) {
1292 struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
1293
1294 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1295 "BG_%d : Firmware Version of CMD : %s\n"
1296 "Firmware Version is : %s\n"
1297 "Developer Build, not performing version check...\n",
1298 resp->params.hba_attribs
1299 .flashrom_version_string,
1300 resp->params.hba_attribs.
1301 firmware_version_string);
1302
1303 phba->fw_config.iscsi_features =
1304 resp->params.hba_attribs.iscsi_features;
1305 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1306 "BM_%d : phba->fw_config.iscsi_features = %d\n",
1307 phba->fw_config.iscsi_features);
1308 memcpy(phba->fw_ver_str, resp->params.hba_attribs.
1309 firmware_version_string, BEISCSI_VER_STRLEN);
1310 } else
1311 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1312 "BG_%d : Failed in beiscsi_check_supported_fw\n");
1313 mutex_unlock(&ctrl->mbox_lock);
1314 if (nonemb_cmd.va)
1315 pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
1316 nonemb_cmd.va, nonemb_cmd.dma);
1317
1318 return status;
1319}
1320
/**
 * beiscsi_get_fw_config()- Get the FW config for the function
 * @ctrl: ptr to Ctrl Info
 * @phba: ptr to the dev priv structure
 *
 * Get the FW config and resources available for the function.
 * The resources are created based on the count received here.
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 **/
int beiscsi_get_fw_config(struct be_ctrl_info *ctrl,
			  struct beiscsi_hba *phba)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_fw_cfg *pfw_cfg = embedded_payload(wrb);
	uint32_t cid_count, icd_count;
	int status = -EINVAL;
	uint8_t ulp_num = 0;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0);

	be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
			   EMBED_MBX_MAX_PAYLOAD_SIZE);

	if (be_mbox_notify(ctrl)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : Failed in beiscsi_get_fw_config\n");
		goto fail_init;
	}

	/* FW response formats depend on port id */
	phba->fw_config.phys_port = pfw_cfg->phys_port;
	if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : invalid physical port id %d\n",
			    phba->fw_config.phys_port);
		goto fail_init;
	}

	/* populate and check FW config against min and max values */
	if (!is_chip_be2_be3r(phba)) {
		phba->fw_config.eqid_count = pfw_cfg->eqid_count;
		phba->fw_config.cqid_count = pfw_cfg->cqid_count;
		if (phba->fw_config.eqid_count == 0 ||
		    phba->fw_config.eqid_count > 2048) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BG_%d : invalid EQ count %d\n",
				    phba->fw_config.eqid_count);
			goto fail_init;
		}
		if (phba->fw_config.cqid_count == 0 ||
		    phba->fw_config.cqid_count > 4096) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BG_%d : invalid CQ count %d\n",
				    phba->fw_config.cqid_count);
			goto fail_init;
		}
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : EQ_Count : %d CQ_Count : %d\n",
			    phba->fw_config.eqid_count,
			    phba->fw_config.cqid_count);
	}

	/**
	 * Check on which all ULP iSCSI Protocol is loaded.
	 * Set the Bit for those ULP. This set flag is used
	 * at all places in the code to check on which ULP
	 * iSCSi Protocol is loaded
	 **/
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (pfw_cfg->ulp[ulp_num].ulp_mode &
		    BEISCSI_ULP_ISCSI_INI_MODE) {
			set_bit(ulp_num, &phba->fw_config.ulp_supported);

			/* Get the CID, ICD and Chain count for each ULP */
			phba->fw_config.iscsi_cid_start[ulp_num] =
				pfw_cfg->ulp[ulp_num].sq_base;
			phba->fw_config.iscsi_cid_count[ulp_num] =
				pfw_cfg->ulp[ulp_num].sq_count;

			phba->fw_config.iscsi_icd_start[ulp_num] =
				pfw_cfg->ulp[ulp_num].icd_base;
			phba->fw_config.iscsi_icd_count[ulp_num] =
				pfw_cfg->ulp[ulp_num].icd_count;

			phba->fw_config.iscsi_chain_start[ulp_num] =
				pfw_cfg->chain_icd[ulp_num].chain_base;
			phba->fw_config.iscsi_chain_count[ulp_num] =
				pfw_cfg->chain_icd[ulp_num].chain_count;

			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
				    "BG_%d : Function loaded on ULP : %d\n"
				    "\tiscsi_cid_count : %d\n"
				    "\tiscsi_cid_start : %d\n"
				    "\t iscsi_icd_count : %d\n"
				    "\t iscsi_icd_start : %d\n",
				    ulp_num,
				    phba->fw_config.
				    iscsi_cid_count[ulp_num],
				    phba->fw_config.
				    iscsi_cid_start[ulp_num],
				    phba->fw_config.
				    iscsi_icd_count[ulp_num],
				    phba->fw_config.
				    iscsi_icd_start[ulp_num]);
		}
	}

	if (phba->fw_config.ulp_supported == 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n",
			    pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode,
			    pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode);
		goto fail_init;
	}

	/**
	 * ICD is shared among ULPs. Use icd_count of any one loaded ULP
	 **/
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
			break;
	icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
	if (icd_count == 0 || icd_count > 65536) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d: invalid ICD count %d\n", icd_count);
		goto fail_init;
	}

	cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
		    BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
	if (cid_count == 0 || cid_count > 4096) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d: invalid CID count %d\n", cid_count);
		goto fail_init;
	}

	/**
	 * Check FW is dual ULP aware i.e. can handle either
	 * of the protocols.
	 */
	phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
					  BEISCSI_FUNC_DUA_MODE);

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BG_%d : DUA Mode : 0x%x\n",
		    phba->fw_config.dual_ulp_aware);

	/* all set, continue using this FW config */
	status = 0;
fail_init:
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
1480
1481/**
1482 * beiscsi_get_port_name()- Get port name for the function
1483 * @ctrl: ptr to Ctrl Info
1484 * @phba: ptr to the dev priv structure
1485 *
1486 * Get the alphanumeric character for port
1487 *
1488 **/
1489int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba)
1490{
1491 int ret = 0;
1492 struct be_mcc_wrb *wrb;
1493 struct be_cmd_get_port_name *ioctl;
1494
1495 mutex_lock(&ctrl->mbox_lock);
1496 wrb = wrb_from_mbox(&ctrl->mbox_mem);
1497 memset(wrb, 0, sizeof(*wrb));
1498 ioctl = embedded_payload(wrb);
1499
1500 be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
1501 be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
1502 OPCODE_COMMON_GET_PORT_NAME,
1503 EMBED_MBX_MAX_PAYLOAD_SIZE);
1504 ret = be_mbox_notify(ctrl);
1505 phba->port_name = 0;
1506 if (!ret) {
1507 phba->port_name = ioctl->p.resp.port_names >>
1508 (phba->fw_config.phys_port * 8) & 0xff;
1509 } else {
1510 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1511 "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n",
1512 ret, ioctl->h.resp_hdr.status);
1513 }
1514
1515 if (phba->port_name == 0)
1516 phba->port_name = '?';
1517
1518 mutex_unlock(&ctrl->mbox_lock);
1519 return ret;
1520}
1521
Jitendra Bhivare66940952016-08-19 15:20:14 +05301522int beiscsi_set_uer_feature(struct beiscsi_hba *phba)
1523{
1524 struct be_ctrl_info *ctrl = &phba->ctrl;
1525 struct be_cmd_set_features *ioctl;
1526 struct be_mcc_wrb *wrb;
1527 int ret = 0;
1528
1529 mutex_lock(&ctrl->mbox_lock);
1530 wrb = wrb_from_mbox(&ctrl->mbox_mem);
1531 memset(wrb, 0, sizeof(*wrb));
1532 ioctl = embedded_payload(wrb);
1533
1534 be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
1535 be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
1536 OPCODE_COMMON_SET_FEATURES,
1537 EMBED_MBX_MAX_PAYLOAD_SIZE);
1538 ioctl->feature = BE_CMD_SET_FEATURE_UER;
1539 ioctl->param_len = sizeof(ioctl->param.req);
1540 ioctl->param.req.uer = BE_CMD_UER_SUPP_BIT;
1541 ret = be_mbox_notify(ctrl);
1542 if (!ret) {
1543 phba->ue2rp = ioctl->param.resp.ue2rp;
1544 set_bit(BEISCSI_HBA_UER_SUPP, &phba->state);
1545 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1546 "BG_%d : HBA error recovery supported\n");
1547 } else {
1548 /**
1549 * Check "MCC_STATUS_INVALID_LENGTH" for SKH.
1550 * Older FW versions return this error.
1551 */
1552 if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
1553 ret == MCC_STATUS_INVALID_LENGTH)
1554 __beiscsi_log(phba, KERN_INFO,
1555 "BG_%d : HBA error recovery not supported\n");
1556 }
1557
1558 mutex_unlock(&ctrl->mbox_lock);
1559 return ret;
1560}
Jitendra Bhivare4d2ee1e2016-08-19 15:20:16 +05301561
1562static u32 beiscsi_get_post_stage(struct beiscsi_hba *phba)
1563{
1564 u32 sem;
1565
1566 if (is_chip_be2_be3r(phba))
1567 sem = ioread32(phba->csr_va + SLIPORT_SEMAPHORE_OFFSET_BEx);
1568 else
1569 pci_read_config_dword(phba->pcidev,
1570 SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
1571 return sem;
1572}
1573
1574int beiscsi_check_fw_rdy(struct beiscsi_hba *phba)
1575{
1576 u32 loop, post, rdy = 0;
1577
1578 loop = 1000;
1579 while (loop--) {
1580 post = beiscsi_get_post_stage(phba);
1581 if (post & POST_ERROR_BIT)
1582 break;
1583 if ((post & POST_STAGE_MASK) == POST_STAGE_ARMFW_RDY) {
1584 rdy = 1;
1585 break;
1586 }
1587 msleep(60);
1588 }
1589
1590 if (!rdy) {
1591 __beiscsi_log(phba, KERN_ERR,
1592 "BC_%d : FW not ready 0x%x\n", post);
1593 }
1594
1595 return rdy;
1596}
1597
Jitendra Bhivare4ee1ec42016-08-19 15:20:20 +05301598int beiscsi_cmd_function_reset(struct beiscsi_hba *phba)
Jitendra Bhivare4d2ee1e2016-08-19 15:20:16 +05301599{
1600 struct be_ctrl_info *ctrl = &phba->ctrl;
1601 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
Jitendra Bhivarefa1261c2016-12-13 15:56:01 +05301602 struct be_post_sgl_pages_req *req;
Jitendra Bhivare4d2ee1e2016-08-19 15:20:16 +05301603 int status;
1604
1605 mutex_lock(&ctrl->mbox_lock);
1606
1607 req = embedded_payload(wrb);
1608 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1609 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1610 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1611 status = be_mbox_notify(ctrl);
1612
1613 mutex_unlock(&ctrl->mbox_lock);
1614 return status;
1615}
1616
1617int beiscsi_cmd_special_wrb(struct be_ctrl_info *ctrl, u32 load)
1618{
1619 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1620 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
1621 u8 *endian_check;
1622 int status;
1623
1624 mutex_lock(&ctrl->mbox_lock);
1625 memset(wrb, 0, sizeof(*wrb));
1626
1627 endian_check = (u8 *) wrb;
1628 if (load) {
1629 /* to start communicating */
1630 *endian_check++ = 0xFF;
1631 *endian_check++ = 0x12;
1632 *endian_check++ = 0x34;
1633 *endian_check++ = 0xFF;
1634 *endian_check++ = 0xFF;
1635 *endian_check++ = 0x56;
1636 *endian_check++ = 0x78;
1637 *endian_check++ = 0xFF;
1638 } else {
1639 /* to stop communicating */
1640 *endian_check++ = 0xFF;
1641 *endian_check++ = 0xAA;
1642 *endian_check++ = 0xBB;
1643 *endian_check++ = 0xFF;
1644 *endian_check++ = 0xFF;
1645 *endian_check++ = 0xCC;
1646 *endian_check++ = 0xDD;
1647 *endian_check = 0xFF;
1648 }
1649 be_dws_cpu_to_le(wrb, sizeof(*wrb));
1650
1651 status = be_mbox_notify(ctrl);
1652 if (status)
1653 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1654 "BC_%d : special WRB message failed\n");
1655 mutex_unlock(&ctrl->mbox_lock);
1656 return status;
1657}
1658
1659int beiscsi_init_sliport(struct beiscsi_hba *phba)
1660{
1661 int status;
1662
1663 /* check POST stage before talking to FW */
1664 status = beiscsi_check_fw_rdy(phba);
1665 if (!status)
1666 return -EIO;
1667
Jitendra Bhivared1d5ca82016-08-19 15:20:18 +05301668 /* clear all error states after checking FW rdy */
1669 phba->state &= ~BEISCSI_HBA_IN_ERR;
1670
1671 /* check again UER support */
1672 phba->state &= ~BEISCSI_HBA_UER_SUPP;
1673
Jitendra Bhivare4d2ee1e2016-08-19 15:20:16 +05301674 /*
1675 * SLI COMMON_FUNCTION_RESET completion is indicated by BMBX RDY bit.
1676 * It should clean up any stale info in FW for this fn.
1677 */
1678 status = beiscsi_cmd_function_reset(phba);
1679 if (status) {
1680 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1681 "BC_%d : SLI Function Reset failed\n");
1682 return status;
1683 }
1684
1685 /* indicate driver is loading */
1686 return beiscsi_cmd_special_wrb(&phba->ctrl, 1);
1687}
Jitendra Bhivaref79929d2016-08-19 15:20:17 +05301688
1689/**
1690 * beiscsi_cmd_iscsi_cleanup()- Inform FW to cleanup EP data structures.
1691 * @phba: pointer to dev priv structure
1692 * @ulp: ULP number.
1693 *
1694 * return
1695 * Success: 0
1696 * Failure: Non-Zero Value
1697 **/
1698int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp)
1699{
1700 struct be_ctrl_info *ctrl = &phba->ctrl;
1701 struct iscsi_cleanup_req_v1 *req_v1;
1702 struct iscsi_cleanup_req *req;
Jitendra Bhivared7401052016-12-13 15:55:59 +05301703 u16 hdr_ring_id, data_ring_id;
Jitendra Bhivaref79929d2016-08-19 15:20:17 +05301704 struct be_mcc_wrb *wrb;
1705 int status;
1706
1707 mutex_lock(&ctrl->mbox_lock);
1708 wrb = wrb_from_mbox(&ctrl->mbox_mem);
Jitendra Bhivaref79929d2016-08-19 15:20:17 +05301709
Jitendra Bhivared7401052016-12-13 15:55:59 +05301710 hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp);
1711 data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp);
Jitendra Bhivaref79929d2016-08-19 15:20:17 +05301712 if (is_chip_be2_be3r(phba)) {
Jitendra Bhivared7401052016-12-13 15:55:59 +05301713 req = embedded_payload(wrb);
1714 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1715 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
1716 OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
Jitendra Bhivaref79929d2016-08-19 15:20:17 +05301717 req->chute = (1 << ulp);
Jitendra Bhivared7401052016-12-13 15:55:59 +05301718 /* BE2/BE3 FW creates 8-bit ring id */
1719 req->hdr_ring_id = hdr_ring_id;
1720 req->data_ring_id = data_ring_id;
Jitendra Bhivaref79929d2016-08-19 15:20:17 +05301721 } else {
Jitendra Bhivared7401052016-12-13 15:55:59 +05301722 req_v1 = embedded_payload(wrb);
1723 be_wrb_hdr_prepare(wrb, sizeof(*req_v1), true, 0);
1724 be_cmd_hdr_prepare(&req_v1->hdr, CMD_SUBSYSTEM_ISCSI,
1725 OPCODE_COMMON_ISCSI_CLEANUP,
1726 sizeof(*req_v1));
Jitendra Bhivaref79929d2016-08-19 15:20:17 +05301727 req_v1->hdr.version = 1;
Jitendra Bhivared7401052016-12-13 15:55:59 +05301728 req_v1->chute = (1 << ulp);
1729 req_v1->hdr_ring_id = cpu_to_le16(hdr_ring_id);
1730 req_v1->data_ring_id = cpu_to_le16(data_ring_id);
Jitendra Bhivaref79929d2016-08-19 15:20:17 +05301731 }
1732
1733 status = be_mbox_notify(ctrl);
1734 if (status)
1735 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
1736 "BG_%d : %s failed %d\n", __func__, ulp);
1737 mutex_unlock(&ctrl->mbox_lock);
1738 return status;
1739}
Jitendra Bhivared1d5ca82016-08-19 15:20:18 +05301740
1741/*
1742 * beiscsi_detect_ue()- Detect Unrecoverable Error on adapter
1743 * @phba: Driver priv structure
1744 *
1745 * Read registers linked to UE and check for the UE status
1746 **/
1747int beiscsi_detect_ue(struct beiscsi_hba *phba)
1748{
1749 uint32_t ue_mask_hi = 0, ue_mask_lo = 0;
1750 uint32_t ue_hi = 0, ue_lo = 0;
1751 uint8_t i = 0;
1752 int ret = 0;
1753
1754 pci_read_config_dword(phba->pcidev,
1755 PCICFG_UE_STATUS_LOW, &ue_lo);
1756 pci_read_config_dword(phba->pcidev,
1757 PCICFG_UE_STATUS_MASK_LOW,
1758 &ue_mask_lo);
1759 pci_read_config_dword(phba->pcidev,
1760 PCICFG_UE_STATUS_HIGH,
1761 &ue_hi);
1762 pci_read_config_dword(phba->pcidev,
1763 PCICFG_UE_STATUS_MASK_HI,
1764 &ue_mask_hi);
1765
1766 ue_lo = (ue_lo & ~ue_mask_lo);
1767 ue_hi = (ue_hi & ~ue_mask_hi);
1768
1769
1770 if (ue_lo || ue_hi) {
1771 set_bit(BEISCSI_HBA_IN_UE, &phba->state);
1772 __beiscsi_log(phba, KERN_ERR,
1773 "BC_%d : HBA error detected\n");
1774 ret = 1;
1775 }
1776
1777 if (ue_lo) {
1778 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
1779 if (ue_lo & 1)
1780 __beiscsi_log(phba, KERN_ERR,
1781 "BC_%d : UE_LOW %s bit set\n",
1782 desc_ue_status_low[i]);
1783 }
1784 }
1785
1786 if (ue_hi) {
1787 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
1788 if (ue_hi & 1)
1789 __beiscsi_log(phba, KERN_ERR,
1790 "BC_%d : UE_HIGH %s bit set\n",
1791 desc_ue_status_hi[i]);
1792 }
1793 }
1794 return ret;
1795}
1796
1797/*
1798 * beiscsi_detect_tpe()- Detect Transient Parity Error on adapter
1799 * @phba: Driver priv structure
1800 *
1801 * Read SLIPORT SEMAPHORE register to check for UER
1802 *
1803 **/
1804int beiscsi_detect_tpe(struct beiscsi_hba *phba)
1805{
1806 u32 post, status;
1807 int ret = 0;
1808
1809 post = beiscsi_get_post_stage(phba);
1810 status = post & POST_STAGE_MASK;
1811 if ((status & POST_ERR_RECOVERY_CODE_MASK) ==
1812 POST_STAGE_RECOVERABLE_ERR) {
1813 set_bit(BEISCSI_HBA_IN_TPE, &phba->state);
1814 __beiscsi_log(phba, KERN_INFO,
1815 "BC_%d : HBA error recoverable: 0x%x\n", post);
1816 ret = 1;
1817 } else {
1818 __beiscsi_log(phba, KERN_INFO,
1819 "BC_%d : HBA in UE: 0x%x\n", post);
1820 }
1821
1822 return ret;
1823}