/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
                                                         struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
                                   struct lpfc_cqe *);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
                                   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
                                     uint32_t);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
                                   struct lpfc_sli_ring *pring,
                                   struct lpfc_iocbq *cmdiocb);

/* Return a pointer to the SLI-3 style IOCB embedded in an iocbq. */
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
        return &iocbq->iocb;
}

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
        union lpfc_wqe *temp_wqe;
        struct lpfc_register doorbell;
        uint32_t host_index;
        uint32_t idx;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_wqe = q->qe[q->host_index].wqe;

        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->host_index + 1) % q->entry_count);
        if (idx == q->hba_index) {
                q->WQ_overflow++;
                return -ENOMEM;
        }
        q->WQ_posted++;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % q->entry_repost))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
        if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
                bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
        lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
        /* ensure WQE bcopy flushed before doorbell write */
        wmb();

        /* Update the host index before invoking device */
        host_index = q->host_index;

        q->host_index = idx;

        /* Ring Doorbell */
        doorbell.word0 = 0;
        if (q->db_format == LPFC_DB_LIST_FORMAT) {
                bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
                bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
                bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
        } else if (q->db_format == LPFC_DB_RING_FORMAT) {
                bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
                bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
        } else {
                return -EINVAL;
        }
        writel(doorbell.word0, q->db_regaddr);

        return 0;
}

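/*
 * Illustrative sketch (not part of the driver): a hypothetical caller would
 * post a WQE while holding the hbalock, for example:
 *
 *      spin_lock_irqsave(&phba->hbalock, iflags);
 *      rc = lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe);
 *      spin_unlock_irqrestore(&phba->hbalock, iflags);
 *
 * A return of -ENOMEM means the queue was full and the WQE was not posted;
 * -EINVAL means the queue has an unrecognized doorbell format.
 */
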
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
        uint32_t released = 0;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        if (q->hba_index == index)
                return 0;
        do {
                q->hba_index = ((q->hba_index + 1) % q->entry_count);
                released++;
        } while (q->hba_index != index);
        return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
        struct lpfc_mqe *temp_mqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_mqe = q->qe[q->host_index].mqe;

        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
        /* Save off the mailbox pointer for completion */
        q->phba->mbox = (MAILBOX_t *)temp_mqe;

        /* Update the host index before invoking device */
        q->host_index = ((q->host_index + 1) % q->entry_count);

        /* Ring Doorbell */
        doorbell.word0 = 0;
        bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
        return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        /* Clear the mailbox pointer for completion */
        q->phba->mbox = NULL;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
        struct lpfc_eqe *eqe;
        uint32_t idx;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        eqe = q->qe[q->hba_index].eqe;

        /* If the next EQE is not valid then we are done */
        if (!bf_get_le32(lpfc_eqe_valid, eqe))
                return NULL;
        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->hba_index + 1) % q->entry_count);
        if (idx == q->host_index)
                return NULL;

        q->hba_index = idx;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Speculative instructions were allowing a bcopy at the start
         * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
         * after our return, to copy data before the valid bit check above
         * was done. As such, some of the copied data was stale. The barrier
         * ensures the check is before any data is copied.
         */
        mb();
        return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
static inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
        uint32_t released = 0;
        struct lpfc_eqe *temp_eqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        /* while there are valid entries */
        while (q->hba_index != q->host_index) {
                temp_eqe = q->qe[q->host_index].eqe;
                bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
                released++;
                q->host_index = ((q->host_index + 1) % q->entry_count);
        }
        if (unlikely(released == 0 && !arm))
                return 0;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm) {
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
                bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        }
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQCQDBregaddr);
        return released;
}

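/*
 * Illustrative sketch (not part of the driver): an interrupt handler
 * typically drains an EQ with lpfc_sli4_eq_get() and then releases the
 * processed entries while re-arming the queue, for example:
 *
 *      while ((eqe = lpfc_sli4_eq_get(eq)))
 *              lpfc_sli4_hba_handle_eqe(phba, eqe, idx);
 *      lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
 */
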
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
        struct lpfc_cqe *cqe;
        uint32_t idx;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;

        /* If the next CQE is not valid then we are done */
        if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
                return NULL;
        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->hba_index + 1) % q->entry_count);
        if (idx == q->host_index)
                return NULL;

        cqe = q->qe[q->hba_index].cqe;
        q->hba_index = idx;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
         * instructions allowing action on content before valid bit checked,
         * add barrier here as well. May not be needed as "content" is a
         * single 32-bit entity here (vs multi word structure for cq's).
         */
        mb();
        return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
        uint32_t released = 0;
        struct lpfc_cqe *temp_qe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;
        /* while there are valid entries */
        while (q->hba_index != q->host_index) {
                temp_qe = q->qe[q->host_index].cqe;
                bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
                released++;
                q->host_index = ((q->host_index + 1) % q->entry_count);
        }
        if (unlikely(released == 0 && !arm))
                return 0;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
        bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
               (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
        return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on @hq then this function will
 * return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
        struct lpfc_rqe *temp_hrqe;
        struct lpfc_rqe *temp_drqe;
        struct lpfc_register doorbell;
        int put_index;

        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
        put_index = hq->host_index;
        temp_hrqe = hq->qe[hq->host_index].rqe;
        temp_drqe = dq->qe[dq->host_index].rqe;

        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
        if (hq->host_index != dq->host_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
        if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

        /* Update the host index to point to the next slot */
        hq->host_index = ((hq->host_index + 1) % hq->entry_count);
        dq->host_index = ((dq->host_index + 1) % dq->entry_count);

        /* Ring The Header Receive Queue Doorbell */
        if (!(hq->host_index % hq->entry_repost)) {
                doorbell.word0 = 0;
                if (hq->db_format == LPFC_DB_RING_FORMAT) {
                        bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
                               hq->entry_repost);
                        bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
                } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
                        bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
                               hq->entry_repost);
                        bf_set(lpfc_rq_db_list_fm_index, &doorbell,
                               hq->host_index);
                        bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
                } else {
                        return -EINVAL;
                }
                writel(doorbell.word0, hq->db_regaddr);
        }
        return put_index;
}

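/*
 * Illustrative sketch (not part of the driver): header and data RQEs are
 * posted as a pair while holding the hbalock. Assuming a header/data buffer
 * pair with DMA addresses hbuf_phys and dbuf_phys, a hypothetical caller
 * might do:
 *
 *      hrqe.address_lo = putPaddrLow(hbuf_phys);
 *      hrqe.address_hi = putPaddrHigh(hbuf_phys);
 *      drqe.address_lo = putPaddrLow(dbuf_phys);
 *      drqe.address_hi = putPaddrHigh(dbuf_phys);
 *      rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 *
 * A negative return means the pair was not posted (mismatched queues or a
 * full queue); otherwise the returned index identifies the slot that was
 * used.
 */
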
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return 0;

        if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
                return 0;
        hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
        dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
        return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
                           pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
                           pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq = NULL;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
        if (iocbq)
                phba->iocb_cnt++;
        if (phba->iocb_cnt > phba->iocb_max)
                phba->iocb_max = phba->iocb_cnt;
        return iocbq;
}

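/*
 * Illustrative sketch (not part of the driver): __lpfc_sli_get_iocbq()
 * requires the hbalock; lpfc_sli_get_iocbq(), defined later in this file,
 * wraps it for callers that do not already hold the lock, for example:
 *
 *      struct lpfc_iocbq *iocbq = lpfc_sli_get_iocbq(phba);
 *      if (!iocbq)
 *              return -ENOMEM;         (pool exhausted)
 *      ...
 *      lpfc_sli_release_iocbq(phba, iocbq);
 */
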
/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
        return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
                    uint16_t xritag,
                    struct lpfc_node_rrq *rrq)
{
        struct lpfc_nodelist *ndlp = NULL;

        if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
                ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

        /* The target DID could have been swapped (cable swap)
         * we should use the ndlp from the findnode if it is
         * available.
         */
        if ((!ndlp) && rrq->ndlp)
                ndlp = rrq->ndlp;

        if (!ndlp)
                goto out;

        if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
                rrq->send_rrq = 0;
                rrq->xritag = 0;
                rrq->rrq_stop_time = 0;
        }
out:
        mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held; it takes the hbalock itself.
 * It checks whether stop_time (ratov from setting rrq active) has
 * been reached; if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long next_time;
        unsigned long iflags;
        LIST_HEAD(send_rrq);

        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        list_for_each_entry_safe(rrq, nextrrq,
                                 &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time))
                        list_move(&rrq->list, &send_rrq);
                else if (time_before(rrq->rrq_stop_time, next_time))
                        next_time = rrq->rrq_stop_time;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        if ((!list_empty(&phba->active_rrq_list)) &&
            (!(phba->pport->load_flag & FC_UNLOADING)))
                mod_timer(&phba->rrq_tmr, next_time);
        list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
                list_del(&rrq->list);
                if (!rrq->send_rrq)
                        /* this call will free the rrq */
                        lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
                else if (lpfc_send_rrq(phba, rrq)) {
                        /* if we send the rrq then the completion handler
                         * will clear the bit in the xribitmap.
                         */
                        lpfc_clr_rrq_active(phba, rrq->xritag,
                                            rrq);
                }
        }
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;

        if (phba->sli_rev != LPFC_SLI_REV4)
                return NULL;
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport == vport && rrq->xritag == xri &&
                    rrq->nlp_DID == did) {
                        list_del(&rrq->list);
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        return rrq;
                }
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;
        LIST_HEAD(rrq_list);

        if (phba->sli_rev != LPFC_SLI_REV4)
                return;
        if (!ndlp) {
                lpfc_sli4_vport_delete_els_xri_aborted(vport);
                lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
        }
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
                if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
                        list_move(&rrq->list, &rrq_list);
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
                list_del(&rrq->list);
                lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        }
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                     uint16_t xritag)
{
        lockdep_assert_held(&phba->hbalock);
        if (!ndlp)
                return 0;
        if (!ndlp->active_rrqs_xri_bitmap)
                return 0;
        if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                return 1;
        else
                return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns   0 rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
        unsigned long iflags;
        struct lpfc_node_rrq *rrq;
        int empty;

        if (!ndlp)
                return -EINVAL;

        if (!phba->cfg_enable_rrq)
                return -EINVAL;

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (phba->pport->load_flag & FC_UNLOADING) {
                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
                goto out;
        }

        /*
         * set the active bit even if there is no mem available.
         */
        if (NLP_CHK_FREE_REQ(ndlp))
                goto out;

        if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
                goto out;

        if (!ndlp->active_rrqs_xri_bitmap)
                goto out;

        if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                goto out;

        spin_unlock_irqrestore(&phba->hbalock, iflags);
        rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
        if (!rrq) {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
                                " DID:0x%x Send:%d\n",
                                xritag, rxid, ndlp->nlp_DID, send_rrq);
                return -EINVAL;
        }
        if (phba->cfg_enable_rrq == 1)
                rrq->send_rrq = send_rrq;
        else
                rrq->send_rrq = 0;
        rrq->xritag = xritag;
        rrq->rrq_stop_time = jiffies +
                                msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        rrq->ndlp = ndlp;
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
        rrq->rxid = rxid;
        spin_lock_irqsave(&phba->hbalock, iflags);
        empty = list_empty(&phba->active_rrq_list);
        list_add_tail(&rrq->list, &phba->active_rrq_list);
        phba->hba_flag |= HBA_RRQ_ACTIVE;
        if (empty)
                lpfc_worker_wake_up(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return 0;
out:
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "2921 Can't set rrq active xri:0x%x rxid:0x%x"
                        " DID:0x%x Send:%d\n",
                        xritag, rxid, ndlp->nlp_DID, send_rrq);
        return -EINVAL;
}

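/*
 * Illustrative sketch (not part of the driver): after an exchange is
 * aborted, a hypothetical caller would mark the XRI as having an RRQ
 * outstanding and later test it before reusing the XRI, for example:
 *
 *      if (lpfc_set_rrq_active(phba, ndlp, xritag, rxid, 1))
 *              ... could not activate (no memory or invalid ndlp) ...
 *
 *      spin_lock_irqsave(&phba->hbalock, iflags);
 *      busy = lpfc_test_rrq_active(phba, ndlp, xritag);
 *      spin_unlock_irqrestore(&phba->hbalock, iflags);
 *
 * The bit itself is cleared by lpfc_clr_rrq_active() when the RRQ completes
 * or times out (see lpfc_handle_rrq_active() above).
 */
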
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the ring lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful and returns a pointer to the newly
 * allocated sglq object; otherwise it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
        struct lpfc_sglq *sglq = NULL;
        struct lpfc_sglq *start_sglq = NULL;
        struct lpfc_scsi_buf *lpfc_cmd;
        struct lpfc_nodelist *ndlp;
        int found = 0;

        lockdep_assert_held(&phba->hbalock);

        if (piocbq->iocb_flag & LPFC_IO_FCP) {
                lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
                ndlp = lpfc_cmd->rdata->pnode;
        } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
                   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
                ndlp = piocbq->context_un.ndlp;
        } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
                if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
                        ndlp = NULL;
                else
                        ndlp = piocbq->context_un.ndlp;
        } else {
                ndlp = piocbq->context1;
        }

        spin_lock(&phba->sli4_hba.sgl_list_lock);
        list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
        start_sglq = sglq;
        while (!found) {
                if (!sglq)
                        return NULL;
                if (ndlp && ndlp->active_rrqs_xri_bitmap &&
                    test_bit(sglq->sli4_lxritag,
                             ndlp->active_rrqs_xri_bitmap)) {
                        /* This xri has an rrq outstanding for this DID.
                         * put it back in the list and get another xri.
                         */
                        list_add_tail(&sglq->list, lpfc_els_sgl_list);
                        sglq = NULL;
                        list_remove_head(lpfc_els_sgl_list, sglq,
                                         struct lpfc_sglq, list);
                        if (sglq == start_sglq) {
                                sglq = NULL;
                                break;
                        } else
                                continue;
                }
                sglq->ndlp = ndlp;
                found = 1;
                phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
                sglq->state = SGL_ALLOCATED;
        }
        spin_unlock(&phba->sli4_hba.sgl_list_lock);
        return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful and returns a pointer to the newly
 * allocated sglq object; otherwise it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_nvmet_sgl_list;
        struct lpfc_sglq *sglq = NULL;

        lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

        lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

        list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
        if (!sglq)
                return NULL;
        phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
        sglq->state = SGL_ALLOCATED;
        return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *iocbq = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        iocbq = __lpfc_sli_get_iocbq(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_sglq *sglq;
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
        unsigned long iflag = 0;
        struct lpfc_sli_ring *pring;

        lockdep_assert_held(&phba->hbalock);

        if (iocbq->sli4_xritag == NO_XRI)
                sglq = NULL;
        else
                sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

        if (sglq) {
                if (iocbq->iocb_flag & LPFC_IO_NVMET) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_nvmet_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                        goto out;
                }

                pring = phba->sli4_hba.els_wq->pring;
                if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
                    (sglq->state != SGL_XRI_ABORTED)) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        list_add(&sglq->list,
                                 &phba->sli4_hba.lpfc_abts_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                } else {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);

                        /* Check if TXQ queue needs to be serviced */
                        if (!list_empty(&pring->txq))
                                lpfc_worker_wake_up(phba);
                }
        }

out:
        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_lxritag = NO_XRI;
        iocbq->sli4_xritag = NO_XRI;
        iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
                              LPFC_IO_NVME_LS);
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

        lockdep_assert_held(&phba->hbalock);

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_xritag = NO_XRI;
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        lockdep_assert_held(&phba->hbalock);

        phba->__lpfc_sli_release_iocbq(phba, iocbq);
        phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        unsigned long iflags;

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        spin_lock_irqsave(&phba->hbalock, iflags);
        __lpfc_sli_release_iocbq(phba, iocbq);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
                      uint32_t ulpstatus, uint32_t ulpWord4)
{
        struct lpfc_iocbq *piocb;

        while (!list_empty(iocblist)) {
                list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
                if (!piocb->iocb_cmpl)
                        lpfc_sli_release_iocbq(phba, piocb);
                else {
                        piocb->iocb.ulpStatus = ulpstatus;
                        piocb->iocb.un.ulpWord[4] = ulpWord4;
                        (piocb->iocb_cmpl) (phba, piocb, piocb);
                }
        }
        return;
}

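/*
 * Illustrative sketch (not part of the driver): a hypothetical caller that
 * flushes everything queued on a ring might collect the iocbs onto a local
 * list (while holding the hbalock) and then cancel them with a common
 * status, for example:
 *
 *      LIST_HEAD(completions);
 *      list_splice_init(&pring->txq, &completions);
 *      lpfc_sli_cancel_iocbs(phba, &completions,
 *                            IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 */
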
1209/**
James Smart3621a712009-04-06 18:47:14 -04001210 * lpfc_sli_iocb_cmd_type - Get the iocb type
1211 * @iocb_cmnd: iocb command code.
James Smarte59058c2008-08-24 21:49:00 -04001212 *
1213 * This function is called by ring event handler function to get the iocb type.
1214 * This function translates the iocb command to an iocb command type used to
1215 * decide the final disposition of each completed IOCB.
1216 * The function returns
1217 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1218 * LPFC_SOL_IOCB if it is a solicited iocb completion
1219 * LPFC_ABORT_IOCB if it is an abort iocb
1220 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
1221 *
1222 * The caller is not required to hold any lock.
1223 **/
dea31012005-04-17 16:05:31 -05001224static lpfc_iocb_type
1225lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1226{
1227 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1228
1229 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1230 return 0;
1231
1232 switch (iocb_cmnd) {
1233 case CMD_XMIT_SEQUENCE_CR:
1234 case CMD_XMIT_SEQUENCE_CX:
1235 case CMD_XMIT_BCAST_CN:
1236 case CMD_XMIT_BCAST_CX:
1237 case CMD_ELS_REQUEST_CR:
1238 case CMD_ELS_REQUEST_CX:
1239 case CMD_CREATE_XRI_CR:
1240 case CMD_CREATE_XRI_CX:
1241 case CMD_GET_RPI_CN:
1242 case CMD_XMIT_ELS_RSP_CX:
1243 case CMD_GET_RPI_CR:
1244 case CMD_FCP_IWRITE_CR:
1245 case CMD_FCP_IWRITE_CX:
1246 case CMD_FCP_IREAD_CR:
1247 case CMD_FCP_IREAD_CX:
1248 case CMD_FCP_ICMND_CR:
1249 case CMD_FCP_ICMND_CX:
James Smartf5603512006-12-02 13:35:43 -05001250 case CMD_FCP_TSEND_CX:
1251 case CMD_FCP_TRSP_CX:
1252 case CMD_FCP_TRECEIVE_CX:
1253 case CMD_FCP_AUTO_TRSP_CX:
dea31012005-04-17 16:05:31 -05001254 case CMD_ADAPTER_MSG:
1255 case CMD_ADAPTER_DUMP:
1256 case CMD_XMIT_SEQUENCE64_CR:
1257 case CMD_XMIT_SEQUENCE64_CX:
1258 case CMD_XMIT_BCAST64_CN:
1259 case CMD_XMIT_BCAST64_CX:
1260 case CMD_ELS_REQUEST64_CR:
1261 case CMD_ELS_REQUEST64_CX:
1262 case CMD_FCP_IWRITE64_CR:
1263 case CMD_FCP_IWRITE64_CX:
1264 case CMD_FCP_IREAD64_CR:
1265 case CMD_FCP_IREAD64_CX:
1266 case CMD_FCP_ICMND64_CR:
1267 case CMD_FCP_ICMND64_CX:
James Smartf5603512006-12-02 13:35:43 -05001268 case CMD_FCP_TSEND64_CX:
1269 case CMD_FCP_TRSP64_CX:
1270 case CMD_FCP_TRECEIVE64_CX:
dea31012005-04-17 16:05:31 -05001271 case CMD_GEN_REQUEST64_CR:
1272 case CMD_GEN_REQUEST64_CX:
1273 case CMD_XMIT_ELS_RSP64_CX:
James Smartda0436e2009-05-22 14:51:39 -04001274 case DSSCMD_IWRITE64_CR:
1275 case DSSCMD_IWRITE64_CX:
1276 case DSSCMD_IREAD64_CR:
1277 case DSSCMD_IREAD64_CX:
dea31012005-04-17 16:05:31 -05001278 type = LPFC_SOL_IOCB;
1279 break;
1280 case CMD_ABORT_XRI_CN:
1281 case CMD_ABORT_XRI_CX:
1282 case CMD_CLOSE_XRI_CN:
1283 case CMD_CLOSE_XRI_CX:
1284 case CMD_XRI_ABORTED_CX:
1285 case CMD_ABORT_MXRI64_CN:
James Smart6669f9b2009-10-02 15:16:45 -04001286 case CMD_XMIT_BLS_RSP64_CX:
dea31012005-04-17 16:05:31 -05001287 type = LPFC_ABORT_IOCB;
1288 break;
1289 case CMD_RCV_SEQUENCE_CX:
1290 case CMD_RCV_ELS_REQ_CX:
1291 case CMD_RCV_SEQUENCE64_CX:
1292 case CMD_RCV_ELS_REQ64_CX:
James Smart57127f12007-10-27 13:37:05 -04001293 case CMD_ASYNC_STATUS:
James Smarted957682007-06-17 19:56:37 -05001294 case CMD_IOCB_RCV_SEQ64_CX:
1295 case CMD_IOCB_RCV_ELS64_CX:
1296 case CMD_IOCB_RCV_CONT64_CX:
James Smart3163f722008-02-08 18:50:25 -05001297 case CMD_IOCB_RET_XRI64_CX:
dea31012005-04-17 16:05:31 -05001298 type = LPFC_UNSOL_IOCB;
1299 break;
James Smart3163f722008-02-08 18:50:25 -05001300 case CMD_IOCB_XMIT_MSEQ64_CR:
1301 case CMD_IOCB_XMIT_MSEQ64_CX:
1302 case CMD_IOCB_RCV_SEQ_LIST64_CX:
1303 case CMD_IOCB_RCV_ELS_LIST64_CX:
1304 case CMD_IOCB_CLOSE_EXTENDED_CN:
1305 case CMD_IOCB_ABORT_EXTENDED_CN:
1306 case CMD_IOCB_RET_HBQE64_CN:
1307 case CMD_IOCB_FCP_IBIDIR64_CR:
1308 case CMD_IOCB_FCP_IBIDIR64_CX:
1309 case CMD_IOCB_FCP_ITASKMGT64_CX:
1310 case CMD_IOCB_LOGENTRY_CN:
1311 case CMD_IOCB_LOGENTRY_ASYNC_CN:
1312 printk("%s - Unhandled SLI-3 Command x%x\n",
Harvey Harrisoncadbd4a2008-07-03 23:47:27 -07001313 __func__, iocb_cmnd);
James Smart3163f722008-02-08 18:50:25 -05001314 type = LPFC_UNKNOWN_IOCB;
1315 break;
dea31012005-04-17 16:05:31 -05001316 default:
1317 type = LPFC_UNKNOWN_IOCB;
1318 break;
1319 }
1320
1321 return type;
1322}
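/*
 * Usage sketch (illustrative only): a ring event handler masks the command
 * code out of the response IOCB and dispatches on the returned type, roughly
 * as below.  "irsp" is a hypothetical pointer to the response IOCB being
 * processed.
 *
 *	lpfc_iocb_type type;
 *
 *	type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
 *	switch (type) {
 *	case LPFC_SOL_IOCB:
 *		// match the response to the originating command by iotag
 *		break;
 *	case LPFC_UNSOL_IOCB:
 *		// hand the frame to the unsolicited event handling
 *		break;
 *	case LPFC_ABORT_IOCB:
 *	case LPFC_UNKNOWN_IOCB:
 *	default:
 *		break;
 *	}
 */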
1323
James Smarte59058c2008-08-24 21:49:00 -04001324/**
James Smart3621a712009-04-06 18:47:14 -04001325 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
James Smarte59058c2008-08-24 21:49:00 -04001326 * @phba: Pointer to HBA context object.
1327 *
1328 * This function is called from SLI initialization code
1329 * to configure every ring of the HBA's SLI interface. The
1330 * caller is not required to hold any lock. This function issues
1331 * a config_ring mailbox command for each ring.
1332 * This function returns zero if successful else returns a negative
1333 * error code.
1334 **/
dea31012005-04-17 16:05:31 -05001335static int
James Smarted957682007-06-17 19:56:37 -05001336lpfc_sli_ring_map(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05001337{
1338 struct lpfc_sli *psli = &phba->sli;
James Smarted957682007-06-17 19:56:37 -05001339 LPFC_MBOXQ_t *pmb;
1340 MAILBOX_t *pmbox;
1341 int i, rc, ret = 0;
dea31012005-04-17 16:05:31 -05001342
James Smarted957682007-06-17 19:56:37 -05001343 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1344 if (!pmb)
1345 return -ENOMEM;
James Smart04c68492009-05-22 14:52:52 -04001346 pmbox = &pmb->u.mb;
James Smarted957682007-06-17 19:56:37 -05001347 phba->link_state = LPFC_INIT_MBX_CMDS;
dea31012005-04-17 16:05:31 -05001348 for (i = 0; i < psli->num_rings; i++) {
dea31012005-04-17 16:05:31 -05001349 lpfc_config_ring(phba, i, pmb);
1350 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1351 if (rc != MBX_SUCCESS) {
James Smart92d7f7b2007-06-17 19:56:38 -05001352 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04001353 "0446 Adapter failed to init (%d), "
dea31012005-04-17 16:05:31 -05001354 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1355 "ring %d\n",
James Smarte8b62012007-08-02 11:10:09 -04001356 rc, pmbox->mbxCommand,
1357 pmbox->mbxStatus, i);
James Smart2e0fef82007-06-17 19:56:36 -05001358 phba->link_state = LPFC_HBA_ERROR;
James Smarted957682007-06-17 19:56:37 -05001359 ret = -ENXIO;
1360 break;
dea31012005-04-17 16:05:31 -05001361 }
1362 }
James Smarted957682007-06-17 19:56:37 -05001363 mempool_free(pmb, phba->mbox_mem_pool);
1364 return ret;
dea31012005-04-17 16:05:31 -05001365}
1366
James Smarte59058c2008-08-24 21:49:00 -04001367/**
James Smart3621a712009-04-06 18:47:14 -04001368 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
James Smarte59058c2008-08-24 21:49:00 -04001369 * @phba: Pointer to HBA context object.
1370 * @pring: Pointer to driver SLI ring object.
1371 * @piocb: Pointer to the driver iocb object.
1372 *
1373 * This function is called with hbalock held. The function adds the
1374 * new iocb to txcmplq of the given ring. This function always returns
1375 * 0. If this function is called for the ELS ring, this function checks if
1376 * there is a vport associated with the ELS command. This function also
1377 * starts els_tmofunc timer if this is an ELS command.
1378 **/
dea31012005-04-17 16:05:31 -05001379static int
James Smart2e0fef82007-06-17 19:56:36 -05001380lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1381 struct lpfc_iocbq *piocb)
dea31012005-04-17 16:05:31 -05001382{
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001383 lockdep_assert_held(&phba->hbalock);
1384
Mauricio Faria de Oliveira2319f842016-11-23 10:33:19 -02001385 BUG_ON(!piocb);
Johannes Thumshirn22466da2016-07-29 15:30:56 +02001386
dea31012005-04-17 16:05:31 -05001387 list_add_tail(&piocb->list, &pring->txcmplq);
James Smart4f2e66c2012-05-09 21:17:07 -04001388 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
James Smart2a9bf3d2010-06-07 15:24:45 -04001389
James Smart92d7f7b2007-06-17 19:56:38 -05001390 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1391 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
Mauricio Faria de Oliveira2319f842016-11-23 10:33:19 -02001392 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1393 BUG_ON(!piocb->vport);
1394 if (!(piocb->vport->load_flag & FC_UNLOADING))
1395 mod_timer(&piocb->vport->els_tmofunc,
1396 jiffies +
1397 msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1398 }
dea31012005-04-17 16:05:31 -05001399
James Smart2e0fef82007-06-17 19:56:36 -05001400 return 0;
dea31012005-04-17 16:05:31 -05001401}
1402
James Smarte59058c2008-08-24 21:49:00 -04001403/**
James Smart3621a712009-04-06 18:47:14 -04001404 * lpfc_sli_ringtx_get - Get first element of the txq
James Smarte59058c2008-08-24 21:49:00 -04001405 * @phba: Pointer to HBA context object.
1406 * @pring: Pointer to driver SLI ring object.
1407 *
1408 * This function is called with hbalock held to get the next
1409 * iocb in the txq of the given ring. If there is any iocb in
1410 * the txq, the function returns the first iocb in the list after
1411 * removing the iocb from the list, else it returns NULL.
1412 **/
James Smart2a9bf3d2010-06-07 15:24:45 -04001413struct lpfc_iocbq *
James Smart2e0fef82007-06-17 19:56:36 -05001414lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea31012005-04-17 16:05:31 -05001415{
dea31012005-04-17 16:05:31 -05001416 struct lpfc_iocbq *cmd_iocb;
1417
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001418 lockdep_assert_held(&phba->hbalock);
1419
James Smart858c9f62007-06-17 19:56:39 -05001420 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
James Smart2e0fef82007-06-17 19:56:36 -05001421 return cmd_iocb;
dea31012005-04-17 16:05:31 -05001422}
1423
James Smarte59058c2008-08-24 21:49:00 -04001424/**
James Smart3621a712009-04-06 18:47:14 -04001425 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
James Smarte59058c2008-08-24 21:49:00 -04001426 * @phba: Pointer to HBA context object.
1427 * @pring: Pointer to driver SLI ring object.
1428 *
1429 * This function is called with hbalock held and the caller must post the
1430 * iocb without releasing the lock. If the caller releases the lock,
1431 * iocb slot returned by the function is not guaranteed to be available.
1432 * The function returns a pointer to the next available iocb slot if there
1433 * is a slot available in the ring, else it returns NULL.
1434 * If the get index of the ring is ahead of the put index, the function
1435 * will post an error attention event to the worker thread to take the
1436 * HBA to offline state.
1437 **/
dea31012005-04-17 16:05:31 -05001438static IOCB_t *
1439lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1440{
James Smart34b02dc2008-08-24 21:49:55 -04001441 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
James Smart7e56aa22012-08-03 12:35:34 -04001442 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001443
1444 lockdep_assert_held(&phba->hbalock);
1445
James Smart7e56aa22012-08-03 12:35:34 -04001446 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1447 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1448 pring->sli.sli3.next_cmdidx = 0;
dea31012005-04-17 16:05:31 -05001449
James Smart7e56aa22012-08-03 12:35:34 -04001450 if (unlikely(pring->sli.sli3.local_getidx ==
1451 pring->sli.sli3.next_cmdidx)) {
dea31012005-04-17 16:05:31 -05001452
James Smart7e56aa22012-08-03 12:35:34 -04001453 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea31012005-04-17 16:05:31 -05001454
James Smart7e56aa22012-08-03 12:35:34 -04001455 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
dea31012005-04-17 16:05:31 -05001456 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04001457 "0315 Ring %d issue: portCmdGet %d "
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02001458 "is bigger than cmd ring %d\n",
James Smarte8b62012007-08-02 11:10:09 -04001459 pring->ringno,
James Smart7e56aa22012-08-03 12:35:34 -04001460 pring->sli.sli3.local_getidx,
1461 max_cmd_idx);
dea31012005-04-17 16:05:31 -05001462
James Smart2e0fef82007-06-17 19:56:36 -05001463 phba->link_state = LPFC_HBA_ERROR;
dea31012005-04-17 16:05:31 -05001464 /*
1465 * All error attention handlers are posted to
1466 * worker thread
1467 */
1468 phba->work_ha |= HA_ERATT;
1469 phba->work_hs = HS_FFER3;
James Smart92d7f7b2007-06-17 19:56:38 -05001470
James Smart5e9d9b82008-06-14 22:52:53 -04001471 lpfc_worker_wake_up(phba);
dea31012005-04-17 16:05:31 -05001472
1473 return NULL;
1474 }
1475
James Smart7e56aa22012-08-03 12:35:34 -04001476 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
dea31012005-04-17 16:05:31 -05001477 return NULL;
1478 }
1479
James Smarted957682007-06-17 19:56:37 -05001480 return lpfc_cmd_iocb(phba, pring);
dea31012005-04-17 16:05:31 -05001481}
1482
James Smarte59058c2008-08-24 21:49:00 -04001483/**
James Smart3621a712009-04-06 18:47:14 -04001484 * lpfc_sli_next_iotag - Get an iotag for the iocb
James Smarte59058c2008-08-24 21:49:00 -04001485 * @phba: Pointer to HBA context object.
1486 * @iocbq: Pointer to driver iocb object.
1487 *
1488 * This function gets an iotag for the iocb. If there is no unused iotag and
1489 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
1490 * array and assigns a new iotag.
1491 * The function returns the allocated iotag if successful, else returns zero.
1492 * Zero is not a valid iotag.
1493 * The caller is not required to hold any lock.
1494 **/
James Bottomley604a3e32005-10-29 10:28:33 -05001495uint16_t
James Smart2e0fef82007-06-17 19:56:36 -05001496lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
dea31012005-04-17 16:05:31 -05001497{
James Smart2e0fef82007-06-17 19:56:36 -05001498 struct lpfc_iocbq **new_arr;
1499 struct lpfc_iocbq **old_arr;
James Bottomley604a3e32005-10-29 10:28:33 -05001500 size_t new_len;
1501 struct lpfc_sli *psli = &phba->sli;
1502 uint16_t iotag;
dea31012005-04-17 16:05:31 -05001503
James Smart2e0fef82007-06-17 19:56:36 -05001504 spin_lock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001505 iotag = psli->last_iotag;
1506 if(++iotag < psli->iocbq_lookup_len) {
1507 psli->last_iotag = iotag;
1508 psli->iocbq_lookup[iotag] = iocbq;
James Smart2e0fef82007-06-17 19:56:36 -05001509 spin_unlock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001510 iocbq->iotag = iotag;
1511 return iotag;
James Smart2e0fef82007-06-17 19:56:36 -05001512 } else if (psli->iocbq_lookup_len < (0xffff
James Bottomley604a3e32005-10-29 10:28:33 -05001513 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1514 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
James Smart2e0fef82007-06-17 19:56:36 -05001515 spin_unlock_irq(&phba->hbalock);
1516 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
James Bottomley604a3e32005-10-29 10:28:33 -05001517 GFP_KERNEL);
1518 if (new_arr) {
James Smart2e0fef82007-06-17 19:56:36 -05001519 spin_lock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001520 old_arr = psli->iocbq_lookup;
1521 if (new_len <= psli->iocbq_lookup_len) {
1522 /* highly improbable case */
1523 kfree(new_arr);
1524 iotag = psli->last_iotag;
1525 if(++iotag < psli->iocbq_lookup_len) {
1526 psli->last_iotag = iotag;
1527 psli->iocbq_lookup[iotag] = iocbq;
James Smart2e0fef82007-06-17 19:56:36 -05001528 spin_unlock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001529 iocbq->iotag = iotag;
1530 return iotag;
1531 }
James Smart2e0fef82007-06-17 19:56:36 -05001532 spin_unlock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001533 return 0;
1534 }
1535 if (psli->iocbq_lookup)
1536 memcpy(new_arr, old_arr,
1537 ((psli->last_iotag + 1) *
James Smart311464e2007-08-02 11:10:37 -04001538 sizeof (struct lpfc_iocbq *)));
James Bottomley604a3e32005-10-29 10:28:33 -05001539 psli->iocbq_lookup = new_arr;
1540 psli->iocbq_lookup_len = new_len;
1541 psli->last_iotag = iotag;
1542 psli->iocbq_lookup[iotag] = iocbq;
James Smart2e0fef82007-06-17 19:56:36 -05001543 spin_unlock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001544 iocbq->iotag = iotag;
1545 kfree(old_arr);
1546 return iotag;
1547 }
James Smart8f6d98d2006-08-01 07:34:00 -04001548 } else
James Smart2e0fef82007-06-17 19:56:36 -05001549 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05001550
James Smartbc739052010-08-04 16:11:18 -04001551 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04001552 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1553 psli->last_iotag);
dea31012005-04-17 16:05:31 -05001554
James Bottomley604a3e32005-10-29 10:28:33 -05001555 return 0;
dea31012005-04-17 16:05:31 -05001556}
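/*
 * Usage sketch (illustrative only): an iotag is assigned once when the iocbq
 * is first set up, and the lookup array then lets the response path translate
 * a completed iotag straight back to the originating request.
 *
 *	iotag = lpfc_sli_next_iotag(phba, iocbq);
 *	if (!iotag)
 *		// no free iotag; the submit path must fail the request
 *	...
 *	cmdiocbq = phba->sli.iocbq_lookup[iotag];	// on completion
 */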
1557
James Smarte59058c2008-08-24 21:49:00 -04001558/**
James Smart3621a712009-04-06 18:47:14 -04001559 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
James Smarte59058c2008-08-24 21:49:00 -04001560 * @phba: Pointer to HBA context object.
1561 * @pring: Pointer to driver SLI ring object.
1562 * @iocb: Pointer to iocb slot in the ring.
1563 * @nextiocb: Pointer to driver iocb object which need to be
1564 * posted to firmware.
1565 *
1566 * This function is called with hbalock held to post a new iocb to
1567 * the firmware. This function copies the new iocb to the ring iocb slot and
1568 * updates the ring pointers. It adds the new iocb to the txcmplq if there is
1569 * a completion callback for this iocb, else the function will free the
1570 * iocb object.
1571 **/
dea31012005-04-17 16:05:31 -05001572static void
1573lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1574 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1575{
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001576 lockdep_assert_held(&phba->hbalock);
dea31012005-04-17 16:05:31 -05001577 /*
James Bottomley604a3e32005-10-29 10:28:33 -05001578 * Set up an iotag
dea31012005-04-17 16:05:31 -05001579 */
James Bottomley604a3e32005-10-29 10:28:33 -05001580 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
dea31012005-04-17 16:05:31 -05001581
James Smarte2a0a9d2008-12-04 22:40:02 -05001582
James Smarta58cbd52007-08-02 11:09:43 -04001583 if (pring->ringno == LPFC_ELS_RING) {
1584 lpfc_debugfs_slow_ring_trc(phba,
1585 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
1586 *(((uint32_t *) &nextiocb->iocb) + 4),
1587 *(((uint32_t *) &nextiocb->iocb) + 6),
1588 *(((uint32_t *) &nextiocb->iocb) + 7));
1589 }
1590
dea31012005-04-17 16:05:31 -05001591 /*
1592 * Issue iocb command to adapter
1593 */
James Smart92d7f7b2007-06-17 19:56:38 -05001594 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
dea31012005-04-17 16:05:31 -05001595 wmb();
1596 pring->stats.iocb_cmd++;
1597
1598 /*
1599 * If there is no completion routine to call, we can release the
1600 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1601 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1602 */
1603 if (nextiocb->iocb_cmpl)
1604 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
James Bottomley604a3e32005-10-29 10:28:33 -05001605 else
James Smart2e0fef82007-06-17 19:56:36 -05001606 __lpfc_sli_release_iocbq(phba, nextiocb);
dea31012005-04-17 16:05:31 -05001607
1608 /*
1609 * Let the HBA know what IOCB slot will be the next one the
1610 * driver will put a command into.
1611 */
James Smart7e56aa22012-08-03 12:35:34 -04001612 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1613 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
dea31012005-04-17 16:05:31 -05001614}
1615
James Smarte59058c2008-08-24 21:49:00 -04001616/**
James Smart3621a712009-04-06 18:47:14 -04001617 * lpfc_sli_update_full_ring - Update the chip attention register
James Smarte59058c2008-08-24 21:49:00 -04001618 * @phba: Pointer to HBA context object.
1619 * @pring: Pointer to driver SLI ring object.
1620 *
1621 * The caller is not required to hold any lock for calling this function.
1622 * This function updates the chip attention bits for the ring to inform firmware
1623 * that there is pending work to be done for this ring and requests an
1624 * interrupt when there is space available in the ring. This function is
1625 * called when the driver is unable to post more iocbs to the ring due
1626 * to unavailability of space in the ring.
1627 **/
dea31012005-04-17 16:05:31 -05001628static void
James Smart2e0fef82007-06-17 19:56:36 -05001629lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea31012005-04-17 16:05:31 -05001630{
1631 int ringno = pring->ringno;
1632
1633 pring->flag |= LPFC_CALL_RING_AVAILABLE;
1634
1635 wmb();
1636
1637 /*
1638 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1639 * The HBA will tell us when an IOCB entry is available.
1640 */
1641 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1642 readl(phba->CAregaddr); /* flush */
1643
1644 pring->stats.iocb_cmd_full++;
1645}
1646
James Smarte59058c2008-08-24 21:49:00 -04001647/**
James Smart3621a712009-04-06 18:47:14 -04001648 * lpfc_sli_update_ring - Update chip attention register
James Smarte59058c2008-08-24 21:49:00 -04001649 * @phba: Pointer to HBA context object.
1650 * @pring: Pointer to driver SLI ring object.
1651 *
1652 * This function updates the chip attention register bit for the
1653 * given ring to inform HBA that there is more work to be done
1654 * in this ring. The caller is not required to hold any lock.
1655 **/
dea31012005-04-17 16:05:31 -05001656static void
James Smart2e0fef82007-06-17 19:56:36 -05001657lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea31012005-04-17 16:05:31 -05001658{
1659 int ringno = pring->ringno;
1660
1661 /*
1662 * Tell the HBA that there is work to do in this ring.
1663 */
James Smart34b02dc2008-08-24 21:49:55 -04001664 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1665 wmb();
1666 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1667 readl(phba->CAregaddr); /* flush */
1668 }
dea31012005-04-17 16:05:31 -05001669}
1670
James Smarte59058c2008-08-24 21:49:00 -04001671/**
James Smart3621a712009-04-06 18:47:14 -04001672 * lpfc_sli_resume_iocb - Process iocbs in the txq
James Smarte59058c2008-08-24 21:49:00 -04001673 * @phba: Pointer to HBA context object.
1674 * @pring: Pointer to driver SLI ring object.
1675 *
1676 * This function is called with hbalock held to post pending iocbs
1677 * in the txq to the firmware. This function is called when the driver
1678 * detects space available in the ring.
1679 **/
dea31012005-04-17 16:05:31 -05001680static void
James Smart2e0fef82007-06-17 19:56:36 -05001681lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea31012005-04-17 16:05:31 -05001682{
1683 IOCB_t *iocb;
1684 struct lpfc_iocbq *nextiocb;
1685
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001686 lockdep_assert_held(&phba->hbalock);
1687
dea31012005-04-17 16:05:31 -05001688 /*
1689 * Check to see if:
1690 * (a) there is anything on the txq to send
1691 * (b) link is up
1692 * (c) link attention events can be processed (fcp ring only)
1693 * (d) IOCB processing is not blocked by the outstanding mbox command.
1694 */
James Smart0e9bb8d2013-03-01 16:35:12 -05001695
1696 if (lpfc_is_link_up(phba) &&
1697 (!list_empty(&pring->txq)) &&
James Smart895427b2017-02-12 13:52:30 -08001698 (pring->ringno != LPFC_FCP_RING ||
James Smart0b727fe2007-10-27 13:37:25 -04001699 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
dea31012005-04-17 16:05:31 -05001700
1701 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1702 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1703 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1704
1705 if (iocb)
1706 lpfc_sli_update_ring(phba, pring);
1707 else
1708 lpfc_sli_update_full_ring(phba, pring);
1709 }
1710
1711 return;
1712}
1713
James Smarte59058c2008-08-24 21:49:00 -04001714/**
James Smart3621a712009-04-06 18:47:14 -04001715 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
James Smarte59058c2008-08-24 21:49:00 -04001716 * @phba: Pointer to HBA context object.
1717 * @hbqno: HBQ number.
1718 *
1719 * This function is called with hbalock held to get the next
1720 * available slot for the given HBQ. If a free slot is
1721 * available for the HBQ, it will return a pointer to the next available
1722 * HBQ entry, else it will return NULL.
1723 **/
Adrian Bunka6ababd2007-11-05 18:07:33 +01001724static struct lpfc_hbq_entry *
James Smarted957682007-06-17 19:56:37 -05001725lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1726{
1727 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1728
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001729 lockdep_assert_held(&phba->hbalock);
1730
James Smarted957682007-06-17 19:56:37 -05001731 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1732 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1733 hbqp->next_hbqPutIdx = 0;
1734
1735 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
James Smart92d7f7b2007-06-17 19:56:38 -05001736 uint32_t raw_index = phba->hbq_get[hbqno];
James Smarted957682007-06-17 19:56:37 -05001737 uint32_t getidx = le32_to_cpu(raw_index);
1738
1739 hbqp->local_hbqGetIdx = getidx;
1740
1741 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1742 lpfc_printf_log(phba, KERN_ERR,
James Smart92d7f7b2007-06-17 19:56:38 -05001743 LOG_SLI | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04001744 "1802 HBQ %d: local_hbqGetIdx "
James Smarted957682007-06-17 19:56:37 -05001745 "%u is > than hbqp->entry_count %u\n",
James Smarte8b62012007-08-02 11:10:09 -04001746 hbqno, hbqp->local_hbqGetIdx,
James Smarted957682007-06-17 19:56:37 -05001747 hbqp->entry_count);
1748
1749 phba->link_state = LPFC_HBA_ERROR;
1750 return NULL;
1751 }
1752
1753 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1754 return NULL;
1755 }
1756
James Smart51ef4c22007-08-02 11:10:31 -04001757 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1758 hbqp->hbqPutIdx;
James Smarted957682007-06-17 19:56:37 -05001759}
1760
James Smarte59058c2008-08-24 21:49:00 -04001761/**
James Smart3621a712009-04-06 18:47:14 -04001762 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
James Smarte59058c2008-08-24 21:49:00 -04001763 * @phba: Pointer to HBA context object.
1764 *
1765 * This function is called with no lock held to free all the
1766 * hbq buffers while uninitializing the SLI interface. It also
1767 * frees the HBQ buffers returned by the firmware but not yet
1768 * processed by the upper layers.
1769 **/
James Smarted957682007-06-17 19:56:37 -05001770void
1771lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1772{
James Smart92d7f7b2007-06-17 19:56:38 -05001773 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1774 struct hbq_dmabuf *hbq_buf;
James Smart3163f722008-02-08 18:50:25 -05001775 unsigned long flags;
James Smart51ef4c22007-08-02 11:10:31 -04001776 int i, hbq_count;
James Smarted957682007-06-17 19:56:37 -05001777
James Smart51ef4c22007-08-02 11:10:31 -04001778 hbq_count = lpfc_sli_hbq_count();
James Smarted957682007-06-17 19:56:37 -05001779 /* Return all memory used by all HBQs */
James Smart3163f722008-02-08 18:50:25 -05001780 spin_lock_irqsave(&phba->hbalock, flags);
James Smart51ef4c22007-08-02 11:10:31 -04001781 for (i = 0; i < hbq_count; ++i) {
1782 list_for_each_entry_safe(dmabuf, next_dmabuf,
1783 &phba->hbqs[i].hbq_buffer_list, list) {
1784 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1785 list_del(&hbq_buf->dbuf.list);
1786 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1787 }
James Smarta8adb832007-10-27 13:37:53 -04001788 phba->hbqs[i].buffer_count = 0;
James Smarted957682007-06-17 19:56:37 -05001789 }
James Smart3163f722008-02-08 18:50:25 -05001790
1791 /* Mark the HBQs not in use */
1792 phba->hbq_in_use = 0;
1793 spin_unlock_irqrestore(&phba->hbalock, flags);
James Smarted957682007-06-17 19:56:37 -05001794}
1795
James Smarte59058c2008-08-24 21:49:00 -04001796/**
James Smart3621a712009-04-06 18:47:14 -04001797 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
James Smarte59058c2008-08-24 21:49:00 -04001798 * @phba: Pointer to HBA context object.
1799 * @hbqno: HBQ number.
1800 * @hbq_buf: Pointer to HBQ buffer.
1801 *
1802 * This function is called with the hbalock held to post a
1803 * hbq buffer to the firmware through the SLI revision specific
1804 * handler. If the handler finds an empty slot in the HBQ, it will
1805 * post the buffer. The function returns zero if it successfully
1806 * posts the buffer, else it returns an error code.
1807 **/
James Smart3772a992009-05-22 14:50:54 -04001808static int
James Smarted957682007-06-17 19:56:37 -05001809lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
James Smart92d7f7b2007-06-17 19:56:38 -05001810 struct hbq_dmabuf *hbq_buf)
James Smarted957682007-06-17 19:56:37 -05001811{
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001812 lockdep_assert_held(&phba->hbalock);
James Smart3772a992009-05-22 14:50:54 -04001813 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1814}
1815
1816/**
1817 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1818 * @phba: Pointer to HBA context object.
1819 * @hbqno: HBQ number.
1820 * @hbq_buf: Pointer to HBQ buffer.
1821 *
1822 * This function is called with the hbalock held to post a hbq buffer to the
1823 * firmware. If the function finds an empty slot in the HBQ, it will post the
1824 * buffer and place it on the hbq_buffer_list. The function will return zero if
1825 * it successfully post the buffer else it will return an error.
1826 **/
1827static int
1828lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1829 struct hbq_dmabuf *hbq_buf)
1830{
James Smarted957682007-06-17 19:56:37 -05001831 struct lpfc_hbq_entry *hbqe;
James Smart92d7f7b2007-06-17 19:56:38 -05001832 dma_addr_t physaddr = hbq_buf->dbuf.phys;
James Smarted957682007-06-17 19:56:37 -05001833
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001834 lockdep_assert_held(&phba->hbalock);
James Smarted957682007-06-17 19:56:37 -05001835 /* Get next HBQ entry slot to use */
1836 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
1837 if (hbqe) {
1838 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1839
James Smart92d7f7b2007-06-17 19:56:38 -05001840 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1841 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
James Smart895427b2017-02-12 13:52:30 -08001842 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
James Smarted957682007-06-17 19:56:37 -05001843 hbqe->bde.tus.f.bdeFlags = 0;
James Smart92d7f7b2007-06-17 19:56:38 -05001844 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
1845 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
1846 /* Sync SLIM */
James Smarted957682007-06-17 19:56:37 -05001847 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
1848 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
James Smart92d7f7b2007-06-17 19:56:38 -05001849 /* flush */
James Smarted957682007-06-17 19:56:37 -05001850 readl(phba->hbq_put + hbqno);
James Smart51ef4c22007-08-02 11:10:31 -04001851 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
James Smart3772a992009-05-22 14:50:54 -04001852 return 0;
1853 } else
1854 return -ENOMEM;
James Smarted957682007-06-17 19:56:37 -05001855}
1856
James Smart4f774512009-05-22 14:52:35 -04001857/**
1858 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1859 * @phba: Pointer to HBA context object.
1860 * @hbqno: HBQ number.
1861 * @hbq_buf: Pointer to HBQ buffer.
1862 *
1863 * This function is called with the hbalock held to post an RQE to the SLI4
1864 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1865 * the hbq_buffer_list and return zero, otherwise it will return an error.
1866 **/
1867static int
1868lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1869 struct hbq_dmabuf *hbq_buf)
1870{
1871 int rc;
1872 struct lpfc_rqe hrqe;
1873 struct lpfc_rqe drqe;
James Smart895427b2017-02-12 13:52:30 -08001874 struct lpfc_queue *hrq;
1875 struct lpfc_queue *drq;
1876
1877 if (hbqno != LPFC_ELS_HBQ)
1878 return 1;
1879 hrq = phba->sli4_hba.hdr_rq;
1880 drq = phba->sli4_hba.dat_rq;
James Smart4f774512009-05-22 14:52:35 -04001881
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01001882 lockdep_assert_held(&phba->hbalock);
James Smart4f774512009-05-22 14:52:35 -04001883 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1884 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1885 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1886 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
James Smart895427b2017-02-12 13:52:30 -08001887 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
James Smart4f774512009-05-22 14:52:35 -04001888 if (rc < 0)
1889 return rc;
James Smart895427b2017-02-12 13:52:30 -08001890 hbq_buf->tag = (rc | (hbqno << 16));
James Smart4f774512009-05-22 14:52:35 -04001891 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1892 return 0;
1893}
1894
James Smarte59058c2008-08-24 21:49:00 -04001895/* HBQ for ELS and CT traffic. */
James Smart92d7f7b2007-06-17 19:56:38 -05001896static struct lpfc_hbq_init lpfc_els_hbq = {
1897 .rn = 1,
James Smartdef9c7a2009-12-21 17:02:28 -05001898 .entry_count = 256,
James Smart92d7f7b2007-06-17 19:56:38 -05001899 .mask_count = 0,
1900 .profile = 0,
James Smart51ef4c22007-08-02 11:10:31 -04001901 .ring_mask = (1 << LPFC_ELS_RING),
James Smart92d7f7b2007-06-17 19:56:38 -05001902 .buffer_count = 0,
James Smarta257bf92009-04-06 18:48:10 -04001903 .init_count = 40,
1904 .add_count = 40,
James Smart92d7f7b2007-06-17 19:56:38 -05001905};
James Smarted957682007-06-17 19:56:37 -05001906
James Smarte59058c2008-08-24 21:49:00 -04001907/* Array of HBQs */
James Smart78b2d852007-08-02 11:10:21 -04001908struct lpfc_hbq_init *lpfc_hbq_defs[] = {
James Smart92d7f7b2007-06-17 19:56:38 -05001909 &lpfc_els_hbq,
1910};
1911
James Smarte59058c2008-08-24 21:49:00 -04001912/**
James Smart3621a712009-04-06 18:47:14 -04001913 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
James Smarte59058c2008-08-24 21:49:00 -04001914 * @phba: Pointer to HBA context object.
1915 * @hbqno: HBQ number.
1916 * @count: Number of HBQ buffers to be posted.
1917 *
James Smartd7c255b2008-08-24 21:50:00 -04001918 * This function is called with no lock held to post more hbq buffers to the
1919 * given HBQ. The function returns the number of HBQ buffers successfully
1920 * posted.
James Smarte59058c2008-08-24 21:49:00 -04001921 **/
James Smart311464e2007-08-02 11:10:37 -04001922static int
James Smart92d7f7b2007-06-17 19:56:38 -05001923lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
1924{
James Smartd7c255b2008-08-24 21:50:00 -04001925 uint32_t i, posted = 0;
James Smart3163f722008-02-08 18:50:25 -05001926 unsigned long flags;
James Smart92d7f7b2007-06-17 19:56:38 -05001927 struct hbq_dmabuf *hbq_buffer;
James Smartd7c255b2008-08-24 21:50:00 -04001928 LIST_HEAD(hbq_buf_list);
Matthew Wilcoxeafe1df2008-02-21 05:44:33 -07001929 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
James Smart51ef4c22007-08-02 11:10:31 -04001930 return 0;
James Smart51ef4c22007-08-02 11:10:31 -04001931
James Smartd7c255b2008-08-24 21:50:00 -04001932 if ((phba->hbqs[hbqno].buffer_count + count) >
1933 lpfc_hbq_defs[hbqno]->entry_count)
1934 count = lpfc_hbq_defs[hbqno]->entry_count -
1935 phba->hbqs[hbqno].buffer_count;
1936 if (!count)
1937 return 0;
1938 /* Allocate HBQ entries */
1939 for (i = 0; i < count; i++) {
1940 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1941 if (!hbq_buffer)
1942 break;
1943 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
1944 }
James Smart3163f722008-02-08 18:50:25 -05001945 /* Check whether HBQ is still in use */
1946 spin_lock_irqsave(&phba->hbalock, flags);
Matthew Wilcoxeafe1df2008-02-21 05:44:33 -07001947 if (!phba->hbq_in_use)
James Smartd7c255b2008-08-24 21:50:00 -04001948 goto err;
1949 while (!list_empty(&hbq_buf_list)) {
1950 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1951 dbuf.list);
1952 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
1953 (hbqno << 16));
James Smart3772a992009-05-22 14:50:54 -04001954 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
James Smarta8adb832007-10-27 13:37:53 -04001955 phba->hbqs[hbqno].buffer_count++;
James Smartd7c255b2008-08-24 21:50:00 -04001956 posted++;
1957 } else
James Smart51ef4c22007-08-02 11:10:31 -04001958 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
James Smart92d7f7b2007-06-17 19:56:38 -05001959 }
James Smart3163f722008-02-08 18:50:25 -05001960 spin_unlock_irqrestore(&phba->hbalock, flags);
James Smartd7c255b2008-08-24 21:50:00 -04001961 return posted;
1962err:
1963 spin_unlock_irqrestore(&phba->hbalock, flags);
1964 while (!list_empty(&hbq_buf_list)) {
1965 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1966 dbuf.list);
1967 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1968 }
James Smart92d7f7b2007-06-17 19:56:38 -05001969 return 0;
James Smarted957682007-06-17 19:56:37 -05001970}
1971
James Smarte59058c2008-08-24 21:49:00 -04001972/**
James Smart3621a712009-04-06 18:47:14 -04001973 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
James Smarte59058c2008-08-24 21:49:00 -04001974 * @phba: Pointer to HBA context object.
1975 * @qno: HBQ number.
1976 *
1977 * This function posts more buffers to the HBQ. This function
James Smartd7c255b2008-08-24 21:50:00 -04001978 * is called with no lock held. The function returns the number of HBQ entries
1979 * successfully allocated.
James Smarte59058c2008-08-24 21:49:00 -04001980 **/
James Smarted957682007-06-17 19:56:37 -05001981int
James Smart92d7f7b2007-06-17 19:56:38 -05001982lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
James Smarted957682007-06-17 19:56:37 -05001983{
James Smartdef9c7a2009-12-21 17:02:28 -05001984 if (phba->sli_rev == LPFC_SLI_REV4)
1985 return 0;
1986 else
1987 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1988 lpfc_hbq_defs[qno]->add_count);
James Smarted957682007-06-17 19:56:37 -05001989}
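/*
 * Usage sketch (illustrative only): the unsolicited receive path tops the
 * ELS HBQ back up after consuming buffers, e.g.
 *
 *	lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
 *
 * The return value is advisory; if nothing could be posted the HBQ simply
 * runs shallow until the next replenish attempt.
 */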
1990
James Smarte59058c2008-08-24 21:49:00 -04001991/**
James Smart3621a712009-04-06 18:47:14 -04001992 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
James Smarte59058c2008-08-24 21:49:00 -04001993 * @phba: Pointer to HBA context object.
1994 * @qno: HBQ queue number.
1995 *
1996 * This function is called from SLI initialization code path with
1997 * no lock held to post initial HBQ buffers to firmware. The
James Smartd7c255b2008-08-24 21:50:00 -04001998 * function returns the number of HBQ entries successfully allocated.
James Smarte59058c2008-08-24 21:49:00 -04001999 **/
Adrian Bunka6ababd2007-11-05 18:07:33 +01002000static int
James Smart92d7f7b2007-06-17 19:56:38 -05002001lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
James Smarted957682007-06-17 19:56:37 -05002002{
James Smartdef9c7a2009-12-21 17:02:28 -05002003 if (phba->sli_rev == LPFC_SLI_REV4)
2004 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
James Smart73d91e52011-10-10 21:32:10 -04002005 lpfc_hbq_defs[qno]->entry_count);
James Smartdef9c7a2009-12-21 17:02:28 -05002006 else
2007 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2008 lpfc_hbq_defs[qno]->init_count);
James Smarted957682007-06-17 19:56:37 -05002009}
2010
James Smarte59058c2008-08-24 21:49:00 -04002011/**
James Smart3772a992009-05-22 14:50:54 -04002012 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2013 * @rb_list: Pointer to the hbq buffer list from which to remove
2014 * the first buffer.
2015 *
2016 * This function removes the first hbq buffer on an hbq list and returns a
2017 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2018 **/
2019static struct hbq_dmabuf *
2020lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2021{
2022 struct lpfc_dmabuf *d_buf;
2023
2024 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2025 if (!d_buf)
2026 return NULL;
2027 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2028}
2029
2030/**
James Smart3621a712009-04-06 18:47:14 -04002031 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
James Smarte59058c2008-08-24 21:49:00 -04002032 * @phba: Pointer to HBA context object.
2033 * @tag: Tag of the hbq buffer.
2034 *
Sebastian Herbszt71892412016-04-17 13:27:27 +02002035 * This function searches for the hbq buffer associated with the given tag in
2036 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2037 * otherwise it returns NULL.
James Smarte59058c2008-08-24 21:49:00 -04002038 **/
Adrian Bunka6ababd2007-11-05 18:07:33 +01002039static struct hbq_dmabuf *
James Smarted957682007-06-17 19:56:37 -05002040lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2041{
James Smart92d7f7b2007-06-17 19:56:38 -05002042 struct lpfc_dmabuf *d_buf;
2043 struct hbq_dmabuf *hbq_buf;
James Smart51ef4c22007-08-02 11:10:31 -04002044 uint32_t hbqno;
James Smarted957682007-06-17 19:56:37 -05002045
James Smart51ef4c22007-08-02 11:10:31 -04002046 hbqno = tag >> 16;
Jesper Juhla0a74e452007-08-09 20:47:15 +02002047 if (hbqno >= LPFC_MAX_HBQS)
James Smart51ef4c22007-08-02 11:10:31 -04002048 return NULL;
2049
James Smart3772a992009-05-22 14:50:54 -04002050 spin_lock_irq(&phba->hbalock);
James Smart51ef4c22007-08-02 11:10:31 -04002051 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
James Smart92d7f7b2007-06-17 19:56:38 -05002052 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
James Smart51ef4c22007-08-02 11:10:31 -04002053 if (hbq_buf->tag == tag) {
James Smart3772a992009-05-22 14:50:54 -04002054 spin_unlock_irq(&phba->hbalock);
James Smart92d7f7b2007-06-17 19:56:38 -05002055 return hbq_buf;
James Smarted957682007-06-17 19:56:37 -05002056 }
2057 }
James Smart3772a992009-05-22 14:50:54 -04002058 spin_unlock_irq(&phba->hbalock);
James Smart92d7f7b2007-06-17 19:56:38 -05002059 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04002060 "1803 Bad hbq tag. Data: x%x x%x\n",
James Smarta8adb832007-10-27 13:37:53 -04002061 tag, phba->hbqs[tag >> 16].buffer_count);
James Smart92d7f7b2007-06-17 19:56:38 -05002062 return NULL;
James Smarted957682007-06-17 19:56:37 -05002063}
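/*
 * Note on the tag layout (sketch, matching the encodings used above): the
 * HBQ number is kept in the upper 16 bits of the tag and the buffer index
 * (the RQE index on SLI4) in the lower 16 bits, so a receive handler can
 * recover both cheaply:
 *
 *	hbqno = tag >> 16;
 *	index = tag & 0xffff;
 *	hbq_buf = lpfc_sli_hbqbuf_find(phba, tag);
 */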
2064
James Smarte59058c2008-08-24 21:49:00 -04002065/**
James Smart3621a712009-04-06 18:47:14 -04002066 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
James Smarte59058c2008-08-24 21:49:00 -04002067 * @phba: Pointer to HBA context object.
2068 * @hbq_buffer: Pointer to HBQ buffer.
2069 *
2070 * This function is called with the hbalock held. This function gives back
2071 * the hbq buffer to firmware. If the HBQ does not have space to
2072 * post the buffer, it will free the buffer.
2073 **/
James Smarted957682007-06-17 19:56:37 -05002074void
James Smart51ef4c22007-08-02 11:10:31 -04002075lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
James Smarted957682007-06-17 19:56:37 -05002076{
2077 uint32_t hbqno;
2078
James Smart51ef4c22007-08-02 11:10:31 -04002079 if (hbq_buffer) {
2080 hbqno = hbq_buffer->tag >> 16;
James Smart3772a992009-05-22 14:50:54 -04002081 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
James Smart51ef4c22007-08-02 11:10:31 -04002082 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
James Smarted957682007-06-17 19:56:37 -05002083 }
2084}
2085
James Smarte59058c2008-08-24 21:49:00 -04002086/**
James Smart3621a712009-04-06 18:47:14 -04002087 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
James Smarte59058c2008-08-24 21:49:00 -04002088 * @mbxCommand: mailbox command code.
2089 *
2090 * This function is called by the mailbox event handler function to verify
2091 * that the completed mailbox command is a legitimate mailbox command. If the
2092 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2093 * and the mailbox event handler will take the HBA offline.
2094 **/
dea31012005-04-17 16:05:31 -05002095static int
2096lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2097{
2098 uint8_t ret;
2099
2100 switch (mbxCommand) {
2101 case MBX_LOAD_SM:
2102 case MBX_READ_NV:
2103 case MBX_WRITE_NV:
James Smarta8adb832007-10-27 13:37:53 -04002104 case MBX_WRITE_VPARMS:
dea31012005-04-17 16:05:31 -05002105 case MBX_RUN_BIU_DIAG:
2106 case MBX_INIT_LINK:
2107 case MBX_DOWN_LINK:
2108 case MBX_CONFIG_LINK:
2109 case MBX_CONFIG_RING:
2110 case MBX_RESET_RING:
2111 case MBX_READ_CONFIG:
2112 case MBX_READ_RCONFIG:
2113 case MBX_READ_SPARM:
2114 case MBX_READ_STATUS:
2115 case MBX_READ_RPI:
2116 case MBX_READ_XRI:
2117 case MBX_READ_REV:
2118 case MBX_READ_LNK_STAT:
2119 case MBX_REG_LOGIN:
2120 case MBX_UNREG_LOGIN:
dea31012005-04-17 16:05:31 -05002121 case MBX_CLEAR_LA:
2122 case MBX_DUMP_MEMORY:
2123 case MBX_DUMP_CONTEXT:
2124 case MBX_RUN_DIAGS:
2125 case MBX_RESTART:
2126 case MBX_UPDATE_CFG:
2127 case MBX_DOWN_LOAD:
2128 case MBX_DEL_LD_ENTRY:
2129 case MBX_RUN_PROGRAM:
2130 case MBX_SET_MASK:
James Smart09372822008-01-11 01:52:54 -05002131 case MBX_SET_VARIABLE:
dea31012005-04-17 16:05:31 -05002132 case MBX_UNREG_D_ID:
Jamie Wellnitz41415862006-02-28 19:25:27 -05002133 case MBX_KILL_BOARD:
dea31012005-04-17 16:05:31 -05002134 case MBX_CONFIG_FARP:
Jamie Wellnitz41415862006-02-28 19:25:27 -05002135 case MBX_BEACON:
dea31012005-04-17 16:05:31 -05002136 case MBX_LOAD_AREA:
2137 case MBX_RUN_BIU_DIAG64:
2138 case MBX_CONFIG_PORT:
2139 case MBX_READ_SPARM64:
2140 case MBX_READ_RPI64:
2141 case MBX_REG_LOGIN64:
James Smart76a95d72010-11-20 23:11:48 -05002142 case MBX_READ_TOPOLOGY:
James Smart09372822008-01-11 01:52:54 -05002143 case MBX_WRITE_WWN:
dea31012005-04-17 16:05:31 -05002144 case MBX_SET_DEBUG:
2145 case MBX_LOAD_EXP_ROM:
James Smart57127f12007-10-27 13:37:05 -04002146 case MBX_ASYNCEVT_ENABLE:
James Smart92d7f7b2007-06-17 19:56:38 -05002147 case MBX_REG_VPI:
2148 case MBX_UNREG_VPI:
James Smart858c9f62007-06-17 19:56:39 -05002149 case MBX_HEARTBEAT:
James Smart84774a42008-08-24 21:50:06 -04002150 case MBX_PORT_CAPABILITIES:
2151 case MBX_PORT_IOV_CONTROL:
James Smart04c68492009-05-22 14:52:52 -04002152 case MBX_SLI4_CONFIG:
2153 case MBX_SLI4_REQ_FTRS:
2154 case MBX_REG_FCFI:
2155 case MBX_UNREG_FCFI:
2156 case MBX_REG_VFI:
2157 case MBX_UNREG_VFI:
2158 case MBX_INIT_VPI:
2159 case MBX_INIT_VFI:
2160 case MBX_RESUME_RPI:
James Smartc7495932010-04-06 15:05:28 -04002161 case MBX_READ_EVENT_LOG_STATUS:
2162 case MBX_READ_EVENT_LOG:
James Smartdcf2a4e2010-09-29 11:18:53 -04002163 case MBX_SECURITY_MGMT:
2164 case MBX_AUTH_PORT:
James Smart940eb682012-08-03 12:37:08 -04002165 case MBX_ACCESS_VDATA:
dea31012005-04-17 16:05:31 -05002166 ret = mbxCommand;
2167 break;
2168 default:
2169 ret = MBX_SHUTDOWN;
2170 break;
2171 }
James Smart2e0fef82007-06-17 19:56:36 -05002172 return ret;
dea31012005-04-17 16:05:31 -05002173}
James Smarte59058c2008-08-24 21:49:00 -04002174
2175/**
James Smart3621a712009-04-06 18:47:14 -04002176 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
James Smarte59058c2008-08-24 21:49:00 -04002177 * @phba: Pointer to HBA context object.
2178 * @pmboxq: Pointer to mailbox command.
2179 *
2180 * This is completion handler function for mailbox commands issued from
2181 * lpfc_sli_issue_mbox_wait function. This function is called by the
2182 * mailbox event handler function with no lock held. This function
2183 * will wake up thread waiting on the wait queue pointed by context1
2184 * of the mailbox.
2185 **/
James Smart04c68492009-05-22 14:52:52 -04002186void
James Smart2e0fef82007-06-17 19:56:36 -05002187lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
dea31012005-04-17 16:05:31 -05002188{
2189 wait_queue_head_t *pdone_q;
James Smart858c9f62007-06-17 19:56:39 -05002190 unsigned long drvr_flag;
dea31012005-04-17 16:05:31 -05002191
2192 /*
2193 * If pdone_q is empty, the driver thread gave up waiting and
2194 * continued running.
2195 */
James Smart7054a602007-04-25 09:52:34 -04002196 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
James Smart858c9f62007-06-17 19:56:39 -05002197 spin_lock_irqsave(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05002198 pdone_q = (wait_queue_head_t *) pmboxq->context1;
2199 if (pdone_q)
2200 wake_up_interruptible(pdone_q);
James Smart858c9f62007-06-17 19:56:39 -05002201 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05002202 return;
2203}
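/*
 * Usage sketch (illustrative only): the synchronous mailbox path pairs this
 * handler with a wait queue hung off context1, roughly as follows; "timeout"
 * is the caller's timeout in seconds.
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
 *
 *	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	pmboxq->context1 = &done_q;
 *	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	if (retval == MBX_BUSY || retval == MBX_SUCCESS)
 *		wait_event_interruptible_timeout(done_q,
 *				pmboxq->mbox_flag & LPFC_MBX_WAKE,
 *				msecs_to_jiffies(timeout * 1000));
 */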
2204
James Smarte59058c2008-08-24 21:49:00 -04002205
2206/**
James Smart3621a712009-04-06 18:47:14 -04002207 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
James Smarte59058c2008-08-24 21:49:00 -04002208 * @phba: Pointer to HBA context object.
2209 * @pmb: Pointer to mailbox object.
2210 *
2211 * This function is the default mailbox completion handler. It
2212 * frees the memory resources associated with the completed mailbox
2213 * command. If the completed command is a REG_LOGIN mailbox command,
2214 * this function will issue an UNREG_LOGIN to reclaim the RPI.
2215 **/
dea31012005-04-17 16:05:31 -05002216void
James Smart2e0fef82007-06-17 19:56:36 -05002217lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea31012005-04-17 16:05:31 -05002218{
James Smartd439d282010-09-29 11:18:45 -04002219 struct lpfc_vport *vport = pmb->vport;
dea31012005-04-17 16:05:31 -05002220 struct lpfc_dmabuf *mp;
James Smartd439d282010-09-29 11:18:45 -04002221 struct lpfc_nodelist *ndlp;
James Smart5af5eee2010-10-22 11:06:38 -04002222 struct Scsi_Host *shost;
James Smart04c68492009-05-22 14:52:52 -04002223 uint16_t rpi, vpi;
James Smart7054a602007-04-25 09:52:34 -04002224 int rc;
2225
dea31012005-04-17 16:05:31 -05002226 mp = (struct lpfc_dmabuf *) (pmb->context1);
James Smart7054a602007-04-25 09:52:34 -04002227
dea31012005-04-17 16:05:31 -05002228 if (mp) {
2229 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2230 kfree(mp);
2231 }
James Smart7054a602007-04-25 09:52:34 -04002232
2233 /*
2234 * If a REG_LOGIN succeeded after node is destroyed or node
2235 * is in re-discovery driver need to cleanup the RPI.
2236 */
James Smart2e0fef82007-06-17 19:56:36 -05002237 if (!(phba->pport->load_flag & FC_UNLOADING) &&
James Smart04c68492009-05-22 14:52:52 -04002238 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2239 !pmb->u.mb.mbxStatus) {
2240 rpi = pmb->u.mb.un.varWords[0];
James Smart6d368e52011-05-24 11:44:12 -04002241 vpi = pmb->u.mb.un.varRegLogin.vpi;
James Smart04c68492009-05-22 14:52:52 -04002242 lpfc_unreg_login(phba, vpi, rpi, pmb);
James Smartde96e9c2016-03-31 14:12:27 -07002243 pmb->vport = vport;
James Smart92d7f7b2007-06-17 19:56:38 -05002244 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
James Smart7054a602007-04-25 09:52:34 -04002245 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2246 if (rc != MBX_NOT_FINISHED)
2247 return;
2248 }
2249
James Smart695a8142010-01-26 23:08:03 -05002250 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2251 !(phba->pport->load_flag & FC_UNLOADING) &&
2252 !pmb->u.mb.mbxStatus) {
James Smart5af5eee2010-10-22 11:06:38 -04002253 shost = lpfc_shost_from_vport(vport);
2254 spin_lock_irq(shost->host_lock);
2255 vport->vpi_state |= LPFC_VPI_REGISTERED;
2256 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2257 spin_unlock_irq(shost->host_lock);
James Smart695a8142010-01-26 23:08:03 -05002258 }
2259
James Smartd439d282010-09-29 11:18:45 -04002260 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2261 ndlp = (struct lpfc_nodelist *)pmb->context2;
2262 lpfc_nlp_put(ndlp);
2263 pmb->context2 = NULL;
2264 }
2265
James Smartdcf2a4e2010-09-29 11:18:53 -04002266 /* Check security permission status on INIT_LINK mailbox command */
2267 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2268 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2269 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2270 "2860 SLI authentication is required "
2271 "for INIT_LINK but has not done yet\n");
2272
James Smart04c68492009-05-22 14:52:52 -04002273 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2274 lpfc_sli4_mbox_cmd_free(phba, pmb);
2275 else
2276 mempool_free(pmb, phba->mbox_mem_pool);
dea31012005-04-17 16:05:31 -05002277}
James Smartbe6bb942015-04-07 15:07:22 -04002278 /**
2279 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2280 * @phba: Pointer to HBA context object.
2281 * @pmb: Pointer to mailbox object.
2282 *
2283 * This function is the unreg rpi mailbox completion handler. It
2284 * frees the memory resources associated with the completed mailbox
2285 * command. An additional reference is put on the ndlp to prevent
2286 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2287 * the unreg mailbox command completes; once it does, this routine
2288 * puts the reference back.
2289 *
2290 **/
2291void
2292lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2293{
2294 struct lpfc_vport *vport = pmb->vport;
2295 struct lpfc_nodelist *ndlp;
2296
2297 ndlp = pmb->context1;
2298 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2299 if (phba->sli_rev == LPFC_SLI_REV4 &&
2300 (bf_get(lpfc_sli_intf_if_type,
2301 &phba->sli4_hba.sli_intf) ==
2302 LPFC_SLI_INTF_IF_TYPE_2)) {
2303 if (ndlp) {
2304 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2305 "0010 UNREG_LOGIN vpi:%x "
2306 "rpi:%x DID:%x map:%x %p\n",
2307 vport->vpi, ndlp->nlp_rpi,
2308 ndlp->nlp_DID,
2309 ndlp->nlp_usg_map, ndlp);
James Smart7c5e5182015-05-22 10:42:43 -04002310 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
James Smartbe6bb942015-04-07 15:07:22 -04002311 lpfc_nlp_put(ndlp);
2312 }
2313 }
2314 }
2315
2316 mempool_free(pmb, phba->mbox_mem_pool);
2317}
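/*
 * Usage sketch (illustrative only, caller details hedged): a caller wanting
 * this behaviour issues the UNREG_LOGIN with the handler and an extra ndlp
 * reference attached, roughly:
 *
 *	lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
 *	mbox->vport = vport;
 *	mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
 *	mbox->context1 = lpfc_nlp_get(ndlp);
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 */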
dea31012005-04-17 16:05:31 -05002318
James Smarte59058c2008-08-24 21:49:00 -04002319/**
James Smart3621a712009-04-06 18:47:14 -04002320 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
James Smarte59058c2008-08-24 21:49:00 -04002321 * @phba: Pointer to HBA context object.
2322 *
2323 * This function is called with no lock held. This function processes all
2324 * the completed mailbox commands and gives them to the upper layers. The
2325 * interrupt service routine processes the mailbox completion interrupt and
2326 * adds completed mailbox commands to the mboxq_cmpl queue and signals the
2327 * worker thread. The worker thread then calls lpfc_sli_handle_mb_event,
2328 * which returns the completed mailbox commands in the mboxq_cmpl queue to
2329 * the upper layers by calling the
2330 * completion handler function of each mailbox.
2331 **/
dea31012005-04-17 16:05:31 -05002332int
James Smart2e0fef82007-06-17 19:56:36 -05002333lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05002334{
James Smart92d7f7b2007-06-17 19:56:38 -05002335 MAILBOX_t *pmbox;
dea31012005-04-17 16:05:31 -05002336 LPFC_MBOXQ_t *pmb;
James Smart92d7f7b2007-06-17 19:56:38 -05002337 int rc;
2338 LIST_HEAD(cmplq);
dea31012005-04-17 16:05:31 -05002339
2340 phba->sli.slistat.mbox_event++;
2341
James Smart92d7f7b2007-06-17 19:56:38 -05002342 /* Get all completed mailbox buffers into the cmplq */
2343 spin_lock_irq(&phba->hbalock);
2344 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2345 spin_unlock_irq(&phba->hbalock);
2346
dea31012005-04-17 16:05:31 -05002347 /* Get a Mailbox buffer to set up mailbox commands for callback */
James Smart92d7f7b2007-06-17 19:56:38 -05002348 do {
2349 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2350 if (pmb == NULL)
2351 break;
2352
James Smart04c68492009-05-22 14:52:52 -04002353 pmbox = &pmb->u.mb;
dea31012005-04-17 16:05:31 -05002354
James Smart858c9f62007-06-17 19:56:39 -05002355 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2356 if (pmb->vport) {
2357 lpfc_debugfs_disc_trc(pmb->vport,
2358 LPFC_DISC_TRC_MBOX_VPORT,
2359 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2360 (uint32_t)pmbox->mbxCommand,
2361 pmbox->un.varWords[0],
2362 pmbox->un.varWords[1]);
2363 }
2364 else {
2365 lpfc_debugfs_disc_trc(phba->pport,
2366 LPFC_DISC_TRC_MBOX,
2367 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2368 (uint32_t)pmbox->mbxCommand,
2369 pmbox->un.varWords[0],
2370 pmbox->un.varWords[1]);
2371 }
2372 }
2373
dea31012005-04-17 16:05:31 -05002374 /*
2375 * It is a fatal error if unknown mbox command completion.
2376 */
2377 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2378 MBX_SHUTDOWN) {
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002379 /* Unknown mailbox command compl */
James Smart92d7f7b2007-06-17 19:56:38 -05002380 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04002381 "(%d):0323 Unknown Mailbox command "
James Smarta183a152011-10-10 21:32:43 -04002382 "x%x (x%x/x%x) Cmpl\n",
James Smart92d7f7b2007-06-17 19:56:38 -05002383 pmb->vport ? pmb->vport->vpi : 0,
James Smart04c68492009-05-22 14:52:52 -04002384 pmbox->mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04002385 lpfc_sli_config_mbox_subsys_get(phba,
2386 pmb),
2387 lpfc_sli_config_mbox_opcode_get(phba,
2388 pmb));
James Smart2e0fef82007-06-17 19:56:36 -05002389 phba->link_state = LPFC_HBA_ERROR;
dea31012005-04-17 16:05:31 -05002390 phba->work_hs = HS_FFER3;
2391 lpfc_handle_eratt(phba);
James Smart92d7f7b2007-06-17 19:56:38 -05002392 continue;
dea31012005-04-17 16:05:31 -05002393 }
2394
dea31012005-04-17 16:05:31 -05002395 if (pmbox->mbxStatus) {
2396 phba->sli.slistat.mbox_stat_err++;
2397 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2398 /* Mbox cmd cmpl error - RETRYing */
James Smart92d7f7b2007-06-17 19:56:38 -05002399 lpfc_printf_log(phba, KERN_INFO,
James Smarta183a152011-10-10 21:32:43 -04002400 LOG_MBOX | LOG_SLI,
2401 "(%d):0305 Mbox cmd cmpl "
2402 "error - RETRYing Data: x%x "
2403 "(x%x/x%x) x%x x%x x%x\n",
2404 pmb->vport ? pmb->vport->vpi : 0,
2405 pmbox->mbxCommand,
2406 lpfc_sli_config_mbox_subsys_get(phba,
2407 pmb),
2408 lpfc_sli_config_mbox_opcode_get(phba,
2409 pmb),
2410 pmbox->mbxStatus,
2411 pmbox->un.varWords[0],
2412 pmb->vport->port_state);
dea31012005-04-17 16:05:31 -05002413 pmbox->mbxStatus = 0;
2414 pmbox->mbxOwner = OWN_HOST;
dea31012005-04-17 16:05:31 -05002415 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
James Smart04c68492009-05-22 14:52:52 -04002416 if (rc != MBX_NOT_FINISHED)
James Smart92d7f7b2007-06-17 19:56:38 -05002417 continue;
dea31012005-04-17 16:05:31 -05002418 }
2419 }
2420
2421 /* Mailbox cmd <cmd> Cmpl <cmpl> */
James Smart92d7f7b2007-06-17 19:56:38 -05002422 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04002423 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
James Smarte74c03c2013-04-17 20:15:19 -04002424 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2425 "x%x x%x x%x\n",
James Smart92d7f7b2007-06-17 19:56:38 -05002426 pmb->vport ? pmb->vport->vpi : 0,
dea31012005-04-17 16:05:31 -05002427 pmbox->mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04002428 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2429 lpfc_sli_config_mbox_opcode_get(phba, pmb),
dea31012005-04-17 16:05:31 -05002430 pmb->mbox_cmpl,
2431 *((uint32_t *) pmbox),
2432 pmbox->un.varWords[0],
2433 pmbox->un.varWords[1],
2434 pmbox->un.varWords[2],
2435 pmbox->un.varWords[3],
2436 pmbox->un.varWords[4],
2437 pmbox->un.varWords[5],
2438 pmbox->un.varWords[6],
James Smarte74c03c2013-04-17 20:15:19 -04002439 pmbox->un.varWords[7],
2440 pmbox->un.varWords[8],
2441 pmbox->un.varWords[9],
2442 pmbox->un.varWords[10]);
dea31012005-04-17 16:05:31 -05002443
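		/* Invoke the completion handler registered with this mailbox command, if any */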
James Smart92d7f7b2007-06-17 19:56:38 -05002444 if (pmb->mbox_cmpl)
dea31012005-04-17 16:05:31 -05002445 pmb->mbox_cmpl(phba,pmb);
James Smart92d7f7b2007-06-17 19:56:38 -05002446 } while (1);
James Smart2e0fef82007-06-17 19:56:36 -05002447 return 0;
dea31012005-04-17 16:05:31 -05002448}
James Smart92d7f7b2007-06-17 19:56:38 -05002449
James Smarte59058c2008-08-24 21:49:00 -04002450/**
James Smart3621a712009-04-06 18:47:14 -04002451 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
James Smarte59058c2008-08-24 21:49:00 -04002452 * @phba: Pointer to HBA context object.
2453 * @pring: Pointer to driver SLI ring object.
2454 * @tag: buffer tag.
2455 *
2456 * This function is called with no lock held. When the QUE_BUFTAG_BIT is
2457 * set in the tag, the buffer was posted for a particular exchange and the
2458 * function returns the buffer without replacing it.
2459 * If the buffer is for unsolicited ELS or CT traffic, this function
2460 * returns the buffer and also posts another buffer to the firmware.
2461 **/
James Smart76bb24e2007-10-27 13:38:00 -04002462static struct lpfc_dmabuf *
2463lpfc_sli_get_buff(struct lpfc_hba *phba,
James Smart9f1e1b52008-12-04 22:39:40 -05002464 struct lpfc_sli_ring *pring,
2465 uint32_t tag)
James Smart76bb24e2007-10-27 13:38:00 -04002466{
James Smart9f1e1b52008-12-04 22:39:40 -05002467 struct hbq_dmabuf *hbq_entry;
2468
James Smart76bb24e2007-10-27 13:38:00 -04002469 if (tag & QUE_BUFTAG_BIT)
2470 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
James Smart9f1e1b52008-12-04 22:39:40 -05002471 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2472 if (!hbq_entry)
2473 return NULL;
2474 return &hbq_entry->dbuf;
James Smart76bb24e2007-10-27 13:38:00 -04002475}
James Smart57127f12007-10-27 13:37:05 -04002476
James Smart3772a992009-05-22 14:50:54 -04002477/**
2478 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2479 * @phba: Pointer to HBA context object.
2480 * @pring: Pointer to driver SLI ring object.
2481 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2482 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2483 * @fch_type: the type for the first frame of the sequence.
2484 *
2485 * This function is called with no lock held. This function uses the r_ctl and
2486 * type of the received sequence to find the correct callback function to call
2487 * to process the sequence.
2488 **/
2489static int
2490lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2491 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2492 uint32_t fch_type)
2493{
2494 int i;
2495
James Smartf358dd02017-02-12 13:52:34 -08002496 switch (fch_type) {
2497 case FC_TYPE_NVME:
2498 /* todo: tgt: forward NVME LS to transport */
2499 return 1;
2500 default:
2501 break;
2502 }
2503
James Smart3772a992009-05-22 14:50:54 -04002504	/* Unsolicited responses */
2505 if (pring->prt[0].profile) {
2506 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2507 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2508 saveq);
2509 return 1;
2510 }
2511 /* We must search, based on rctl / type
2512 for the right routine */
2513 for (i = 0; i < pring->num_mask; i++) {
2514 if ((pring->prt[i].rctl == fch_r_ctl) &&
2515 (pring->prt[i].type == fch_type)) {
2516 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2517 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2518 (phba, pring, saveq);
2519 return 1;
2520 }
2521 }
2522 return 0;
2523}
James Smarte59058c2008-08-24 21:49:00 -04002524
2525/**
James Smart3621a712009-04-06 18:47:14 -04002526 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
James Smarte59058c2008-08-24 21:49:00 -04002527 * @phba: Pointer to HBA context object.
2528 * @pring: Pointer to driver SLI ring object.
2529 * @saveq: Pointer to the unsolicited iocb.
2530 *
2531 * This function is called with no lock held by the ring event handler
2532 * when there is an unsolicited iocb posted to the response ring by the
2533 * firmware. This function gets the buffer associated with the iocbs
2534 * and calls the event handler for the ring. This function handles both
2535 * qring buffers and hbq buffers.
2536 * When the function returns 1, the caller can free the iocb object;
2537 * otherwise, upper layer functions will free the iocb objects.
2538 **/
dea31012005-04-17 16:05:31 -05002539static int
2540lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2541 struct lpfc_iocbq *saveq)
2542{
2543 IOCB_t * irsp;
2544 WORD5 * w5p;
2545 uint32_t Rctl, Type;
James Smart76bb24e2007-10-27 13:38:00 -04002546 struct lpfc_iocbq *iocbq;
James Smart3163f722008-02-08 18:50:25 -05002547 struct lpfc_dmabuf *dmzbuf;
dea31012005-04-17 16:05:31 -05002548
dea31012005-04-17 16:05:31 -05002549 irsp = &(saveq->iocb);
James Smart57127f12007-10-27 13:37:05 -04002550
2551 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2552 if (pring->lpfc_sli_rcv_async_status)
2553 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2554 else
2555 lpfc_printf_log(phba,
2556 KERN_WARNING,
2557 LOG_SLI,
2558 "0316 Ring %d handler: unexpected "
2559 "ASYNC_STATUS iocb received evt_code "
2560 "0x%x\n",
2561 pring->ringno,
2562 irsp->un.asyncstat.evt_code);
2563 return 1;
2564 }
2565
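	/* A RET_XRI64_CX completion simply returns HBQ buffers to the driver;
	 * free each buffer referenced by its BDEs (up to three) and consume the iocb.
	 */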
James Smart3163f722008-02-08 18:50:25 -05002566 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2567 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2568 if (irsp->ulpBdeCount > 0) {
2569 dmzbuf = lpfc_sli_get_buff(phba, pring,
2570 irsp->un.ulpWord[3]);
2571 lpfc_in_buf_free(phba, dmzbuf);
2572 }
2573
2574 if (irsp->ulpBdeCount > 1) {
2575 dmzbuf = lpfc_sli_get_buff(phba, pring,
2576 irsp->unsli3.sli3Words[3]);
2577 lpfc_in_buf_free(phba, dmzbuf);
2578 }
2579
2580 if (irsp->ulpBdeCount > 2) {
2581 dmzbuf = lpfc_sli_get_buff(phba, pring,
2582 irsp->unsli3.sli3Words[7]);
2583 lpfc_in_buf_free(phba, dmzbuf);
2584 }
2585
2586 return 1;
2587 }
2588
James Smart92d7f7b2007-06-17 19:56:38 -05002589 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
James Smart76bb24e2007-10-27 13:38:00 -04002590 if (irsp->ulpBdeCount != 0) {
2591 saveq->context2 = lpfc_sli_get_buff(phba, pring,
James Smart92d7f7b2007-06-17 19:56:38 -05002592 irsp->un.ulpWord[3]);
James Smart76bb24e2007-10-27 13:38:00 -04002593 if (!saveq->context2)
2594 lpfc_printf_log(phba,
2595 KERN_ERR,
2596 LOG_SLI,
2597 "0341 Ring %d Cannot find buffer for "
2598 "an unsolicited iocb. tag 0x%x\n",
2599 pring->ringno,
2600 irsp->un.ulpWord[3]);
James Smart76bb24e2007-10-27 13:38:00 -04002601 }
2602 if (irsp->ulpBdeCount == 2) {
2603 saveq->context3 = lpfc_sli_get_buff(phba, pring,
James Smart51ef4c22007-08-02 11:10:31 -04002604 irsp->unsli3.sli3Words[7]);
James Smart76bb24e2007-10-27 13:38:00 -04002605 if (!saveq->context3)
2606 lpfc_printf_log(phba,
2607 KERN_ERR,
2608 LOG_SLI,
2609 "0342 Ring %d Cannot find buffer for an"
2610 " unsolicited iocb. tag 0x%x\n",
2611 pring->ringno,
2612 irsp->unsli3.sli3Words[7]);
2613 }
2614 list_for_each_entry(iocbq, &saveq->list, list) {
James Smart76bb24e2007-10-27 13:38:00 -04002615 irsp = &(iocbq->iocb);
James Smart76bb24e2007-10-27 13:38:00 -04002616 if (irsp->ulpBdeCount != 0) {
2617 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2618 irsp->un.ulpWord[3]);
James Smart9c2face2008-01-11 01:53:18 -05002619 if (!iocbq->context2)
James Smart76bb24e2007-10-27 13:38:00 -04002620 lpfc_printf_log(phba,
2621 KERN_ERR,
2622 LOG_SLI,
2623 "0343 Ring %d Cannot find "
2624 "buffer for an unsolicited iocb"
2625 ". tag 0x%x\n", pring->ringno,
2626 irsp->un.ulpWord[3]);
2627 }
2628 if (irsp->ulpBdeCount == 2) {
2629 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2630 irsp->unsli3.sli3Words[7]);
James Smart9c2face2008-01-11 01:53:18 -05002631 if (!iocbq->context3)
James Smart76bb24e2007-10-27 13:38:00 -04002632 lpfc_printf_log(phba,
2633 KERN_ERR,
2634 LOG_SLI,
2635 "0344 Ring %d Cannot find "
2636 "buffer for an unsolicited "
2637 "iocb. tag 0x%x\n",
2638 pring->ringno,
2639 irsp->unsli3.sli3Words[7]);
2640 }
2641 }
James Smart92d7f7b2007-06-17 19:56:38 -05002642 }
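	/* Multi-frame sequences are accumulated on iocb_continue_saveq, keyed by
	 * OX_ID, until the final (non-intermediate) response frame arrives.
	 */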
James Smart9c2face2008-01-11 01:53:18 -05002643 if (irsp->ulpBdeCount != 0 &&
2644 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2645 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2646 int found = 0;
2647
2648 /* search continue save q for same XRI */
2649 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
James Smart7851fe22011-07-22 18:36:52 -04002650 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2651 saveq->iocb.unsli3.rcvsli3.ox_id) {
James Smart9c2face2008-01-11 01:53:18 -05002652 list_add_tail(&saveq->list, &iocbq->list);
2653 found = 1;
2654 break;
2655 }
2656 }
2657 if (!found)
2658 list_add_tail(&saveq->clist,
2659 &pring->iocb_continue_saveq);
2660 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2661 list_del_init(&iocbq->clist);
2662 saveq = iocbq;
2663 irsp = &(saveq->iocb);
2664 } else
2665 return 0;
2666 }
2667 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2668 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2669 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
James Smart6a9c52c2009-10-02 15:16:51 -04002670 Rctl = FC_RCTL_ELS_REQ;
2671 Type = FC_TYPE_ELS;
James Smart9c2face2008-01-11 01:53:18 -05002672 } else {
2673 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2674 Rctl = w5p->hcsw.Rctl;
2675 Type = w5p->hcsw.Type;
2676
2677 /* Firmware Workaround */
2678 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2679 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2680 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
James Smart6a9c52c2009-10-02 15:16:51 -04002681 Rctl = FC_RCTL_ELS_REQ;
2682 Type = FC_TYPE_ELS;
James Smart9c2face2008-01-11 01:53:18 -05002683 w5p->hcsw.Rctl = Rctl;
2684 w5p->hcsw.Type = Type;
2685 }
2686 }
James Smart92d7f7b2007-06-17 19:56:38 -05002687
James Smart3772a992009-05-22 14:50:54 -04002688 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
James Smart92d7f7b2007-06-17 19:56:38 -05002689 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04002690 "0313 Ring %d handler: unexpected Rctl x%x "
James Smart92d7f7b2007-06-17 19:56:38 -05002691 "Type x%x received\n",
James Smarte8b62012007-08-02 11:10:09 -04002692 pring->ringno, Rctl, Type);
James Smart3772a992009-05-22 14:50:54 -04002693
James Smart92d7f7b2007-06-17 19:56:38 -05002694 return 1;
dea31012005-04-17 16:05:31 -05002695}
2696
James Smarte59058c2008-08-24 21:49:00 -04002697/**
James Smart3621a712009-04-06 18:47:14 -04002698 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
James Smarte59058c2008-08-24 21:49:00 -04002699 * @phba: Pointer to HBA context object.
2700 * @pring: Pointer to driver SLI ring object.
2701 * @prspiocb: Pointer to response iocb object.
2702 *
2703 * This function looks up the iocbq_lookup table to get the command iocb
2704 * corresponding to the given response iocb using the iotag of the
2705 * response iocb. This function is called with the hbalock held.
2706 * This function returns the command iocb object if it finds the command
2707 * iocb else returns NULL.
2708 **/
dea31012005-04-17 16:05:31 -05002709static struct lpfc_iocbq *
James Smart2e0fef82007-06-17 19:56:36 -05002710lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2711 struct lpfc_sli_ring *pring,
2712 struct lpfc_iocbq *prspiocb)
dea31012005-04-17 16:05:31 -05002713{
dea31012005-04-17 16:05:31 -05002714 struct lpfc_iocbq *cmd_iocb = NULL;
2715 uint16_t iotag;
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01002716 lockdep_assert_held(&phba->hbalock);
dea31012005-04-17 16:05:31 -05002717
James Bottomley604a3e32005-10-29 10:28:33 -05002718 iotag = prspiocb->iocb.ulpIoTag;
dea31012005-04-17 16:05:31 -05002719
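	/* The response iotag indexes directly into the driver's iocbq_lookup array */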
James Bottomley604a3e32005-10-29 10:28:33 -05002720 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2721 cmd_iocb = phba->sli.iocbq_lookup[iotag];
James Smart4f2e66c2012-05-09 21:17:07 -04002722 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
James Smart89533e92016-10-13 15:06:15 -07002723 /* remove from txcmpl queue list */
2724 list_del_init(&cmd_iocb->list);
James Smart4f2e66c2012-05-09 21:17:07 -04002725 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
James Smart89533e92016-10-13 15:06:15 -07002726 return cmd_iocb;
James Smart2a9bf3d2010-06-07 15:24:45 -04002727 }
dea31012005-04-17 16:05:31 -05002728 }
2729
dea31012005-04-17 16:05:31 -05002730 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart89533e92016-10-13 15:06:15 -07002731 "0317 iotag x%x is out of "
James Bottomley604a3e32005-10-29 10:28:33 -05002732 "range: max iotag x%x wd0 x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04002733 iotag, phba->sli.last_iotag,
James Bottomley604a3e32005-10-29 10:28:33 -05002734 *(((uint32_t *) &prspiocb->iocb) + 7));
dea31012005-04-17 16:05:31 -05002735 return NULL;
2736}
2737
James Smarte59058c2008-08-24 21:49:00 -04002738/**
James Smart3772a992009-05-22 14:50:54 -04002739 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2740 * @phba: Pointer to HBA context object.
2741 * @pring: Pointer to driver SLI ring object.
2742 * @iotag: IOCB tag.
2743 *
2744 * This function looks up the iocbq_lookup table to get the command iocb
2745 * corresponding to the given iotag. This function is called with the
2746 * hbalock held.
2747 * This function returns the command iocb object if it finds the command
2748 * iocb else returns NULL.
2749 **/
2750static struct lpfc_iocbq *
2751lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2752 struct lpfc_sli_ring *pring, uint16_t iotag)
2753{
James Smart895427b2017-02-12 13:52:30 -08002754 struct lpfc_iocbq *cmd_iocb = NULL;
James Smart3772a992009-05-22 14:50:54 -04002755
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01002756 lockdep_assert_held(&phba->hbalock);
James Smart3772a992009-05-22 14:50:54 -04002757 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2758 cmd_iocb = phba->sli.iocbq_lookup[iotag];
James Smart4f2e66c2012-05-09 21:17:07 -04002759 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2760 /* remove from txcmpl queue list */
2761 list_del_init(&cmd_iocb->list);
2762 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
James Smart4f2e66c2012-05-09 21:17:07 -04002763 return cmd_iocb;
James Smart2a9bf3d2010-06-07 15:24:45 -04002764 }
James Smart3772a992009-05-22 14:50:54 -04002765 }
James Smart89533e92016-10-13 15:06:15 -07002766
James Smart3772a992009-05-22 14:50:54 -04002767 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart895427b2017-02-12 13:52:30 -08002768 "0372 iotag x%x lookup error: max iotag (x%x) "
2769 "iocb_flag x%x\n",
2770 iotag, phba->sli.last_iotag,
2771 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
James Smart3772a992009-05-22 14:50:54 -04002772 return NULL;
2773}
2774
2775/**
James Smart3621a712009-04-06 18:47:14 -04002776 * lpfc_sli_process_sol_iocb - process solicited iocb completion
James Smarte59058c2008-08-24 21:49:00 -04002777 * @phba: Pointer to HBA context object.
2778 * @pring: Pointer to driver SLI ring object.
2779 * @saveq: Pointer to the response iocb to be processed.
2780 *
2781 * This function is called by the ring event handler for non-fcp
2782 * rings when there is a new response iocb in the response ring.
2783 * The caller is not required to hold any locks. This function
2784 * gets the command iocb associated with the response iocb and
2785 * calls the completion handler for the command iocb. If there
2786 * is no completion handler, the function will free the resources
2787 * associated with command iocb. If the response iocb is for
2788 * an already aborted command iocb, the status of the completion
2789 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2790 * This function always returns 1.
2791 **/
dea31012005-04-17 16:05:31 -05002792static int
James Smart2e0fef82007-06-17 19:56:36 -05002793lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dea31012005-04-17 16:05:31 -05002794 struct lpfc_iocbq *saveq)
2795{
James Smart2e0fef82007-06-17 19:56:36 -05002796 struct lpfc_iocbq *cmdiocbp;
dea31012005-04-17 16:05:31 -05002797 int rc = 1;
2798 unsigned long iflag;
2799
2800 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
James Smart2e0fef82007-06-17 19:56:36 -05002801 spin_lock_irqsave(&phba->hbalock, iflag);
James Bottomley604a3e32005-10-29 10:28:33 -05002802 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
James Smart2e0fef82007-06-17 19:56:36 -05002803 spin_unlock_irqrestore(&phba->hbalock, iflag);
2804
dea31012005-04-17 16:05:31 -05002805 if (cmdiocbp) {
2806 if (cmdiocbp->iocb_cmpl) {
2807 /*
James Smartea2151b2008-09-07 11:52:10 -04002808			 * If an ELS command failed, send an event to the mgmt
2809 * application.
2810 */
2811 if (saveq->iocb.ulpStatus &&
2812 (pring->ringno == LPFC_ELS_RING) &&
2813 (cmdiocbp->iocb.ulpCommand ==
2814 CMD_ELS_REQUEST64_CR))
2815 lpfc_send_els_failure_event(phba,
2816 cmdiocbp, saveq);
2817
2818 /*
dea31012005-04-17 16:05:31 -05002819 * Post all ELS completions to the worker thread.
2820			 * All others are passed to the completion callback.
2821 */
2822 if (pring->ringno == LPFC_ELS_RING) {
James Smart341af102010-01-26 23:07:37 -05002823 if ((phba->sli_rev < LPFC_SLI_REV4) &&
2824 (cmdiocbp->iocb_flag &
2825 LPFC_DRIVER_ABORTED)) {
2826 spin_lock_irqsave(&phba->hbalock,
2827 iflag);
James Smart07951072007-04-25 09:51:38 -04002828 cmdiocbp->iocb_flag &=
2829 ~LPFC_DRIVER_ABORTED;
James Smart341af102010-01-26 23:07:37 -05002830 spin_unlock_irqrestore(&phba->hbalock,
2831 iflag);
James Smart07951072007-04-25 09:51:38 -04002832 saveq->iocb.ulpStatus =
2833 IOSTAT_LOCAL_REJECT;
2834 saveq->iocb.un.ulpWord[4] =
2835 IOERR_SLI_ABORTED;
James Smart0ff10d42008-01-11 01:52:36 -05002836
2837				/* Firmware could still be in the process
2838				 * of DMAing the payload, so don't free the
2839				 * data buffer until after a heartbeat.
2840 */
James Smart341af102010-01-26 23:07:37 -05002841 spin_lock_irqsave(&phba->hbalock,
2842 iflag);
James Smart0ff10d42008-01-11 01:52:36 -05002843 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
James Smart341af102010-01-26 23:07:37 -05002844 spin_unlock_irqrestore(&phba->hbalock,
2845 iflag);
2846 }
James Smart0f65ff62010-02-26 14:14:23 -05002847 if (phba->sli_rev == LPFC_SLI_REV4) {
2848 if (saveq->iocb_flag &
2849 LPFC_EXCHANGE_BUSY) {
2850 /* Set cmdiocb flag for the
2851 * exchange busy so sgl (xri)
2852 * will not be released until
2853 * the abort xri is received
2854 * from hba.
2855 */
2856 spin_lock_irqsave(
2857 &phba->hbalock, iflag);
2858 cmdiocbp->iocb_flag |=
2859 LPFC_EXCHANGE_BUSY;
2860 spin_unlock_irqrestore(
2861 &phba->hbalock, iflag);
2862 }
2863 if (cmdiocbp->iocb_flag &
2864 LPFC_DRIVER_ABORTED) {
2865 /*
2866 * Clear LPFC_DRIVER_ABORTED
2867 * bit in case it was driver
2868 * initiated abort.
2869 */
2870 spin_lock_irqsave(
2871 &phba->hbalock, iflag);
2872 cmdiocbp->iocb_flag &=
2873 ~LPFC_DRIVER_ABORTED;
2874 spin_unlock_irqrestore(
2875 &phba->hbalock, iflag);
2876 cmdiocbp->iocb.ulpStatus =
2877 IOSTAT_LOCAL_REJECT;
2878 cmdiocbp->iocb.un.ulpWord[4] =
2879 IOERR_ABORT_REQUESTED;
2880 /*
2881					 * For SLI4, irsiocb contains
2882					 * NO_XRI in sli_xritag, so it
2883					 * does not affect the sgl (xri)
2884					 * release process.
2885 */
2886 saveq->iocb.ulpStatus =
2887 IOSTAT_LOCAL_REJECT;
2888 saveq->iocb.un.ulpWord[4] =
2889 IOERR_SLI_ABORTED;
2890 spin_lock_irqsave(
2891 &phba->hbalock, iflag);
2892 saveq->iocb_flag |=
2893 LPFC_DELAY_MEM_FREE;
2894 spin_unlock_irqrestore(
2895 &phba->hbalock, iflag);
2896 }
James Smart07951072007-04-25 09:51:38 -04002897 }
dea31012005-04-17 16:05:31 -05002898 }
James Smart2e0fef82007-06-17 19:56:36 -05002899 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
James Bottomley604a3e32005-10-29 10:28:33 -05002900 } else
2901 lpfc_sli_release_iocbq(phba, cmdiocbp);
dea31012005-04-17 16:05:31 -05002902 } else {
2903 /*
2904 * Unknown initiating command based on the response iotag.
2905 * This could be the case on the ELS ring because of
2906 * lpfc_els_abort().
2907 */
2908 if (pring->ringno != LPFC_ELS_RING) {
2909 /*
2910 * Ring <ringno> handler: unexpected completion IoTag
2911 * <IoTag>
2912 */
James Smarta257bf92009-04-06 18:48:10 -04002913 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04002914 "0322 Ring %d handler: "
2915 "unexpected completion IoTag x%x "
2916 "Data: x%x x%x x%x x%x\n",
2917 pring->ringno,
2918 saveq->iocb.ulpIoTag,
2919 saveq->iocb.ulpStatus,
2920 saveq->iocb.un.ulpWord[4],
2921 saveq->iocb.ulpCommand,
2922 saveq->iocb.ulpContext);
dea31012005-04-17 16:05:31 -05002923 }
2924 }
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04002925
dea31012005-04-17 16:05:31 -05002926 return rc;
2927}
2928
James Smarte59058c2008-08-24 21:49:00 -04002929/**
James Smart3621a712009-04-06 18:47:14 -04002930 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
James Smarte59058c2008-08-24 21:49:00 -04002931 * @phba: Pointer to HBA context object.
2932 * @pring: Pointer to driver SLI ring object.
2933 *
2934 * This function is called from the iocb ring event handlers when
2935 * the put pointer is ahead of the get pointer for a ring. This function signals
2936 * an error attention condition to the worker thread and the worker
2937 * thread will transition the HBA to the offline state.
2938 **/
James Smart2e0fef82007-06-17 19:56:36 -05002939static void
2940lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002941{
James Smart34b02dc2008-08-24 21:49:55 -04002942 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002943 /*
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02002944 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002945 * rsp ring <portRspMax>
2946 */
2947 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04002948 "0312 Ring %d handler: portRspPut %d "
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02002949 "is bigger than rsp ring %d\n",
James Smarte8b62012007-08-02 11:10:09 -04002950 pring->ringno, le32_to_cpu(pgp->rspPutInx),
James Smart7e56aa22012-08-03 12:35:34 -04002951 pring->sli.sli3.numRiocb);
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002952
James Smart2e0fef82007-06-17 19:56:36 -05002953 phba->link_state = LPFC_HBA_ERROR;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002954
2955 /*
2956 * All error attention handlers are posted to
2957 * worker thread
2958 */
2959 phba->work_ha |= HA_ERATT;
2960 phba->work_hs = HS_FFER3;
James Smart92d7f7b2007-06-17 19:56:38 -05002961
James Smart5e9d9b82008-06-14 22:52:53 -04002962 lpfc_worker_wake_up(phba);
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002963
2964 return;
2965}
2966
James Smarte59058c2008-08-24 21:49:00 -04002967/**
James Smart3621a712009-04-06 18:47:14 -04002968 * lpfc_poll_eratt - Error attention polling timer timeout handler
James Smart93996272008-08-24 21:50:30 -04002969 * @ptr: Pointer to address of HBA context object.
2970 *
2971 * This function is invoked by the Error Attention polling timer when the
2972 * timer times out. It will check the SLI Error Attention register for
2973 * possible attention events. If so, it will post an Error Attention event
2974 * and wake up worker thread to process it. Otherwise, it will set up the
2975 * Error Attention polling timer for the next poll.
2976 **/
2977void lpfc_poll_eratt(unsigned long ptr)
2978{
2979 struct lpfc_hba *phba;
James Smarteb016562014-09-03 12:58:06 -04002980 uint32_t eratt = 0;
James Smartaa6fbb72012-08-03 12:36:03 -04002981 uint64_t sli_intr, cnt;
James Smart93996272008-08-24 21:50:30 -04002982
2983 phba = (struct lpfc_hba *)ptr;
2984
James Smartaa6fbb72012-08-03 12:36:03 -04002985 /* Here we will also keep track of interrupts per sec of the hba */
2986 sli_intr = phba->sli.slistat.sli_intr;
2987
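	/* Account for 64-bit counter wraparound when computing interrupts since the last poll */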
2988 if (phba->sli.slistat.sli_prev_intr > sli_intr)
2989 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
2990 sli_intr);
2991 else
2992 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
2993
James Smart65791f12016-07-06 12:35:56 -07002994 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
2995 do_div(cnt, phba->eratt_poll_interval);
James Smartaa6fbb72012-08-03 12:36:03 -04002996 phba->sli.slistat.sli_ips = cnt;
2997
2998 phba->sli.slistat.sli_prev_intr = sli_intr;
2999
James Smart93996272008-08-24 21:50:30 -04003000 /* Check chip HA register for error event */
3001 eratt = lpfc_sli_check_eratt(phba);
3002
3003 if (eratt)
3004 /* Tell the worker thread there is work to do */
3005 lpfc_worker_wake_up(phba);
3006 else
3007 /* Restart the timer for next eratt poll */
James Smart256ec0d2013-04-17 20:14:58 -04003008 mod_timer(&phba->eratt_poll,
3009 jiffies +
James Smart65791f12016-07-06 12:35:56 -07003010 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
James Smart93996272008-08-24 21:50:30 -04003011 return;
3012}
3013
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003014
James Smarte59058c2008-08-24 21:49:00 -04003015/**
James Smart3621a712009-04-06 18:47:14 -04003016 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
James Smarte59058c2008-08-24 21:49:00 -04003017 * @phba: Pointer to HBA context object.
3018 * @pring: Pointer to driver SLI ring object.
3019 * @mask: Host attention register mask for this ring.
3020 *
3021 * This function is called from the interrupt context when there is a ring
3022 * event for the fcp ring. The caller does not hold any lock.
3023 * The function processes each response iocb in the response ring, chaining
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003024 * the iocbs together until it finds the iocb with the LE bit set. The function
James Smarte59058c2008-08-24 21:49:00 -04003025 * will call the completion handler of the command iocb
3026 * if the response iocb indicates a completion for a command iocb or it is
3027 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3028 * function if this is an unsolicited iocb.
dea31012005-04-17 16:05:31 -05003029 * This routine presumes LPFC_FCP_RING handling and doesn't bother
James Smart45ed1192009-10-02 15:17:02 -04003030 * to check it explicitly.
3031 */
3032int
James Smart2e0fef82007-06-17 19:56:36 -05003033lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3034 struct lpfc_sli_ring *pring, uint32_t mask)
dea31012005-04-17 16:05:31 -05003035{
James Smart34b02dc2008-08-24 21:49:55 -04003036 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
dea31012005-04-17 16:05:31 -05003037 IOCB_t *irsp = NULL;
James.Smart@Emulex.Com87f6eaf2005-06-25 10:34:13 -04003038 IOCB_t *entry = NULL;
dea31012005-04-17 16:05:31 -05003039 struct lpfc_iocbq *cmdiocbq = NULL;
3040 struct lpfc_iocbq rspiocbq;
dea31012005-04-17 16:05:31 -05003041 uint32_t status;
3042 uint32_t portRspPut, portRspMax;
3043 int rc = 1;
3044 lpfc_iocb_type type;
3045 unsigned long iflag;
3046 uint32_t rsp_cmpl = 0;
dea31012005-04-17 16:05:31 -05003047
James Smart2e0fef82007-06-17 19:56:36 -05003048 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003049 pring->stats.iocb_event++;
3050
dea31012005-04-17 16:05:31 -05003051 /*
3052 * The next available response entry should never exceed the maximum
3053 * entries. If it does, treat it as an adapter hardware error.
3054 */
James Smart7e56aa22012-08-03 12:35:34 -04003055 portRspMax = pring->sli.sli3.numRiocb;
dea31012005-04-17 16:05:31 -05003056 portRspPut = le32_to_cpu(pgp->rspPutInx);
3057 if (unlikely(portRspPut >= portRspMax)) {
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003058 lpfc_sli_rsp_pointers_error(phba, pring);
James Smart2e0fef82007-06-17 19:56:36 -05003059 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003060 return 1;
3061 }
James Smart45ed1192009-10-02 15:17:02 -04003062 if (phba->fcp_ring_in_use) {
3063 spin_unlock_irqrestore(&phba->hbalock, iflag);
3064 return 1;
3065 } else
3066 phba->fcp_ring_in_use = 1;
dea31012005-04-17 16:05:31 -05003067
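	/* Read barrier: order reads of the response entries after the put index read above */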
3068 rmb();
James Smart7e56aa22012-08-03 12:35:34 -04003069 while (pring->sli.sli3.rspidx != portRspPut) {
James.Smart@Emulex.Com87f6eaf2005-06-25 10:34:13 -04003070 /*
3071 * Fetch an entry off the ring and copy it into a local data
3072 * structure. The copy involves a byte-swap since the
3073 * network byte order and pci byte orders are different.
3074 */
James Smarted957682007-06-17 19:56:37 -05003075 entry = lpfc_resp_iocb(phba, pring);
James Smart858c9f62007-06-17 19:56:39 -05003076 phba->last_completion_time = jiffies;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003077
James Smart7e56aa22012-08-03 12:35:34 -04003078 if (++pring->sli.sli3.rspidx >= portRspMax)
3079 pring->sli.sli3.rspidx = 0;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05003080
James.Smart@Emulex.Com87f6eaf2005-06-25 10:34:13 -04003081 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3082 (uint32_t *) &rspiocbq.iocb,
James Smarted957682007-06-17 19:56:37 -05003083 phba->iocb_rsp_size);
James Smarta4bc3372006-12-02 13:34:16 -05003084 INIT_LIST_HEAD(&(rspiocbq.list));
James.Smart@Emulex.Com87f6eaf2005-06-25 10:34:13 -04003085 irsp = &rspiocbq.iocb;
3086
dea31012005-04-17 16:05:31 -05003087 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3088 pring->stats.iocb_rsp++;
3089 rsp_cmpl++;
3090
3091 if (unlikely(irsp->ulpStatus)) {
James Smart92d7f7b2007-06-17 19:56:38 -05003092 /*
3093			 * If resource errors are reported from the HBA, reduce
3094			 * the queue depths of the SCSI device.
3095 */
3096 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
James Smarte3d2b802012-08-14 14:25:43 -04003097 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3098 IOERR_NO_RESOURCES)) {
James Smart92d7f7b2007-06-17 19:56:38 -05003099 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart3772a992009-05-22 14:50:54 -04003100 phba->lpfc_rampdown_queue_depth(phba);
James Smart92d7f7b2007-06-17 19:56:38 -05003101 spin_lock_irqsave(&phba->hbalock, iflag);
3102 }
3103
dea31012005-04-17 16:05:31 -05003104 /* Rsp ring <ringno> error: IOCB */
3105 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04003106 "0336 Rsp Ring %d error: IOCB Data: "
James Smart92d7f7b2007-06-17 19:56:38 -05003107 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04003108 pring->ringno,
James Smart92d7f7b2007-06-17 19:56:38 -05003109 irsp->un.ulpWord[0],
3110 irsp->un.ulpWord[1],
3111 irsp->un.ulpWord[2],
3112 irsp->un.ulpWord[3],
3113 irsp->un.ulpWord[4],
3114 irsp->un.ulpWord[5],
James Smartd7c255b2008-08-24 21:50:00 -04003115 *(uint32_t *)&irsp->un1,
3116 *((uint32_t *)&irsp->un1 + 1));
dea31012005-04-17 16:05:31 -05003117 }
3118
3119 switch (type) {
3120 case LPFC_ABORT_IOCB:
3121 case LPFC_SOL_IOCB:
3122 /*
3123 * Idle exchange closed via ABTS from port. No iocb
3124 * resources need to be recovered.
3125 */
3126 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
James Smartdca94792006-08-01 07:34:08 -04003127 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04003128 "0333 IOCB cmd 0x%x"
James Smartdca94792006-08-01 07:34:08 -04003129 " processed. Skipping"
James Smart92d7f7b2007-06-17 19:56:38 -05003130 " completion\n",
James Smartdca94792006-08-01 07:34:08 -04003131 irsp->ulpCommand);
dea31012005-04-17 16:05:31 -05003132 break;
3133 }
3134
James Bottomley604a3e32005-10-29 10:28:33 -05003135 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3136 &rspiocbq);
James Smart0f65ff62010-02-26 14:14:23 -05003137 if (unlikely(!cmdiocbq))
3138 break;
3139 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3140 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3141 if (cmdiocbq->iocb_cmpl) {
3142 spin_unlock_irqrestore(&phba->hbalock, iflag);
3143 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3144 &rspiocbq);
3145 spin_lock_irqsave(&phba->hbalock, iflag);
3146 }
dea31012005-04-17 16:05:31 -05003147 break;
James Smarta4bc3372006-12-02 13:34:16 -05003148 case LPFC_UNSOL_IOCB:
James Smart2e0fef82007-06-17 19:56:36 -05003149 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smarta4bc3372006-12-02 13:34:16 -05003150 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
James Smart2e0fef82007-06-17 19:56:36 -05003151 spin_lock_irqsave(&phba->hbalock, iflag);
James Smarta4bc3372006-12-02 13:34:16 -05003152 break;
dea31012005-04-17 16:05:31 -05003153 default:
3154 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3155 char adaptermsg[LPFC_MAX_ADPTMSG];
3156 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3157 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3158 MAX_MSG_DATA);
Joe Perches898eb712007-10-18 03:06:30 -07003159 dev_warn(&((phba->pcidev)->dev),
3160 "lpfc%d: %s\n",
dea31012005-04-17 16:05:31 -05003161 phba->brd_no, adaptermsg);
3162 } else {
3163 /* Unknown IOCB command */
3164 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04003165 "0334 Unknown IOCB command "
James Smart92d7f7b2007-06-17 19:56:38 -05003166 "Data: x%x, x%x x%x x%x x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04003167 type, irsp->ulpCommand,
James Smart92d7f7b2007-06-17 19:56:38 -05003168 irsp->ulpStatus,
3169 irsp->ulpIoTag,
3170 irsp->ulpContext);
dea31012005-04-17 16:05:31 -05003171 }
3172 break;
3173 }
3174
3175 /*
3176 * The response IOCB has been processed. Update the ring
3177 * pointer in SLIM. If the port response put pointer has not
3178 * been updated, sync the pgp->rspPutInx and fetch the new port
3179 * response put pointer.
3180 */
James Smart7e56aa22012-08-03 12:35:34 -04003181 writel(pring->sli.sli3.rspidx,
3182 &phba->host_gp[pring->ringno].rspGetInx);
dea31012005-04-17 16:05:31 -05003183
James Smart7e56aa22012-08-03 12:35:34 -04003184 if (pring->sli.sli3.rspidx == portRspPut)
dea31012005-04-17 16:05:31 -05003185 portRspPut = le32_to_cpu(pgp->rspPutInx);
3186 }
3187
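	/* If responses were processed and the port requested notification, set R0RE_RSP in the CA register */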
3188 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3189 pring->stats.iocb_rsp_full++;
3190 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3191 writel(status, phba->CAregaddr);
3192 readl(phba->CAregaddr);
3193 }
3194 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3195 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3196 pring->stats.iocb_cmd_empty++;
3197
3198 /* Force update of the local copy of cmdGetInx */
James Smart7e56aa22012-08-03 12:35:34 -04003199 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea31012005-04-17 16:05:31 -05003200 lpfc_sli_resume_iocb(phba, pring);
3201
3202 if ((pring->lpfc_sli_cmd_available))
3203 (pring->lpfc_sli_cmd_available) (phba, pring);
3204
3205 }
3206
James Smart45ed1192009-10-02 15:17:02 -04003207 phba->fcp_ring_in_use = 0;
James Smart2e0fef82007-06-17 19:56:36 -05003208 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003209 return rc;
3210}
3211
James Smarte59058c2008-08-24 21:49:00 -04003212/**
James Smart3772a992009-05-22 14:50:54 -04003213 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3214 * @phba: Pointer to HBA context object.
3215 * @pring: Pointer to driver SLI ring object.
3216 * @rspiocbp: Pointer to driver response IOCB object.
3217 *
3218 * This function is called from the worker thread when there is a slow-path
3219 * response IOCB to process. This function chains all the response iocbs until
3220 * seeing the iocb with the LE bit set. The function will call
3221 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3222 * completion of a command iocb. The function will call the
3223 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3224 * The function frees the resources or calls the completion handler if this
3225 * iocb is an abort completion. The function returns NULL when the response
3226 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3227 * this function shall chain the iocb on to the iocb_continueq and return the
3228 * response iocb passed in.
3229 **/
3230static struct lpfc_iocbq *
3231lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3232 struct lpfc_iocbq *rspiocbp)
3233{
3234 struct lpfc_iocbq *saveq;
3235 struct lpfc_iocbq *cmdiocbp;
3236 struct lpfc_iocbq *next_iocb;
3237 IOCB_t *irsp = NULL;
3238 uint32_t free_saveq;
3239 uint8_t iocb_cmd_type;
3240 lpfc_iocb_type type;
3241 unsigned long iflag;
3242 int rc;
3243
3244 spin_lock_irqsave(&phba->hbalock, iflag);
3245	/* First add the response iocb to the continueq list */
3246 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3247 pring->iocb_continueq_cnt++;
3248
Justin P. Mattock70f23fd2011-05-10 10:16:21 +02003249 /* Now, determine whether the list is completed for processing */
James Smart3772a992009-05-22 14:50:54 -04003250 irsp = &rspiocbp->iocb;
3251 if (irsp->ulpLe) {
3252 /*
3253 * By default, the driver expects to free all resources
3254 * associated with this iocb completion.
3255 */
3256 free_saveq = 1;
3257 saveq = list_get_first(&pring->iocb_continueq,
3258 struct lpfc_iocbq, list);
3259 irsp = &(saveq->iocb);
3260 list_del_init(&pring->iocb_continueq);
3261 pring->iocb_continueq_cnt = 0;
3262
3263 pring->stats.iocb_rsp++;
3264
3265 /*
3266		 * If resource errors are reported from the HBA, reduce
3267		 * the queue depths of the SCSI device.
3268 */
3269 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
James Smarte3d2b802012-08-14 14:25:43 -04003270 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3271 IOERR_NO_RESOURCES)) {
James Smart3772a992009-05-22 14:50:54 -04003272 spin_unlock_irqrestore(&phba->hbalock, iflag);
3273 phba->lpfc_rampdown_queue_depth(phba);
3274 spin_lock_irqsave(&phba->hbalock, iflag);
3275 }
3276
3277 if (irsp->ulpStatus) {
3278 /* Rsp ring <ringno> error: IOCB */
3279 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3280 "0328 Rsp Ring %d error: "
3281 "IOCB Data: "
3282 "x%x x%x x%x x%x "
3283 "x%x x%x x%x x%x "
3284 "x%x x%x x%x x%x "
3285 "x%x x%x x%x x%x\n",
3286 pring->ringno,
3287 irsp->un.ulpWord[0],
3288 irsp->un.ulpWord[1],
3289 irsp->un.ulpWord[2],
3290 irsp->un.ulpWord[3],
3291 irsp->un.ulpWord[4],
3292 irsp->un.ulpWord[5],
3293 *(((uint32_t *) irsp) + 6),
3294 *(((uint32_t *) irsp) + 7),
3295 *(((uint32_t *) irsp) + 8),
3296 *(((uint32_t *) irsp) + 9),
3297 *(((uint32_t *) irsp) + 10),
3298 *(((uint32_t *) irsp) + 11),
3299 *(((uint32_t *) irsp) + 12),
3300 *(((uint32_t *) irsp) + 13),
3301 *(((uint32_t *) irsp) + 14),
3302 *(((uint32_t *) irsp) + 15));
3303 }
3304
3305 /*
3306 * Fetch the IOCB command type and call the correct completion
3307 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3308 * get freed back to the lpfc_iocb_list by the discovery
3309 * kernel thread.
3310 */
3311 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3312 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3313 switch (type) {
3314 case LPFC_SOL_IOCB:
3315 spin_unlock_irqrestore(&phba->hbalock, iflag);
3316 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3317 spin_lock_irqsave(&phba->hbalock, iflag);
3318 break;
3319
3320 case LPFC_UNSOL_IOCB:
3321 spin_unlock_irqrestore(&phba->hbalock, iflag);
3322 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3323 spin_lock_irqsave(&phba->hbalock, iflag);
3324 if (!rc)
3325 free_saveq = 0;
3326 break;
3327
3328 case LPFC_ABORT_IOCB:
3329 cmdiocbp = NULL;
3330 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3331 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3332 saveq);
3333 if (cmdiocbp) {
3334 /* Call the specified completion routine */
3335 if (cmdiocbp->iocb_cmpl) {
3336 spin_unlock_irqrestore(&phba->hbalock,
3337 iflag);
3338 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3339 saveq);
3340 spin_lock_irqsave(&phba->hbalock,
3341 iflag);
3342 } else
3343 __lpfc_sli_release_iocbq(phba,
3344 cmdiocbp);
3345 }
3346 break;
3347
3348 case LPFC_UNKNOWN_IOCB:
3349 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3350 char adaptermsg[LPFC_MAX_ADPTMSG];
3351 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3352 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3353 MAX_MSG_DATA);
3354 dev_warn(&((phba->pcidev)->dev),
3355 "lpfc%d: %s\n",
3356 phba->brd_no, adaptermsg);
3357 } else {
3358 /* Unknown IOCB command */
3359 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3360 "0335 Unknown IOCB "
3361 "command Data: x%x "
3362 "x%x x%x x%x\n",
3363 irsp->ulpCommand,
3364 irsp->ulpStatus,
3365 irsp->ulpIoTag,
3366 irsp->ulpContext);
3367 }
3368 break;
3369 }
3370
3371 if (free_saveq) {
3372 list_for_each_entry_safe(rspiocbp, next_iocb,
3373 &saveq->list, list) {
James Smart61f35bf2013-05-31 17:03:48 -04003374 list_del_init(&rspiocbp->list);
James Smart3772a992009-05-22 14:50:54 -04003375 __lpfc_sli_release_iocbq(phba, rspiocbp);
3376 }
3377 __lpfc_sli_release_iocbq(phba, saveq);
3378 }
3379 rspiocbp = NULL;
3380 }
3381 spin_unlock_irqrestore(&phba->hbalock, iflag);
3382 return rspiocbp;
3383}
3384
3385/**
3386 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
James Smarte59058c2008-08-24 21:49:00 -04003387 * @phba: Pointer to HBA context object.
3388 * @pring: Pointer to driver SLI ring object.
3389 * @mask: Host attention register mask for this ring.
3390 *
James Smart3772a992009-05-22 14:50:54 -04003391 * This routine wraps the actual slow-ring event processing routine invoked via
3392 * the API jump table function pointer in the lpfc_hba struct.
James Smarte59058c2008-08-24 21:49:00 -04003393 **/
James Smart3772a992009-05-22 14:50:54 -04003394void
James Smart2e0fef82007-06-17 19:56:36 -05003395lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3396 struct lpfc_sli_ring *pring, uint32_t mask)
dea31012005-04-17 16:05:31 -05003397{
James Smart3772a992009-05-22 14:50:54 -04003398 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3399}
3400
3401/**
3402 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3403 * @phba: Pointer to HBA context object.
3404 * @pring: Pointer to driver SLI ring object.
3405 * @mask: Host attention register mask for this ring.
3406 *
3407 * This function is called from the worker thread when there is a ring event
3408 * for non-fcp rings. The caller does not hold any lock. The function
3409 * removes each response iocb from the response ring and calls the handle
3410 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3411 **/
3412static void
3413lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3414 struct lpfc_sli_ring *pring, uint32_t mask)
3415{
James Smart34b02dc2008-08-24 21:49:55 -04003416 struct lpfc_pgp *pgp;
dea31012005-04-17 16:05:31 -05003417 IOCB_t *entry;
3418 IOCB_t *irsp = NULL;
3419 struct lpfc_iocbq *rspiocbp = NULL;
dea31012005-04-17 16:05:31 -05003420 uint32_t portRspPut, portRspMax;
dea31012005-04-17 16:05:31 -05003421 unsigned long iflag;
James Smart3772a992009-05-22 14:50:54 -04003422 uint32_t status;
dea31012005-04-17 16:05:31 -05003423
James Smart34b02dc2008-08-24 21:49:55 -04003424 pgp = &phba->port_gp[pring->ringno];
James Smart2e0fef82007-06-17 19:56:36 -05003425 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003426 pring->stats.iocb_event++;
3427
dea31012005-04-17 16:05:31 -05003428 /*
3429 * The next available response entry should never exceed the maximum
3430 * entries. If it does, treat it as an adapter hardware error.
3431 */
James Smart7e56aa22012-08-03 12:35:34 -04003432 portRspMax = pring->sli.sli3.numRiocb;
dea31012005-04-17 16:05:31 -05003433 portRspPut = le32_to_cpu(pgp->rspPutInx);
3434 if (portRspPut >= portRspMax) {
3435 /*
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02003436 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
dea31012005-04-17 16:05:31 -05003437 * rsp ring <portRspMax>
3438 */
James Smarted957682007-06-17 19:56:37 -05003439 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04003440 "0303 Ring %d handler: portRspPut %d "
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02003441 "is bigger than rsp ring %d\n",
James Smarte8b62012007-08-02 11:10:09 -04003442 pring->ringno, portRspPut, portRspMax);
dea31012005-04-17 16:05:31 -05003443
James Smart2e0fef82007-06-17 19:56:36 -05003444 phba->link_state = LPFC_HBA_ERROR;
3445 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003446
3447 phba->work_hs = HS_FFER3;
3448 lpfc_handle_eratt(phba);
3449
James Smart3772a992009-05-22 14:50:54 -04003450 return;
dea31012005-04-17 16:05:31 -05003451 }
3452
3453 rmb();
James Smart7e56aa22012-08-03 12:35:34 -04003454 while (pring->sli.sli3.rspidx != portRspPut) {
dea31012005-04-17 16:05:31 -05003455 /*
3456 * Build a completion list and call the appropriate handler.
3457 * The process is to get the next available response iocb, get
3458 * a free iocb from the list, copy the response data into the
3459		 * free iocb, insert it into the continuation list, and update the
3460		 * next response index in SLIM. This process makes response
3461 * iocb's in the ring available to DMA as fast as possible but
3462 * pays a penalty for a copy operation. Since the iocb is
3463 * only 32 bytes, this penalty is considered small relative to
3464 * the PCI reads for register values and a slim write. When
3465 * the ulpLe field is set, the entire Command has been
3466 * received.
3467 */
James Smarted957682007-06-17 19:56:37 -05003468 entry = lpfc_resp_iocb(phba, pring);
3469
James Smart858c9f62007-06-17 19:56:39 -05003470 phba->last_completion_time = jiffies;
James Smart2e0fef82007-06-17 19:56:36 -05003471 rspiocbp = __lpfc_sli_get_iocbq(phba);
dea31012005-04-17 16:05:31 -05003472 if (rspiocbp == NULL) {
3473 printk(KERN_ERR "%s: out of buffers! Failing "
Harvey Harrisoncadbd4a2008-07-03 23:47:27 -07003474 "completion.\n", __func__);
dea31012005-04-17 16:05:31 -05003475 break;
3476 }
3477
James Smarted957682007-06-17 19:56:37 -05003478 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3479 phba->iocb_rsp_size);
dea31012005-04-17 16:05:31 -05003480 irsp = &rspiocbp->iocb;
3481
James Smart7e56aa22012-08-03 12:35:34 -04003482 if (++pring->sli.sli3.rspidx >= portRspMax)
3483 pring->sli.sli3.rspidx = 0;
dea31012005-04-17 16:05:31 -05003484
James Smarta58cbd52007-08-02 11:09:43 -04003485 if (pring->ringno == LPFC_ELS_RING) {
3486 lpfc_debugfs_slow_ring_trc(phba,
3487 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3488 *(((uint32_t *) irsp) + 4),
3489 *(((uint32_t *) irsp) + 6),
3490 *(((uint32_t *) irsp) + 7));
3491 }
3492
James Smart7e56aa22012-08-03 12:35:34 -04003493 writel(pring->sli.sli3.rspidx,
3494 &phba->host_gp[pring->ringno].rspGetInx);
dea31012005-04-17 16:05:31 -05003495
James Smart3772a992009-05-22 14:50:54 -04003496 spin_unlock_irqrestore(&phba->hbalock, iflag);
3497 /* Handle the response IOCB */
3498 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3499 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003500
3501 /*
3502 * If the port response put pointer has not been updated, sync
3503		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3504 * response put pointer.
3505 */
James Smart7e56aa22012-08-03 12:35:34 -04003506 if (pring->sli.sli3.rspidx == portRspPut) {
dea31012005-04-17 16:05:31 -05003507 portRspPut = le32_to_cpu(pgp->rspPutInx);
3508 }
James Smart7e56aa22012-08-03 12:35:34 -04003509 } /* while (pring->sli.sli3.rspidx != portRspPut) */
dea31012005-04-17 16:05:31 -05003510
James Smart92d7f7b2007-06-17 19:56:38 -05003511 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
dea31012005-04-17 16:05:31 -05003512 /* At least one response entry has been freed */
3513 pring->stats.iocb_rsp_full++;
3514 /* SET RxRE_RSP in Chip Att register */
3515 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3516 writel(status, phba->CAregaddr);
3517 readl(phba->CAregaddr); /* flush */
3518 }
3519 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3520 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3521 pring->stats.iocb_cmd_empty++;
3522
3523 /* Force update of the local copy of cmdGetInx */
James Smart7e56aa22012-08-03 12:35:34 -04003524 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
dea31012005-04-17 16:05:31 -05003525 lpfc_sli_resume_iocb(phba, pring);
3526
3527 if ((pring->lpfc_sli_cmd_available))
3528 (pring->lpfc_sli_cmd_available) (phba, pring);
3529
3530 }
3531
James Smart2e0fef82007-06-17 19:56:36 -05003532 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart3772a992009-05-22 14:50:54 -04003533 return;
dea31012005-04-17 16:05:31 -05003534}
3535
James Smarte59058c2008-08-24 21:49:00 -04003536/**
James Smart4f774512009-05-22 14:52:35 -04003537 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3538 * @phba: Pointer to HBA context object.
3539 * @pring: Pointer to driver SLI ring object.
3540 * @mask: Host attention register mask for this ring.
3541 *
3542 * This function is called from the worker thread when there is a pending
3543 * ELS response iocb on the driver internal slow-path response iocb worker
3544 * queue. The caller does not hold any lock. The function will remove each
3545 * response iocb from the response worker queue and calls the handle
3546 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3547 **/
3548static void
3549lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3550 struct lpfc_sli_ring *pring, uint32_t mask)
3551{
3552 struct lpfc_iocbq *irspiocbq;
James Smart4d9ab992009-10-02 15:16:39 -04003553 struct hbq_dmabuf *dmabuf;
3554 struct lpfc_cq_event *cq_event;
James Smart4f774512009-05-22 14:52:35 -04003555 unsigned long iflag;
3556
James Smart45ed1192009-10-02 15:17:02 -04003557 spin_lock_irqsave(&phba->hbalock, iflag);
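	/* Clear the slow-path queue event flag before draining the event list */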
3558 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3559 spin_unlock_irqrestore(&phba->hbalock, iflag);
3560 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
James Smart4f774512009-05-22 14:52:35 -04003561 /* Get the response iocb from the head of work queue */
3562 spin_lock_irqsave(&phba->hbalock, iflag);
James Smart45ed1192009-10-02 15:17:02 -04003563 list_remove_head(&phba->sli4_hba.sp_queue_event,
James Smart4d9ab992009-10-02 15:16:39 -04003564 cq_event, struct lpfc_cq_event, list);
James Smart4f774512009-05-22 14:52:35 -04003565 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart4d9ab992009-10-02 15:16:39 -04003566
3567 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3568 case CQE_CODE_COMPL_WQE:
3569 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3570 cq_event);
James Smart45ed1192009-10-02 15:17:02 -04003571 /* Translate ELS WCQE to response IOCBQ */
3572 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3573 irspiocbq);
3574 if (irspiocbq)
3575 lpfc_sli_sp_handle_rspiocb(phba, pring,
3576 irspiocbq);
James Smart4d9ab992009-10-02 15:16:39 -04003577 break;
3578 case CQE_CODE_RECEIVE:
James Smart7851fe22011-07-22 18:36:52 -04003579 case CQE_CODE_RECEIVE_V1:
James Smart4d9ab992009-10-02 15:16:39 -04003580 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3581 cq_event);
3582 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3583 break;
3584 default:
3585 break;
3586 }
James Smart4f774512009-05-22 14:52:35 -04003587 }
3588}
3589
3590/**
James Smart3621a712009-04-06 18:47:14 -04003591 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
James Smarte59058c2008-08-24 21:49:00 -04003592 * @phba: Pointer to HBA context object.
3593 * @pring: Pointer to driver SLI ring object.
3594 *
3595 * This function aborts all iocbs in the given ring and frees all the iocb
3596 * objects in txq. This function issues an abort iocb for all the iocb commands
3597 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3598 * the return of this function. The caller is not required to hold any locks.
3599 **/
James Smart2e0fef82007-06-17 19:56:36 -05003600void
dea31012005-04-17 16:05:31 -05003601lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3602{
James Smart2534ba72007-04-25 09:52:20 -04003603 LIST_HEAD(completions);
dea31012005-04-17 16:05:31 -05003604 struct lpfc_iocbq *iocb, *next_iocb;
dea31012005-04-17 16:05:31 -05003605
James Smart92d7f7b2007-06-17 19:56:38 -05003606 if (pring->ringno == LPFC_ELS_RING) {
3607 lpfc_fabric_abort_hba(phba);
3608 }
3609
dea31012005-04-17 16:05:31 -05003610 /* Error everything on txq and txcmplq
3611 * First do the txq.
3612 */
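	/* On SLI4 the txq is protected by the per-ring lock; on SLI3 the hbalock covers both queues */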
James Smartdb55fba2014-04-04 13:52:02 -04003613 if (phba->sli_rev >= LPFC_SLI_REV4) {
3614 spin_lock_irq(&pring->ring_lock);
3615 list_splice_init(&pring->txq, &completions);
3616 pring->txq_cnt = 0;
3617 spin_unlock_irq(&pring->ring_lock);
dea31012005-04-17 16:05:31 -05003618
James Smartdb55fba2014-04-04 13:52:02 -04003619 spin_lock_irq(&phba->hbalock);
3620 /* Next issue ABTS for everything on the txcmplq */
3621 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3622 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3623 spin_unlock_irq(&phba->hbalock);
3624 } else {
3625 spin_lock_irq(&phba->hbalock);
3626 list_splice_init(&pring->txq, &completions);
3627 pring->txq_cnt = 0;
James Smart2534ba72007-04-25 09:52:20 -04003628
James Smartdb55fba2014-04-04 13:52:02 -04003629 /* Next issue ABTS for everything on the txcmplq */
3630 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3631 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3632 spin_unlock_irq(&phba->hbalock);
3633 }
James Smart2534ba72007-04-25 09:52:20 -04003634
James Smarta257bf92009-04-06 18:48:10 -04003635 /* Cancel all the IOCBs from the completions list */
3636 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3637 IOERR_SLI_ABORTED);
dea31012005-04-17 16:05:31 -05003638}
3639
James Smarte59058c2008-08-24 21:49:00 -04003640/**
James Smart895427b2017-02-12 13:52:30 -08003641 * lpfc_sli_abort_wqe_ring - Abort all wqes in the ring
3642 * @phba: Pointer to HBA context object.
3643 * @pring: Pointer to driver SLI ring object.
3644 *
3645 * This function aborts all wqes in the given ring. This function issues an
3646 * abort wqe for all the outstanding IO commands on the txcmplq. The wqes in
3647 * the txcmplq are not guaranteed to complete before
3648 * the return of this function. The caller is not required to hold any locks.
3649 **/
3650void
3651lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3652{
3653 LIST_HEAD(completions);
3654 struct lpfc_iocbq *iocb, *next_iocb;
3655
3656 if (pring->ringno == LPFC_ELS_RING)
3657 lpfc_fabric_abort_hba(phba);
3658
3659 spin_lock_irq(&phba->hbalock);
3660 /* Next issue ABTS for everything on the txcmplq */
3661 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3662 lpfc_sli4_abort_nvme_io(phba, pring, iocb);
3663 spin_unlock_irq(&phba->hbalock);
3664}
3665
3666
3667/**
James Smartdb55fba2014-04-04 13:52:02 -04003668 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3669 * @phba: Pointer to HBA context object.
3671 *
3672 * This function aborts all iocbs in FCP rings and frees all the iocb
3673 * objects in txq. This function issues an abort iocb for all the iocb commands
3674 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3675 * the return of this function. The caller is not required to hold any locks.
3676 **/
3677void
3678lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3679{
3680 struct lpfc_sli *psli = &phba->sli;
3681 struct lpfc_sli_ring *pring;
3682 uint32_t i;
3683
3684 /* Look on all the FCP Rings for the iotag */
3685 if (phba->sli_rev >= LPFC_SLI_REV4) {
3686 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
James Smart895427b2017-02-12 13:52:30 -08003687 pring = phba->sli4_hba.fcp_wq[i]->pring;
James Smartdb55fba2014-04-04 13:52:02 -04003688 lpfc_sli_abort_iocb_ring(phba, pring);
3689 }
3690 } else {
James Smart895427b2017-02-12 13:52:30 -08003691 pring = &psli->sli3_ring[LPFC_FCP_RING];
James Smartdb55fba2014-04-04 13:52:02 -04003692 lpfc_sli_abort_iocb_ring(phba, pring);
3693 }
3694}
3695
James Smart895427b2017-02-12 13:52:30 -08003696/**
3697 * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
3698 * @phba: Pointer to HBA context object.
3699 *
3700 * This function aborts all wqes in NVME rings. This function issues an
3701 * abort wqe for all the outstanding IO commands in txcmplq. The wqes in
3702 * the txcmplq are not guaranteed to complete before the return of this
3703 * function. The caller is not required to hold any locks.
3704 **/
3705void
3706lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
3707{
3708 struct lpfc_sli_ring *pring;
3709 uint32_t i;
3710
3711 if (phba->sli_rev < LPFC_SLI_REV4)
3712 return;
3713
3714 /* Abort all IO on each NVME ring. */
3715 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
3716 pring = phba->sli4_hba.nvme_wq[i]->pring;
3717 lpfc_sli_abort_wqe_ring(phba, pring);
3718 }
3719}
3720
James Smartdb55fba2014-04-04 13:52:02 -04003721
3722/**
James Smart3621a712009-04-06 18:47:14 -04003723 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp rings
James Smarta8e497d2008-08-24 21:50:11 -04003724 * @phba: Pointer to HBA context object.
3725 *
3726 * This function flushes all iocbs in the FCP rings and frees all the iocb
3727 * objects in txq and txcmplq. This function will not issue abort iocbs
3728 * for the iocb commands in txcmplq; they will just be returned with
3729 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
3730 * slot has been permanently disabled.
3731 **/
3732void
3733lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3734{
3735 LIST_HEAD(txq);
3736 LIST_HEAD(txcmplq);
James Smarta8e497d2008-08-24 21:50:11 -04003737 struct lpfc_sli *psli = &phba->sli;
3738 struct lpfc_sli_ring *pring;
James Smartdb55fba2014-04-04 13:52:02 -04003739 uint32_t i;
James Smarta8e497d2008-08-24 21:50:11 -04003740
3741 spin_lock_irq(&phba->hbalock);
James Smart4f2e66c2012-05-09 21:17:07 -04003742 /* Indicate the I/O queues are flushed */
3743 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
James Smarta8e497d2008-08-24 21:50:11 -04003744 spin_unlock_irq(&phba->hbalock);
3745
James Smartdb55fba2014-04-04 13:52:02 -04003746 /* Look on all the FCP Rings for the iotag */
3747 if (phba->sli_rev >= LPFC_SLI_REV4) {
3748 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
James Smart895427b2017-02-12 13:52:30 -08003749 pring = phba->sli4_hba.fcp_wq[i]->pring;
James Smarta8e497d2008-08-24 21:50:11 -04003750
James Smartdb55fba2014-04-04 13:52:02 -04003751 spin_lock_irq(&pring->ring_lock);
3752 /* Retrieve everything on txq */
3753 list_splice_init(&pring->txq, &txq);
3754 /* Retrieve everything on the txcmplq */
3755 list_splice_init(&pring->txcmplq, &txcmplq);
3756 pring->txq_cnt = 0;
3757 pring->txcmplq_cnt = 0;
3758 spin_unlock_irq(&pring->ring_lock);
3759
3760 /* Flush the txq */
3761 lpfc_sli_cancel_iocbs(phba, &txq,
3762 IOSTAT_LOCAL_REJECT,
3763 IOERR_SLI_DOWN);
3764			/* Flush the txcmplq */
3765 lpfc_sli_cancel_iocbs(phba, &txcmplq,
3766 IOSTAT_LOCAL_REJECT,
3767 IOERR_SLI_DOWN);
3768 }
3769 } else {
James Smart895427b2017-02-12 13:52:30 -08003770 pring = &psli->sli3_ring[LPFC_FCP_RING];
James Smartdb55fba2014-04-04 13:52:02 -04003771
3772 spin_lock_irq(&phba->hbalock);
3773 /* Retrieve everything on txq */
3774 list_splice_init(&pring->txq, &txq);
3775 /* Retrieve everything on the txcmplq */
3776 list_splice_init(&pring->txcmplq, &txcmplq);
3777 pring->txq_cnt = 0;
3778 pring->txcmplq_cnt = 0;
3779 spin_unlock_irq(&phba->hbalock);
3780
3781 /* Flush the txq */
3782 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3783 IOERR_SLI_DOWN);
3784		/* Flush the txcmplq */
3785 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3786 IOERR_SLI_DOWN);
3787 }
James Smarta8e497d2008-08-24 21:50:11 -04003788}
3789
3790/**
James Smart895427b2017-02-12 13:52:30 -08003791 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
3792 * @phba: Pointer to HBA context object.
3793 *
3794 * This function flushes all wqes in the nvme rings and frees all resources
3795 * in the txcmplq. This function does not issue abort wqes for the IO
3796 * commands in txcmplq; they will just be returned with
3797 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
3798 * slot has been permanently disabled.
3799 **/
3800void
3801lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
3802{
3803 LIST_HEAD(txcmplq);
3804 struct lpfc_sli_ring *pring;
3805 uint32_t i;
3806
3807 if (phba->sli_rev < LPFC_SLI_REV4)
3808 return;
3809
3810 /* Hint to other driver operations that a flush is in progress. */
3811 spin_lock_irq(&phba->hbalock);
3812 phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
3813 spin_unlock_irq(&phba->hbalock);
3814
3815 /* Cycle through all NVME rings and complete each IO with
3816	 * a local driver reason code. This is a flush, so no
3817	 * abort exchange is sent to the FW.
3818 */
3819 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
3820 pring = phba->sli4_hba.nvme_wq[i]->pring;
3821
3822 /* Retrieve everything on the txcmplq */
3823 spin_lock_irq(&pring->ring_lock);
3824 list_splice_init(&pring->txcmplq, &txcmplq);
3825 pring->txcmplq_cnt = 0;
3826 spin_unlock_irq(&pring->ring_lock);
3827
3828		/* Flush the txcmplq */
3829 lpfc_sli_cancel_iocbs(phba, &txcmplq,
3830 IOSTAT_LOCAL_REJECT,
3831 IOERR_SLI_DOWN);
3832 }
3833}
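
/*
 * Illustrative sketch only -- not part of the driver source.  The flush
 * routines above set HBA_FCP_IOQ_FLUSH / HBA_NVME_IOQ_FLUSH as a hint to
 * other driver operations.  The guard below sketches how a submission path
 * could honor that hint and fail fast while a flush is in progress; the
 * helper name and return value are assumptions made for illustration.
 */
#if 0
static int example_ioq_flush_guard(struct lpfc_hba *phba)
{
	uint32_t flush_mask = HBA_FCP_IOQ_FLUSH | HBA_NVME_IOQ_FLUSH;

	if (phba->hba_flag & flush_mask)
		return -ENODEV;	/* queues are being flushed, reject new IO */
	return 0;
}
#endif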
3834
3835/**
James Smart3772a992009-05-22 14:50:54 -04003836 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
James Smarte59058c2008-08-24 21:49:00 -04003837 * @phba: Pointer to HBA context object.
3838 * @mask: Bit mask to be checked.
3839 *
3840 * This function reads the host status register and compares
3841 * with the provided bit mask to check if HBA completed
3842 * the restart. This function will wait in a loop for the
3843 * HBA to complete restart. If the HBA does not restart within
3844 * 15 iterations, the function will reset the HBA again. The
3845 * function returns 1 when the HBA fails to restart, otherwise it
3846 * returns zero.
3847 **/
James Smart3772a992009-05-22 14:50:54 -04003848static int
3849lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
dea31012005-04-17 16:05:31 -05003850{
Jamie Wellnitz41415862006-02-28 19:25:27 -05003851 uint32_t status;
3852 int i = 0;
3853 int retval = 0;
dea31012005-04-17 16:05:31 -05003854
Jamie Wellnitz41415862006-02-28 19:25:27 -05003855 /* Read the HBA Host Status Register */
James Smart9940b972011-03-11 16:06:12 -05003856 if (lpfc_readl(phba->HSregaddr, &status))
3857 return 1;
dea31012005-04-17 16:05:31 -05003858
Jamie Wellnitz41415862006-02-28 19:25:27 -05003859 /*
3860	 * Check status register every 10ms for 5 retries, then every
3861	 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
3862	 * check every 2.5 sec for 4 more.
3863	 * Break out of the loop if errors occurred during init.
3864 */
3865 while (((status & mask) != mask) &&
3866 !(status & HS_FFERM) &&
3867 i++ < 20) {
dea31012005-04-17 16:05:31 -05003868
Jamie Wellnitz41415862006-02-28 19:25:27 -05003869 if (i <= 5)
3870 msleep(10);
3871 else if (i <= 10)
3872 msleep(500);
3873 else
3874 msleep(2500);
dea31012005-04-17 16:05:31 -05003875
Jamie Wellnitz41415862006-02-28 19:25:27 -05003876 if (i == 15) {
James Smart2e0fef82007-06-17 19:56:36 -05003877 /* Do post */
James Smart92d7f7b2007-06-17 19:56:38 -05003878 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
Jamie Wellnitz41415862006-02-28 19:25:27 -05003879 lpfc_sli_brdrestart(phba);
3880 }
3881 /* Read the HBA Host Status Register */
James Smart9940b972011-03-11 16:06:12 -05003882 if (lpfc_readl(phba->HSregaddr, &status)) {
3883 retval = 1;
3884 break;
3885 }
dea31012005-04-17 16:05:31 -05003886 }
dea31012005-04-17 16:05:31 -05003887
Jamie Wellnitz41415862006-02-28 19:25:27 -05003888 /* Check to see if any errors occurred during init */
3889 if ((status & HS_FFERM) || (i >= 20)) {
James Smarte40a02c2010-02-26 14:13:54 -05003890 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3891 "2751 Adapter failed to restart, "
3892 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3893 status,
3894 readl(phba->MBslimaddr + 0xa8),
3895 readl(phba->MBslimaddr + 0xac));
James Smart2e0fef82007-06-17 19:56:36 -05003896 phba->link_state = LPFC_HBA_ERROR;
Jamie Wellnitz41415862006-02-28 19:25:27 -05003897 retval = 1;
3898 }
dea31012005-04-17 16:05:31 -05003899
Jamie Wellnitz41415862006-02-28 19:25:27 -05003900 return retval;
dea31012005-04-17 16:05:31 -05003901}
3902
James Smartda0436e2009-05-22 14:51:39 -04003903/**
3904 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3905 * @phba: Pointer to HBA context object.
3906 * @mask: Bit mask to be checked.
3907 *
3908 * This function checks the host status register to see if the HBA is
3909 * ready. This function will wait in a loop for the HBA to become ready.
3910 * If the HBA is not ready, the function will reset the HBA PCI
3911 * function again. The function returns 1 when the HBA fails to become
3912 * ready, otherwise it returns zero.
3913 **/
3914static int
3915lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3916{
3917 uint32_t status;
3918 int retval = 0;
3919
3920 /* Read the HBA Host Status Register */
3921 status = lpfc_sli4_post_status_check(phba);
3922
3923 if (status) {
3924 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3925 lpfc_sli_brdrestart(phba);
3926 status = lpfc_sli4_post_status_check(phba);
3927 }
3928
3929 /* Check to see if any errors occurred during init */
3930 if (status) {
3931 phba->link_state = LPFC_HBA_ERROR;
3932 retval = 1;
3933 } else
3934 phba->sli4_hba.intr_enable = 0;
3935
3936 return retval;
3937}
3938
3939/**
3940 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3941 * @phba: Pointer to HBA context object.
3942 * @mask: Bit mask to be checked.
3943 *
3944 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
3945 * from the API jump table function pointer from the lpfc_hba struct.
3946 **/
3947int
3948lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3949{
3950 return phba->lpfc_sli_brdready(phba, mask);
3951}
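
/*
 * Illustrative sketch only -- not part of the driver source.  The wrapper
 * above dispatches through a per-HBA function pointer that is filled in
 * elsewhere during driver initialization (the SLI API jump-table setup);
 * the setup routine and device-group names below are assumptions made for
 * illustration.  The #if 0 block shows the intended dispatch pattern for
 * the SLI3 and SLI4 readiness checks defined earlier in this file.
 */
#if 0
static void example_wire_brdready(struct lpfc_hba *phba, int dev_grp)
{
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:	/* SLI2/SLI3 device group (assumed name) */
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
		break;
	case LPFC_PCI_DEV_OC:	/* SLI4 device group (assumed name) */
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
		break;
	}

	/* Callers then use the wrapper and never pick an SLI rev directly */
	if (lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY))
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"example: HBA did not become ready\n");
}
#endif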
3952
James Smart92908312006-03-07 15:04:13 -05003953#define BARRIER_TEST_PATTERN (0xdeadbeef)
3954
James Smarte59058c2008-08-24 21:49:00 -04003955/**
James Smart3621a712009-04-06 18:47:14 -04003956 * lpfc_reset_barrier - Make HBA ready for HBA reset
James Smarte59058c2008-08-24 21:49:00 -04003957 * @phba: Pointer to HBA context object.
3958 *
James Smart1b511972011-12-13 13:23:09 -05003959 * This function is called before resetting an HBA. This function is called
3960 * with hbalock held and requests HBA to quiesce DMAs before a reset.
James Smarte59058c2008-08-24 21:49:00 -04003961 **/
James Smart2e0fef82007-06-17 19:56:36 -05003962void lpfc_reset_barrier(struct lpfc_hba *phba)
James Smart92908312006-03-07 15:04:13 -05003963{
James Smart65a29c12006-07-06 15:50:50 -04003964 uint32_t __iomem *resp_buf;
3965 uint32_t __iomem *mbox_buf;
James Smart92908312006-03-07 15:04:13 -05003966 volatile uint32_t mbox;
James Smart9940b972011-03-11 16:06:12 -05003967 uint32_t hc_copy, ha_copy, resp_data;
James Smart92908312006-03-07 15:04:13 -05003968 int i;
3969 uint8_t hdrtype;
3970
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01003971 lockdep_assert_held(&phba->hbalock);
3972
James Smart92908312006-03-07 15:04:13 -05003973 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
3974 if (hdrtype != 0x80 ||
3975 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
3976 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
3977 return;
3978
3979 /*
3980 * Tell the other part of the chip to suspend temporarily all
3981 * its DMA activity.
3982 */
James Smart65a29c12006-07-06 15:50:50 -04003983 resp_buf = phba->MBslimaddr;
James Smart92908312006-03-07 15:04:13 -05003984
3985 /* Disable the error attention */
James Smart9940b972011-03-11 16:06:12 -05003986 if (lpfc_readl(phba->HCregaddr, &hc_copy))
3987 return;
James Smart92908312006-03-07 15:04:13 -05003988 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
3989 readl(phba->HCregaddr); /* flush */
James Smart2e0fef82007-06-17 19:56:36 -05003990 phba->link_flag |= LS_IGNORE_ERATT;
James Smart92908312006-03-07 15:04:13 -05003991
James Smart9940b972011-03-11 16:06:12 -05003992 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3993 return;
3994 if (ha_copy & HA_ERATT) {
James Smart92908312006-03-07 15:04:13 -05003995 /* Clear Chip error bit */
3996 writel(HA_ERATT, phba->HAregaddr);
James Smart2e0fef82007-06-17 19:56:36 -05003997 phba->pport->stopped = 1;
James Smart92908312006-03-07 15:04:13 -05003998 }
3999
4000 mbox = 0;
4001 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4002 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4003
4004 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
James Smart65a29c12006-07-06 15:50:50 -04004005 mbox_buf = phba->MBslimaddr;
James Smart92908312006-03-07 15:04:13 -05004006 writel(mbox, mbox_buf);
4007
James Smart9940b972011-03-11 16:06:12 -05004008 for (i = 0; i < 50; i++) {
4009 if (lpfc_readl((resp_buf + 1), &resp_data))
4010 return;
4011 if (resp_data != ~(BARRIER_TEST_PATTERN))
4012 mdelay(1);
4013 else
4014 break;
4015 }
4016 resp_data = 0;
4017 if (lpfc_readl((resp_buf + 1), &resp_data))
4018 return;
4019 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
James Smartf4b4c682009-05-22 14:53:12 -04004020 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
James Smart2e0fef82007-06-17 19:56:36 -05004021 phba->pport->stopped)
James Smart92908312006-03-07 15:04:13 -05004022 goto restore_hc;
4023 else
4024 goto clear_errat;
4025 }
4026
4027 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
James Smart9940b972011-03-11 16:06:12 -05004028 resp_data = 0;
4029 for (i = 0; i < 500; i++) {
4030 if (lpfc_readl(resp_buf, &resp_data))
4031 return;
4032 if (resp_data != mbox)
4033 mdelay(1);
4034 else
4035 break;
4036 }
James Smart92908312006-03-07 15:04:13 -05004037
4038clear_errat:
4039
James Smart9940b972011-03-11 16:06:12 -05004040 while (++i < 500) {
4041 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4042 return;
4043 if (!(ha_copy & HA_ERATT))
4044 mdelay(1);
4045 else
4046 break;
4047 }
James Smart92908312006-03-07 15:04:13 -05004048
4049 if (readl(phba->HAregaddr) & HA_ERATT) {
4050 writel(HA_ERATT, phba->HAregaddr);
James Smart2e0fef82007-06-17 19:56:36 -05004051 phba->pport->stopped = 1;
James Smart92908312006-03-07 15:04:13 -05004052 }
4053
4054restore_hc:
James Smart2e0fef82007-06-17 19:56:36 -05004055 phba->link_flag &= ~LS_IGNORE_ERATT;
James Smart92908312006-03-07 15:04:13 -05004056 writel(hc_copy, phba->HCregaddr);
4057 readl(phba->HCregaddr); /* flush */
4058}
4059
James Smarte59058c2008-08-24 21:49:00 -04004060/**
James Smart3621a712009-04-06 18:47:14 -04004061 * lpfc_sli_brdkill - Issue a kill_board mailbox command
James Smarte59058c2008-08-24 21:49:00 -04004062 * @phba: Pointer to HBA context object.
4063 *
4064 * This function issues a kill_board mailbox command and waits for
4065 * the error attention interrupt. This function is called for stopping
4066 * the firmware processing. The caller is not required to hold any
4067 * locks. This function calls lpfc_hba_down_post function to free
4068 * any pending commands after the kill. The function will return 1 when it
4069 * fails to kill the board else will return 0.
4070 **/
Jamie Wellnitz41415862006-02-28 19:25:27 -05004071int
James Smart2e0fef82007-06-17 19:56:36 -05004072lpfc_sli_brdkill(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05004073{
Jamie Wellnitz41415862006-02-28 19:25:27 -05004074 struct lpfc_sli *psli;
4075 LPFC_MBOXQ_t *pmb;
4076 uint32_t status;
4077 uint32_t ha_copy;
4078 int retval;
4079 int i = 0;
4080
4081 psli = &phba->sli;
4082
4083 /* Kill HBA */
James Smarted957682007-06-17 19:56:37 -05004084 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04004085 "0329 Kill HBA Data: x%x x%x\n",
4086 phba->pport->port_state, psli->sli_flag);
Jamie Wellnitz41415862006-02-28 19:25:27 -05004087
James Smart98c9ea52007-10-27 13:37:33 -04004088 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4089 if (!pmb)
Jamie Wellnitz41415862006-02-28 19:25:27 -05004090 return 1;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004091
4092 /* Disable the error attention */
James Smart2e0fef82007-06-17 19:56:36 -05004093 spin_lock_irq(&phba->hbalock);
James Smart9940b972011-03-11 16:06:12 -05004094 if (lpfc_readl(phba->HCregaddr, &status)) {
4095 spin_unlock_irq(&phba->hbalock);
4096 mempool_free(pmb, phba->mbox_mem_pool);
4097 return 1;
4098 }
Jamie Wellnitz41415862006-02-28 19:25:27 -05004099 status &= ~HC_ERINT_ENA;
4100 writel(status, phba->HCregaddr);
4101 readl(phba->HCregaddr); /* flush */
James Smart2e0fef82007-06-17 19:56:36 -05004102 phba->link_flag |= LS_IGNORE_ERATT;
4103 spin_unlock_irq(&phba->hbalock);
Jamie Wellnitz41415862006-02-28 19:25:27 -05004104
4105 lpfc_kill_board(phba, pmb);
4106 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4107 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4108
4109 if (retval != MBX_SUCCESS) {
4110 if (retval != MBX_BUSY)
4111 mempool_free(pmb, phba->mbox_mem_pool);
James Smarte40a02c2010-02-26 14:13:54 -05004112 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4113 "2752 KILL_BOARD command failed retval %d\n",
4114 retval);
James Smart2e0fef82007-06-17 19:56:36 -05004115 spin_lock_irq(&phba->hbalock);
4116 phba->link_flag &= ~LS_IGNORE_ERATT;
4117 spin_unlock_irq(&phba->hbalock);
Jamie Wellnitz41415862006-02-28 19:25:27 -05004118 return 1;
4119 }
4120
James Smartf4b4c682009-05-22 14:53:12 -04004121 spin_lock_irq(&phba->hbalock);
4122 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4123 spin_unlock_irq(&phba->hbalock);
James Smart92908312006-03-07 15:04:13 -05004124
Jamie Wellnitz41415862006-02-28 19:25:27 -05004125 mempool_free(pmb, phba->mbox_mem_pool);
4126
4127 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4128 * attention every 100ms for 3 seconds. If we don't get ERATT after
4129 * 3 seconds we still set HBA_ERROR state because the status of the
4130 * board is now undefined.
4131 */
James Smart9940b972011-03-11 16:06:12 -05004132 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4133 return 1;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004134 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4135 mdelay(100);
James Smart9940b972011-03-11 16:06:12 -05004136 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4137 return 1;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004138 }
4139
4140 del_timer_sync(&psli->mbox_tmo);
James Smart92908312006-03-07 15:04:13 -05004141 if (ha_copy & HA_ERATT) {
4142 writel(HA_ERATT, phba->HAregaddr);
James Smart2e0fef82007-06-17 19:56:36 -05004143 phba->pport->stopped = 1;
James Smart92908312006-03-07 15:04:13 -05004144 }
James Smart2e0fef82007-06-17 19:56:36 -05004145 spin_lock_irq(&phba->hbalock);
Jamie Wellnitz41415862006-02-28 19:25:27 -05004146 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
James Smart04c68492009-05-22 14:52:52 -04004147 psli->mbox_active = NULL;
James Smart2e0fef82007-06-17 19:56:36 -05004148 phba->link_flag &= ~LS_IGNORE_ERATT;
4149 spin_unlock_irq(&phba->hbalock);
Jamie Wellnitz41415862006-02-28 19:25:27 -05004150
Jamie Wellnitz41415862006-02-28 19:25:27 -05004151 lpfc_hba_down_post(phba);
James Smart2e0fef82007-06-17 19:56:36 -05004152 phba->link_state = LPFC_HBA_ERROR;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004153
James Smart2e0fef82007-06-17 19:56:36 -05004154 return ha_copy & HA_ERATT ? 0 : 1;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004155}
4156
James Smarte59058c2008-08-24 21:49:00 -04004157/**
James Smart3772a992009-05-22 14:50:54 -04004158 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
James Smarte59058c2008-08-24 21:49:00 -04004159 * @phba: Pointer to HBA context object.
4160 *
4161 * This function resets the HBA by writing HC_INITFF to the control
4162 * register. After the HBA resets, this function resets all the iocb ring
4163 * indices. This function disables PCI layer parity checking during
4164 * the reset.
4165 * This function returns 0 always.
4166 * The caller is not required to hold any locks.
4167 **/
Jamie Wellnitz41415862006-02-28 19:25:27 -05004168int
James Smart2e0fef82007-06-17 19:56:36 -05004169lpfc_sli_brdreset(struct lpfc_hba *phba)
Jamie Wellnitz41415862006-02-28 19:25:27 -05004170{
4171 struct lpfc_sli *psli;
dea31012005-04-17 16:05:31 -05004172 struct lpfc_sli_ring *pring;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004173 uint16_t cfg_value;
dea31012005-04-17 16:05:31 -05004174 int i;
dea31012005-04-17 16:05:31 -05004175
Jamie Wellnitz41415862006-02-28 19:25:27 -05004176 psli = &phba->sli;
dea31012005-04-17 16:05:31 -05004177
Jamie Wellnitz41415862006-02-28 19:25:27 -05004178 /* Reset HBA */
4179 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04004180 "0325 Reset HBA Data: x%x x%x\n",
James Smart2e0fef82007-06-17 19:56:36 -05004181 phba->pport->port_state, psli->sli_flag);
dea31012005-04-17 16:05:31 -05004182
4183 /* perform board reset */
4184 phba->fc_eventTag = 0;
James Smart4d9ab992009-10-02 15:16:39 -04004185 phba->link_events = 0;
James Smart2e0fef82007-06-17 19:56:36 -05004186 phba->pport->fc_myDID = 0;
4187 phba->pport->fc_prevDID = 0;
dea31012005-04-17 16:05:31 -05004188
Jamie Wellnitz41415862006-02-28 19:25:27 -05004189 /* Turn off parity checking and serr during the physical reset */
4190 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4191 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4192 (cfg_value &
4193 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4194
James Smart3772a992009-05-22 14:50:54 -04004195 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4196
Jamie Wellnitz41415862006-02-28 19:25:27 -05004197 /* Now toggle INITFF bit in the Host Control Register */
4198 writel(HC_INITFF, phba->HCregaddr);
4199 mdelay(1);
4200 readl(phba->HCregaddr); /* flush */
4201 writel(0, phba->HCregaddr);
4202 readl(phba->HCregaddr); /* flush */
4203
4204 /* Restore PCI cmd register */
4205 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
dea31012005-04-17 16:05:31 -05004206
4207 /* Initialize relevant SLI info */
Jamie Wellnitz41415862006-02-28 19:25:27 -05004208 for (i = 0; i < psli->num_rings; i++) {
James Smart895427b2017-02-12 13:52:30 -08004209 pring = &psli->sli3_ring[i];
dea31012005-04-17 16:05:31 -05004210 pring->flag = 0;
James Smart7e56aa22012-08-03 12:35:34 -04004211 pring->sli.sli3.rspidx = 0;
4212 pring->sli.sli3.next_cmdidx = 0;
4213 pring->sli.sli3.local_getidx = 0;
4214 pring->sli.sli3.cmdidx = 0;
dea31012005-04-17 16:05:31 -05004215 pring->missbufcnt = 0;
4216 }
dea31012005-04-17 16:05:31 -05004217
James Smart2e0fef82007-06-17 19:56:36 -05004218 phba->link_state = LPFC_WARM_START;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004219 return 0;
4220}
4221
James Smarte59058c2008-08-24 21:49:00 -04004222/**
James Smartda0436e2009-05-22 14:51:39 -04004223 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4224 * @phba: Pointer to HBA context object.
4225 *
4226 * This function resets a SLI4 HBA. This function disables PCI layer parity
4227 * checking while it resets the device. The caller is not required to hold
4228 * any locks.
4229 *
4230 * This function returns 0 on success, or the PCI function reset status on failure.
4231 **/
4232int
4233lpfc_sli4_brdreset(struct lpfc_hba *phba)
4234{
4235 struct lpfc_sli *psli = &phba->sli;
4236 uint16_t cfg_value;
James Smart02936352014-04-04 13:52:12 -04004237 int rc = 0;
James Smartda0436e2009-05-22 14:51:39 -04004238
4239 /* Reset HBA */
4240 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smart02936352014-04-04 13:52:12 -04004241 "0295 Reset HBA Data: x%x x%x x%x\n",
4242 phba->pport->port_state, psli->sli_flag,
4243 phba->hba_flag);
James Smartda0436e2009-05-22 14:51:39 -04004244
4245 /* perform board reset */
4246 phba->fc_eventTag = 0;
James Smart4d9ab992009-10-02 15:16:39 -04004247 phba->link_events = 0;
James Smartda0436e2009-05-22 14:51:39 -04004248 phba->pport->fc_myDID = 0;
4249 phba->pport->fc_prevDID = 0;
4250
James Smartda0436e2009-05-22 14:51:39 -04004251 spin_lock_irq(&phba->hbalock);
4252 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4253 phba->fcf.fcf_flag = 0;
James Smartda0436e2009-05-22 14:51:39 -04004254 spin_unlock_irq(&phba->hbalock);
4255
James Smart02936352014-04-04 13:52:12 -04004256 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4257 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4258 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4259 return rc;
4260 }
4261
James Smartda0436e2009-05-22 14:51:39 -04004262 /* Now physically reset the device */
4263 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4264 "0389 Performing PCI function reset!\n");
James Smartbe858b62010-12-15 17:57:20 -05004265
4266 /* Turn off parity checking and serr during the physical reset */
4267 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4268 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4269 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4270
James Smart88318812012-09-29 11:29:29 -04004271 /* Perform FCoE PCI function reset before freeing queue memory */
James Smart27b01b82012-05-09 21:19:44 -04004272 rc = lpfc_pci_function_reset(phba);
James Smart88318812012-09-29 11:29:29 -04004273 lpfc_sli4_queue_destroy(phba);
James Smartda0436e2009-05-22 14:51:39 -04004274
James Smartbe858b62010-12-15 17:57:20 -05004275 /* Restore PCI cmd register */
4276 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4277
James Smart27b01b82012-05-09 21:19:44 -04004278 return rc;
James Smartda0436e2009-05-22 14:51:39 -04004279}
4280
4281/**
4282 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
James Smarte59058c2008-08-24 21:49:00 -04004283 * @phba: Pointer to HBA context object.
4284 *
4285 * This function is called in the SLI initialization code path to
4286 * restart the HBA. The caller is not required to hold any lock.
4287 * This function writes MBX_RESTART mailbox command to the SLIM and
4288 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4289 * function to free any pending commands. The function enables
4290 * POST only during the first initialization. The function returns zero.
4291 * The function does not guarantee completion of MBX_RESTART mailbox
4292 * command before the return of this function.
4293 **/
James Smartda0436e2009-05-22 14:51:39 -04004294static int
4295lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
Jamie Wellnitz41415862006-02-28 19:25:27 -05004296{
4297 MAILBOX_t *mb;
4298 struct lpfc_sli *psli;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004299 volatile uint32_t word0;
4300 void __iomem *to_slim;
James Smart0d878412009-10-02 15:16:56 -04004301 uint32_t hba_aer_enabled;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004302
James Smart2e0fef82007-06-17 19:56:36 -05004303 spin_lock_irq(&phba->hbalock);
Jamie Wellnitz41415862006-02-28 19:25:27 -05004304
James Smart0d878412009-10-02 15:16:56 -04004305 /* Take PCIe device Advanced Error Reporting (AER) state */
4306 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4307
Jamie Wellnitz41415862006-02-28 19:25:27 -05004308 psli = &phba->sli;
4309
4310 /* Restart HBA */
4311 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04004312 "0337 Restart HBA Data: x%x x%x\n",
James Smart2e0fef82007-06-17 19:56:36 -05004313 phba->pport->port_state, psli->sli_flag);
Jamie Wellnitz41415862006-02-28 19:25:27 -05004314
4315 word0 = 0;
4316 mb = (MAILBOX_t *) &word0;
4317 mb->mbxCommand = MBX_RESTART;
4318 mb->mbxHc = 1;
4319
James Smart92908312006-03-07 15:04:13 -05004320 lpfc_reset_barrier(phba);
4321
Jamie Wellnitz41415862006-02-28 19:25:27 -05004322 to_slim = phba->MBslimaddr;
4323 writel(*(uint32_t *) mb, to_slim);
4324 readl(to_slim); /* flush */
4325
4326 /* Only skip post after fc_ffinit is completed */
James Smarteaf15d52008-12-04 22:39:29 -05004327 if (phba->pport->port_state)
Jamie Wellnitz41415862006-02-28 19:25:27 -05004328 word0 = 1; /* This is really setting up word1 */
James Smarteaf15d52008-12-04 22:39:29 -05004329 else
Jamie Wellnitz41415862006-02-28 19:25:27 -05004330 word0 = 0; /* This is really setting up word1 */
James Smart65a29c12006-07-06 15:50:50 -04004331 to_slim = phba->MBslimaddr + sizeof (uint32_t);
Jamie Wellnitz41415862006-02-28 19:25:27 -05004332 writel(*(uint32_t *) mb, to_slim);
4333 readl(to_slim); /* flush */
4334
4335 lpfc_sli_brdreset(phba);
James Smart2e0fef82007-06-17 19:56:36 -05004336 phba->pport->stopped = 0;
4337 phba->link_state = LPFC_INIT_START;
James Smartda0436e2009-05-22 14:51:39 -04004338 phba->hba_flag = 0;
James Smart2e0fef82007-06-17 19:56:36 -05004339 spin_unlock_irq(&phba->hbalock);
Jamie Wellnitz41415862006-02-28 19:25:27 -05004340
James Smart64ba8812006-08-02 15:24:34 -04004341 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4342 psli->stats_start = get_seconds();
4343
James Smarteaf15d52008-12-04 22:39:29 -05004344 /* Give the INITFF and Post time to settle. */
4345 mdelay(100);
dea31012005-04-17 16:05:31 -05004346
James Smart0d878412009-10-02 15:16:56 -04004347 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4348 if (hba_aer_enabled)
4349 pci_disable_pcie_error_reporting(phba->pcidev);
4350
Jamie Wellnitz41415862006-02-28 19:25:27 -05004351 lpfc_hba_down_post(phba);
dea31012005-04-17 16:05:31 -05004352
4353 return 0;
4354}
4355
James Smarte59058c2008-08-24 21:49:00 -04004356/**
James Smartda0436e2009-05-22 14:51:39 -04004357 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4358 * @phba: Pointer to HBA context object.
4359 *
4360 * This function is called in the SLI initialization code path to restart
4361 * a SLI4 HBA. The caller is not required to hold any lock.
4362 * At the end of the function, it calls lpfc_hba_down_post function to
4363 * free any pending commands.
4364 **/
4365static int
4366lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4367{
4368 struct lpfc_sli *psli = &phba->sli;
James Smart75baf692010-06-08 18:31:21 -04004369 uint32_t hba_aer_enabled;
James Smart27b01b82012-05-09 21:19:44 -04004370 int rc;
James Smartda0436e2009-05-22 14:51:39 -04004371
4372 /* Restart HBA */
4373 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4374 "0296 Restart HBA Data: x%x x%x\n",
4375 phba->pport->port_state, psli->sli_flag);
4376
James Smart75baf692010-06-08 18:31:21 -04004377 /* Take PCIe device Advanced Error Reporting (AER) state */
4378 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4379
James Smart27b01b82012-05-09 21:19:44 -04004380 rc = lpfc_sli4_brdreset(phba);
James Smartda0436e2009-05-22 14:51:39 -04004381
4382 spin_lock_irq(&phba->hbalock);
4383 phba->pport->stopped = 0;
4384 phba->link_state = LPFC_INIT_START;
4385 phba->hba_flag = 0;
4386 spin_unlock_irq(&phba->hbalock);
4387
4388 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4389 psli->stats_start = get_seconds();
4390
James Smart75baf692010-06-08 18:31:21 -04004391 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4392 if (hba_aer_enabled)
4393 pci_disable_pcie_error_reporting(phba->pcidev);
4394
James Smartda0436e2009-05-22 14:51:39 -04004395 lpfc_hba_down_post(phba);
4396
James Smart27b01b82012-05-09 21:19:44 -04004397 return rc;
James Smartda0436e2009-05-22 14:51:39 -04004398}
4399
4400/**
4401 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4402 * @phba: Pointer to HBA context object.
4403 *
4404 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4405 * API jump table function pointer from the lpfc_hba struct.
4406**/
4407int
4408lpfc_sli_brdrestart(struct lpfc_hba *phba)
4409{
4410 return phba->lpfc_sli_brdrestart(phba);
4411}
4412
4413/**
James Smart3621a712009-04-06 18:47:14 -04004414 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
James Smarte59058c2008-08-24 21:49:00 -04004415 * @phba: Pointer to HBA context object.
4416 *
4417 * This function is called after a HBA restart to wait for successful
4418 * restart of the HBA. Successful restart of the HBA is indicated by
4419 * HS_FFRDY and HS_MBRDY bits. If the HBA does not become ready within the
4420 * polling window, the function will restart the HBA again. The function returns
4421 * zero if the HBA restarted successfully, else it returns a negative error code.
4422 **/
dea31012005-04-17 16:05:31 -05004423static int
4424lpfc_sli_chipset_init(struct lpfc_hba *phba)
4425{
4426 uint32_t status, i = 0;
4427
4428 /* Read the HBA Host Status Register */
James Smart9940b972011-03-11 16:06:12 -05004429 if (lpfc_readl(phba->HSregaddr, &status))
4430 return -EIO;
dea31012005-04-17 16:05:31 -05004431
4432 /* Check status register to see what current state is */
4433 i = 0;
4434 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4435
James Smartdcf2a4e2010-09-29 11:18:53 -04004436		/* Check every 10ms for 10 retries, then every 100ms for 90
4437		 * retries, then every 1 sec for 50 retries, for a total of
4438		 * ~60 seconds before resetting the board again and checking every
4439		 * 1 sec for 50 more retries. The up-to-60-second wait before the
4440		 * board is ready is required for the Falcon FIPS zeroization to
4441		 * complete; any board reset in between would restart the
4442		 * zeroization and further delay board readiness.
dea31012005-04-17 16:05:31 -05004443 */
James Smartdcf2a4e2010-09-29 11:18:53 -04004444 if (i++ >= 200) {
dea31012005-04-17 16:05:31 -05004445 /* Adapter failed to init, timeout, status reg
4446 <status> */
James Smarted957682007-06-17 19:56:37 -05004447 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04004448 "0436 Adapter failed to init, "
James Smart09372822008-01-11 01:52:54 -05004449 "timeout, status reg x%x, "
4450 "FW Data: A8 x%x AC x%x\n", status,
4451 readl(phba->MBslimaddr + 0xa8),
4452 readl(phba->MBslimaddr + 0xac));
James Smart2e0fef82007-06-17 19:56:36 -05004453 phba->link_state = LPFC_HBA_ERROR;
dea31012005-04-17 16:05:31 -05004454 return -ETIMEDOUT;
4455 }
4456
4457 /* Check to see if any errors occurred during init */
4458 if (status & HS_FFERM) {
4459 /* ERROR: During chipset initialization */
4460 /* Adapter failed to init, chipset, status reg
4461 <status> */
James Smarted957682007-06-17 19:56:37 -05004462 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04004463 "0437 Adapter failed to init, "
James Smart09372822008-01-11 01:52:54 -05004464 "chipset, status reg x%x, "
4465 "FW Data: A8 x%x AC x%x\n", status,
4466 readl(phba->MBslimaddr + 0xa8),
4467 readl(phba->MBslimaddr + 0xac));
James Smart2e0fef82007-06-17 19:56:36 -05004468 phba->link_state = LPFC_HBA_ERROR;
dea31012005-04-17 16:05:31 -05004469 return -EIO;
4470 }
4471
James Smartdcf2a4e2010-09-29 11:18:53 -04004472 if (i <= 10)
dea31012005-04-17 16:05:31 -05004473 msleep(10);
James Smartdcf2a4e2010-09-29 11:18:53 -04004474 else if (i <= 100)
4475 msleep(100);
4476 else
4477 msleep(1000);
dea31012005-04-17 16:05:31 -05004478
James Smartdcf2a4e2010-09-29 11:18:53 -04004479 if (i == 150) {
4480 /* Do post */
James Smart92d7f7b2007-06-17 19:56:38 -05004481 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004482 lpfc_sli_brdrestart(phba);
dea31012005-04-17 16:05:31 -05004483 }
4484 /* Read the HBA Host Status Register */
James Smart9940b972011-03-11 16:06:12 -05004485 if (lpfc_readl(phba->HSregaddr, &status))
4486 return -EIO;
dea31012005-04-17 16:05:31 -05004487 }
4488
4489 /* Check to see if any errors occurred during init */
4490 if (status & HS_FFERM) {
4491 /* ERROR: During chipset initialization */
4492 /* Adapter failed to init, chipset, status reg <status> */
James Smarted957682007-06-17 19:56:37 -05004493 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04004494 "0438 Adapter failed to init, chipset, "
James Smart09372822008-01-11 01:52:54 -05004495 "status reg x%x, "
4496 "FW Data: A8 x%x AC x%x\n", status,
4497 readl(phba->MBslimaddr + 0xa8),
4498 readl(phba->MBslimaddr + 0xac));
James Smart2e0fef82007-06-17 19:56:36 -05004499 phba->link_state = LPFC_HBA_ERROR;
dea31012005-04-17 16:05:31 -05004500 return -EIO;
4501 }
4502
4503 /* Clear all interrupt enable conditions */
4504 writel(0, phba->HCregaddr);
4505 readl(phba->HCregaddr); /* flush */
4506
4507 /* setup host attn register */
4508 writel(0xffffffff, phba->HAregaddr);
4509 readl(phba->HAregaddr); /* flush */
4510 return 0;
4511}
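
/*
 * Illustrative sketch only -- not part of the driver source.  It restates
 * the tiered polling schedule used by lpfc_sli_chipset_init() above as a
 * small helper: 10 retries at 10ms, then 90 at 100ms, then 1 sec per
 * retry, with a board restart attempted at iteration 150 and a timeout at
 * 200.  That is roughly 0.1s + 9s + 50s, i.e. about 60 seconds, before
 * the restart, matching the comment inside the function.
 */
#if 0
static unsigned int example_chipset_poll_delay_ms(int iteration)
{
	if (iteration <= 10)
		return 10;	/* first ~0.1 second */
	if (iteration <= 100)
		return 100;	/* next ~9 seconds */
	return 1000;		/* 1 second per retry thereafter */
}
#endif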
4512
James Smarte59058c2008-08-24 21:49:00 -04004513/**
James Smart3621a712009-04-06 18:47:14 -04004514 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
James Smarte59058c2008-08-24 21:49:00 -04004515 *
4516 * This function calculates and returns the number of HBQs required to be
4517 * configured.
4518 **/
James Smart78b2d852007-08-02 11:10:21 -04004519int
James Smarted957682007-06-17 19:56:37 -05004520lpfc_sli_hbq_count(void)
4521{
James Smart92d7f7b2007-06-17 19:56:38 -05004522 return ARRAY_SIZE(lpfc_hbq_defs);
James Smarted957682007-06-17 19:56:37 -05004523}
4524
James Smarte59058c2008-08-24 21:49:00 -04004525/**
James Smart3621a712009-04-06 18:47:14 -04004526 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
James Smarte59058c2008-08-24 21:49:00 -04004527 *
4528 * This function adds the number of hbq entries in every HBQ to get
4529 * the total number of hbq entries required for the HBA and returns
4530 * the total count.
4531 **/
James Smarted957682007-06-17 19:56:37 -05004532static int
4533lpfc_sli_hbq_entry_count(void)
4534{
4535 int hbq_count = lpfc_sli_hbq_count();
4536 int count = 0;
4537 int i;
4538
4539 for (i = 0; i < hbq_count; ++i)
James Smart92d7f7b2007-06-17 19:56:38 -05004540 count += lpfc_hbq_defs[i]->entry_count;
James Smarted957682007-06-17 19:56:37 -05004541 return count;
4542}
4543
James Smarte59058c2008-08-24 21:49:00 -04004544/**
James Smart3621a712009-04-06 18:47:14 -04004545 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
James Smarte59058c2008-08-24 21:49:00 -04004546 *
4547 * This function calculates amount of memory required for all hbq entries
4548 * to be configured and returns the total memory required.
4549 **/
dea31012005-04-17 16:05:31 -05004550int
James Smarted957682007-06-17 19:56:37 -05004551lpfc_sli_hbq_size(void)
4552{
4553 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4554}
4555
James Smarte59058c2008-08-24 21:49:00 -04004556/**
James Smart3621a712009-04-06 18:47:14 -04004557 * lpfc_sli_hbq_setup - configure and initialize HBQs
James Smarte59058c2008-08-24 21:49:00 -04004558 * @phba: Pointer to HBA context object.
4559 *
4560 * This function is called during the SLI initialization to configure
4561 * all the HBQs and post buffers to the HBQ. The caller is not
4562 * required to hold any locks. This function will return zero if successful
4563 * else it will return negative error code.
4564 **/
James Smarted957682007-06-17 19:56:37 -05004565static int
4566lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4567{
4568 int hbq_count = lpfc_sli_hbq_count();
4569 LPFC_MBOXQ_t *pmb;
4570 MAILBOX_t *pmbox;
4571 uint32_t hbqno;
4572 uint32_t hbq_entry_index;
James Smarted957682007-06-17 19:56:37 -05004573
James Smart92d7f7b2007-06-17 19:56:38 -05004574 /* Get a Mailbox buffer to setup mailbox
4575 * commands for HBA initialization
4576 */
James Smarted957682007-06-17 19:56:37 -05004577 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4578
4579 if (!pmb)
4580 return -ENOMEM;
4581
James Smart04c68492009-05-22 14:52:52 -04004582 pmbox = &pmb->u.mb;
James Smarted957682007-06-17 19:56:37 -05004583
4584 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4585 phba->link_state = LPFC_INIT_MBX_CMDS;
James Smart3163f722008-02-08 18:50:25 -05004586 phba->hbq_in_use = 1;
James Smarted957682007-06-17 19:56:37 -05004587
4588 hbq_entry_index = 0;
4589 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4590 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4591 phba->hbqs[hbqno].hbqPutIdx = 0;
4592 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4593 phba->hbqs[hbqno].entry_count =
James Smart92d7f7b2007-06-17 19:56:38 -05004594 lpfc_hbq_defs[hbqno]->entry_count;
James Smart51ef4c22007-08-02 11:10:31 -04004595 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4596 hbq_entry_index, pmb);
James Smarted957682007-06-17 19:56:37 -05004597 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4598
4599 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4600 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4601 mbxStatus <status>, ring <num> */
4602
4603 lpfc_printf_log(phba, KERN_ERR,
James Smart92d7f7b2007-06-17 19:56:38 -05004604 LOG_SLI | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04004605 "1805 Adapter failed to init. "
James Smarted957682007-06-17 19:56:37 -05004606 "Data: x%x x%x x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04004607 pmbox->mbxCommand,
James Smarted957682007-06-17 19:56:37 -05004608 pmbox->mbxStatus, hbqno);
4609
4610 phba->link_state = LPFC_HBA_ERROR;
4611 mempool_free(pmb, phba->mbox_mem_pool);
James Smart6e7288d2010-06-07 15:23:35 -04004612 return -ENXIO;
James Smarted957682007-06-17 19:56:37 -05004613 }
4614 }
4615 phba->hbq_count = hbq_count;
4616
James Smarted957682007-06-17 19:56:37 -05004617 mempool_free(pmb, phba->mbox_mem_pool);
4618
James Smart92d7f7b2007-06-17 19:56:38 -05004619 /* Initially populate or replenish the HBQs */
James Smartd7c255b2008-08-24 21:50:00 -04004620 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4621 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
James Smarted957682007-06-17 19:56:37 -05004622 return 0;
4623}
4624
James Smarte59058c2008-08-24 21:49:00 -04004625/**
James Smart4f774512009-05-22 14:52:35 -04004626 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4627 * @phba: Pointer to HBA context object.
4628 *
4629 * This function is called during the SLI initialization to configure
4630 * all the HBQs and post buffers to the HBQ. The caller is not
4631 * required to hold any locks. This function will return zero if successful
4632 * else it will return negative error code.
4633 **/
4634static int
4635lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4636{
4637 phba->hbq_in_use = 1;
James Smart895427b2017-02-12 13:52:30 -08004638 phba->hbqs[LPFC_ELS_HBQ].entry_count =
4639 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
James Smart4f774512009-05-22 14:52:35 -04004640 phba->hbq_count = 1;
James Smart895427b2017-02-12 13:52:30 -08004641 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
James Smart4f774512009-05-22 14:52:35 -04004642 /* Initially populate or replenish the HBQs */
James Smart4f774512009-05-22 14:52:35 -04004643 return 0;
4644}
4645
4646/**
James Smart3621a712009-04-06 18:47:14 -04004647 * lpfc_sli_config_port - Issue config port mailbox command
James Smarte59058c2008-08-24 21:49:00 -04004648 * @phba: Pointer to HBA context object.
4649 * @sli_mode: sli mode - 2/3
4650 *
4651 * This function is called by the sli initialization code path
4652 * to issue config_port mailbox command. This function restarts the
4653 * HBA firmware and issues a config_port mailbox command to configure
4654 * the SLI interface in the sli mode specified by sli_mode
4655 * variable. The caller is not required to hold any locks.
4656 * The function returns 0 if successful, else returns negative error
4657 * code.
4658 **/
James Smart93996272008-08-24 21:50:30 -04004659int
4660lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
dea31012005-04-17 16:05:31 -05004661{
4662 LPFC_MBOXQ_t *pmb;
4663 uint32_t resetcount = 0, rc = 0, done = 0;
4664
4665 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4666 if (!pmb) {
James Smart2e0fef82007-06-17 19:56:36 -05004667 phba->link_state = LPFC_HBA_ERROR;
dea31012005-04-17 16:05:31 -05004668 return -ENOMEM;
4669 }
4670
James Smarted957682007-06-17 19:56:37 -05004671 phba->sli_rev = sli_mode;
dea31012005-04-17 16:05:31 -05004672 while (resetcount < 2 && !done) {
James Smart2e0fef82007-06-17 19:56:36 -05004673 spin_lock_irq(&phba->hbalock);
James Smart1c067a42006-08-01 07:33:52 -04004674 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
James Smart2e0fef82007-06-17 19:56:36 -05004675 spin_unlock_irq(&phba->hbalock);
James Smart92d7f7b2007-06-17 19:56:38 -05004676 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004677 lpfc_sli_brdrestart(phba);
dea31012005-04-17 16:05:31 -05004678 rc = lpfc_sli_chipset_init(phba);
4679 if (rc)
4680 break;
4681
James Smart2e0fef82007-06-17 19:56:36 -05004682 spin_lock_irq(&phba->hbalock);
James Smart1c067a42006-08-01 07:33:52 -04004683 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
James Smart2e0fef82007-06-17 19:56:36 -05004684 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05004685 resetcount++;
4686
James Smarted957682007-06-17 19:56:37 -05004687 /* Call pre CONFIG_PORT mailbox command initialization. A
4688 * value of 0 means the call was successful. Any other
4689 * nonzero value is a failure, but if ERESTART is returned,
4690 * the driver may reset the HBA and try again.
4691 */
dea31012005-04-17 16:05:31 -05004692 rc = lpfc_config_port_prep(phba);
4693 if (rc == -ERESTART) {
James Smarted957682007-06-17 19:56:37 -05004694 phba->link_state = LPFC_LINK_UNKNOWN;
dea31012005-04-17 16:05:31 -05004695 continue;
James Smart34b02dc2008-08-24 21:49:55 -04004696 } else if (rc)
dea31012005-04-17 16:05:31 -05004697 break;
James Smart6d368e52011-05-24 11:44:12 -04004698
James Smart2e0fef82007-06-17 19:56:36 -05004699 phba->link_state = LPFC_INIT_MBX_CMDS;
dea31012005-04-17 16:05:31 -05004700 lpfc_config_port(phba, pmb);
4701 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
James Smart34b02dc2008-08-24 21:49:55 -04004702 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4703 LPFC_SLI3_HBQ_ENABLED |
4704 LPFC_SLI3_CRP_ENABLED |
James Smartbc739052010-08-04 16:11:18 -04004705 LPFC_SLI3_BG_ENABLED |
4706 LPFC_SLI3_DSS_ENABLED);
James Smarted957682007-06-17 19:56:37 -05004707 if (rc != MBX_SUCCESS) {
dea31012005-04-17 16:05:31 -05004708 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04004709 "0442 Adapter failed to init, mbxCmd x%x "
James Smart92d7f7b2007-06-17 19:56:38 -05004710 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
James Smart04c68492009-05-22 14:52:52 -04004711 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
James Smart2e0fef82007-06-17 19:56:36 -05004712 spin_lock_irq(&phba->hbalock);
James Smart04c68492009-05-22 14:52:52 -04004713 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
James Smart2e0fef82007-06-17 19:56:36 -05004714 spin_unlock_irq(&phba->hbalock);
4715 rc = -ENXIO;
James Smart04c68492009-05-22 14:52:52 -04004716 } else {
4717 /* Allow asynchronous mailbox command to go through */
4718 spin_lock_irq(&phba->hbalock);
4719 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4720 spin_unlock_irq(&phba->hbalock);
James Smarted957682007-06-17 19:56:37 -05004721 done = 1;
James Smartcb69f7d2011-12-13 13:21:57 -05004722
4723 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4724 (pmb->u.mb.un.varCfgPort.gasabt == 0))
4725 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4726 "3110 Port did not grant ASABT\n");
James Smart04c68492009-05-22 14:52:52 -04004727 }
dea31012005-04-17 16:05:31 -05004728 }
James Smarted957682007-06-17 19:56:37 -05004729 if (!done) {
4730 rc = -EINVAL;
4731 goto do_prep_failed;
4732 }
James Smart04c68492009-05-22 14:52:52 -04004733 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4734 if (!pmb->u.mb.un.varCfgPort.cMA) {
James Smart34b02dc2008-08-24 21:49:55 -04004735 rc = -ENXIO;
4736 goto do_prep_failed;
4737 }
James Smart04c68492009-05-22 14:52:52 -04004738 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
James Smart34b02dc2008-08-24 21:49:55 -04004739 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
James Smart04c68492009-05-22 14:52:52 -04004740 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4741 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4742 phba->max_vpi : phba->max_vports;
4743
James Smart34b02dc2008-08-24 21:49:55 -04004744 } else
4745 phba->max_vpi = 0;
James Smartbc739052010-08-04 16:11:18 -04004746 phba->fips_level = 0;
4747 phba->fips_spec_rev = 0;
4748 if (pmb->u.mb.un.varCfgPort.gdss) {
James Smart04c68492009-05-22 14:52:52 -04004749 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
James Smartbc739052010-08-04 16:11:18 -04004750 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4751 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4752 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4753 "2850 Security Crypto Active. FIPS x%d "
4754 "(Spec Rev: x%d)",
4755 phba->fips_level, phba->fips_spec_rev);
4756 }
4757 if (pmb->u.mb.un.varCfgPort.sec_err) {
4758 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4759 "2856 Config Port Security Crypto "
4760 "Error: x%x ",
4761 pmb->u.mb.un.varCfgPort.sec_err);
4762 }
James Smart04c68492009-05-22 14:52:52 -04004763 if (pmb->u.mb.un.varCfgPort.gerbm)
James Smart34b02dc2008-08-24 21:49:55 -04004764 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
James Smart04c68492009-05-22 14:52:52 -04004765 if (pmb->u.mb.un.varCfgPort.gcrp)
James Smart34b02dc2008-08-24 21:49:55 -04004766 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
James Smart6e7288d2010-06-07 15:23:35 -04004767
4768 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4769 phba->port_gp = phba->mbox->us.s3_pgp.port;
James Smarte2a0a9d2008-12-04 22:40:02 -05004770
4771 if (phba->cfg_enable_bg) {
James Smart04c68492009-05-22 14:52:52 -04004772 if (pmb->u.mb.un.varCfgPort.gbg)
James Smarte2a0a9d2008-12-04 22:40:02 -05004773 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4774 else
4775 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4776 "0443 Adapter did not grant "
4777 "BlockGuard\n");
4778 }
James Smart34b02dc2008-08-24 21:49:55 -04004779 } else {
James Smart8f34f4c2008-12-04 22:39:23 -05004780 phba->hbq_get = NULL;
James Smart34b02dc2008-08-24 21:49:55 -04004781 phba->port_gp = phba->mbox->us.s2.port;
James Smartd7c255b2008-08-24 21:50:00 -04004782 phba->max_vpi = 0;
James Smarted957682007-06-17 19:56:37 -05004783 }
James Smart92d7f7b2007-06-17 19:56:38 -05004784do_prep_failed:
James Smarted957682007-06-17 19:56:37 -05004785 mempool_free(pmb, phba->mbox_mem_pool);
4786 return rc;
4787}
4788
James Smarte59058c2008-08-24 21:49:00 -04004789
4790/**
James Smart3621a712009-04-06 18:47:14 -04004791 * lpfc_sli_hba_setup - SLI initialization function
James Smarte59058c2008-08-24 21:49:00 -04004792 * @phba: Pointer to HBA context object.
4793 *
4794 * This function is the main SLI initialization function. This function
4795 * is called by the HBA initialization code, HBA reset code and HBA
4796 * error attention handler code. Caller is not required to hold any
4797 * locks. This function issues config_port mailbox command to configure
4798 * the SLI, setup iocb rings and HBQ rings. In the end the function
4799 * calls the config_port_post function to issue init_link mailbox
4800 * command and to start the discovery. The function will return zero
4801 * if successful, else it will return negative error code.
4802 **/
James Smarted957682007-06-17 19:56:37 -05004803int
4804lpfc_sli_hba_setup(struct lpfc_hba *phba)
4805{
4806 uint32_t rc;
James Smart6d368e52011-05-24 11:44:12 -04004807 int mode = 3, i;
4808 int longs;
James Smarted957682007-06-17 19:56:37 -05004809
James Smart12247e82016-07-06 12:36:09 -07004810 switch (phba->cfg_sli_mode) {
James Smarted957682007-06-17 19:56:37 -05004811 case 2:
James Smart78b2d852007-08-02 11:10:21 -04004812 if (phba->cfg_enable_npiv) {
James Smart92d7f7b2007-06-17 19:56:38 -05004813 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
James Smart12247e82016-07-06 12:36:09 -07004814 "1824 NPIV enabled: Override sli_mode "
James Smart92d7f7b2007-06-17 19:56:38 -05004815 "parameter (%d) to auto (0).\n",
James Smart12247e82016-07-06 12:36:09 -07004816 phba->cfg_sli_mode);
James Smart92d7f7b2007-06-17 19:56:38 -05004817 break;
4818 }
James Smarted957682007-06-17 19:56:37 -05004819 mode = 2;
4820 break;
4821 case 0:
4822 case 3:
4823 break;
4824 default:
James Smart92d7f7b2007-06-17 19:56:38 -05004825 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
James Smart12247e82016-07-06 12:36:09 -07004826 "1819 Unrecognized sli_mode parameter: %d.\n",
4827 phba->cfg_sli_mode);
James Smarted957682007-06-17 19:56:37 -05004828
4829 break;
4830 }
James Smartb5c53952016-03-31 14:12:30 -07004831 phba->fcp_embed_io = 0; /* SLI4 FC support only */
James Smarted957682007-06-17 19:56:37 -05004832
James Smart93996272008-08-24 21:50:30 -04004833 rc = lpfc_sli_config_port(phba, mode);
4834
James Smart12247e82016-07-06 12:36:09 -07004835 if (rc && phba->cfg_sli_mode == 3)
James Smart92d7f7b2007-06-17 19:56:38 -05004836 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04004837 "1820 Unable to select SLI-3. "
4838 "Not supported by adapter.\n");
James Smarted957682007-06-17 19:56:37 -05004839 if (rc && mode != 2)
James Smart93996272008-08-24 21:50:30 -04004840 rc = lpfc_sli_config_port(phba, 2);
James Smart4597663f2016-07-06 12:36:01 -07004841 else if (rc && mode == 2)
4842 rc = lpfc_sli_config_port(phba, 3);
James Smarted957682007-06-17 19:56:37 -05004843 if (rc)
dea31012005-04-17 16:05:31 -05004844 goto lpfc_sli_hba_setup_error;
4845
James Smart0d878412009-10-02 15:16:56 -04004846 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
4847 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4848 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4849 if (!rc) {
4850 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4851 "2709 This device supports "
4852 "Advanced Error Reporting (AER)\n");
4853 spin_lock_irq(&phba->hbalock);
4854 phba->hba_flag |= HBA_AER_ENABLED;
4855 spin_unlock_irq(&phba->hbalock);
4856 } else {
4857 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4858 "2708 This device does not support "
James Smartb069d7e2013-05-31 17:04:36 -04004859 "Advanced Error Reporting (AER): %d\n",
4860 rc);
James Smart0d878412009-10-02 15:16:56 -04004861 phba->cfg_aer_support = 0;
4862 }
4863 }
4864
James Smarted957682007-06-17 19:56:37 -05004865 if (phba->sli_rev == 3) {
4866 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4867 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
James Smarted957682007-06-17 19:56:37 -05004868 } else {
4869 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
4870 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
James Smart92d7f7b2007-06-17 19:56:38 -05004871 phba->sli3_options = 0;
James Smarted957682007-06-17 19:56:37 -05004872 }
4873
4874 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04004875 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
4876 phba->sli_rev, phba->max_vpi);
James Smarted957682007-06-17 19:56:37 -05004877 rc = lpfc_sli_ring_map(phba);
dea31012005-04-17 16:05:31 -05004878
4879 if (rc)
4880 goto lpfc_sli_hba_setup_error;
4881
James Smart6d368e52011-05-24 11:44:12 -04004882 /* Initialize VPIs. */
4883 if (phba->sli_rev == LPFC_SLI_REV3) {
4884 /*
4885 * The VPI bitmask and physical ID array are allocated
4886 * and initialized once only - at driver load. A port
4887 * reset doesn't need to reinitialize this memory.
4888 */
4889 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4890 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4891 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4892 GFP_KERNEL);
4893 if (!phba->vpi_bmask) {
4894 rc = -ENOMEM;
4895 goto lpfc_sli_hba_setup_error;
4896 }
4897
4898 phba->vpi_ids = kzalloc(
4899 (phba->max_vpi+1) * sizeof(uint16_t),
4900 GFP_KERNEL);
4901 if (!phba->vpi_ids) {
4902 kfree(phba->vpi_bmask);
4903 rc = -ENOMEM;
4904 goto lpfc_sli_hba_setup_error;
4905 }
4906 for (i = 0; i < phba->max_vpi; i++)
4907 phba->vpi_ids[i] = i;
4908 }
4909 }
4910
James Smart93996272008-08-24 21:50:30 -04004911 /* Init HBQs */
James Smarted957682007-06-17 19:56:37 -05004912 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4913 rc = lpfc_sli_hbq_setup(phba);
4914 if (rc)
4915 goto lpfc_sli_hba_setup_error;
4916 }
James Smart04c68492009-05-22 14:52:52 -04004917 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05004918 phba->sli.sli_flag |= LPFC_PROCESS_LA;
James Smart04c68492009-05-22 14:52:52 -04004919 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05004920
4921 rc = lpfc_config_port_post(phba);
4922 if (rc)
4923 goto lpfc_sli_hba_setup_error;
4924
James Smarted957682007-06-17 19:56:37 -05004925 return rc;
4926
James Smart92d7f7b2007-06-17 19:56:38 -05004927lpfc_sli_hba_setup_error:
James Smart2e0fef82007-06-17 19:56:36 -05004928 phba->link_state = LPFC_HBA_ERROR;
James Smarte40a02c2010-02-26 14:13:54 -05004929 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04004930 "0445 Firmware initialization failed\n");
dea31012005-04-17 16:05:31 -05004931 return rc;
4932}
4933
James Smartda0436e2009-05-22 14:51:39 -04004934/**
4935 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4936 * @phba: Pointer to HBA context object.
4937 *
4938 * This function issues a dump mailbox command to read config region
4939 * 23, parses the records in the region and populates the driver
4940 * data structures.
4941 **/
4942static int
James Smartff78d8f2011-12-13 13:21:35 -05004943lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
James Smartda0436e2009-05-22 14:51:39 -04004944{
James Smartff78d8f2011-12-13 13:21:35 -05004945 LPFC_MBOXQ_t *mboxq;
James Smartda0436e2009-05-22 14:51:39 -04004946 struct lpfc_dmabuf *mp;
4947 struct lpfc_mqe *mqe;
4948 uint32_t data_length;
4949 int rc;
4950
4951 /* Program the default value of vlan_id and fc_map */
4952 phba->valid_vlan = 0;
4953 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4954 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4955 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4956
James Smartff78d8f2011-12-13 13:21:35 -05004957 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4958 if (!mboxq)
James Smartda0436e2009-05-22 14:51:39 -04004959 return -ENOMEM;
4960
James Smartff78d8f2011-12-13 13:21:35 -05004961 mqe = &mboxq->u.mqe;
4962 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
4963 rc = -ENOMEM;
4964 goto out_free_mboxq;
4965 }
4966
James Smartda0436e2009-05-22 14:51:39 -04004967 mp = (struct lpfc_dmabuf *) mboxq->context1;
4968 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4969
4970 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4971 "(%d):2571 Mailbox cmd x%x Status x%x "
4972 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4973 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4974 "CQ: x%x x%x x%x x%x\n",
4975 mboxq->vport ? mboxq->vport->vpi : 0,
4976 bf_get(lpfc_mqe_command, mqe),
4977 bf_get(lpfc_mqe_status, mqe),
4978 mqe->un.mb_words[0], mqe->un.mb_words[1],
4979 mqe->un.mb_words[2], mqe->un.mb_words[3],
4980 mqe->un.mb_words[4], mqe->un.mb_words[5],
4981 mqe->un.mb_words[6], mqe->un.mb_words[7],
4982 mqe->un.mb_words[8], mqe->un.mb_words[9],
4983 mqe->un.mb_words[10], mqe->un.mb_words[11],
4984 mqe->un.mb_words[12], mqe->un.mb_words[13],
4985 mqe->un.mb_words[14], mqe->un.mb_words[15],
4986 mqe->un.mb_words[16], mqe->un.mb_words[50],
4987 mboxq->mcqe.word0,
4988 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4989 mboxq->mcqe.trailer);
4990
4991 if (rc) {
4992 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4993 kfree(mp);
James Smartff78d8f2011-12-13 13:21:35 -05004994 rc = -EIO;
4995 goto out_free_mboxq;
James Smartda0436e2009-05-22 14:51:39 -04004996 }
4997 data_length = mqe->un.mb_words[5];
James Smarta0c87cb2009-07-19 10:01:10 -04004998 if (data_length > DMP_RGN23_SIZE) {
James Smartd11e31d2009-06-10 17:23:06 -04004999 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5000 kfree(mp);
James Smartff78d8f2011-12-13 13:21:35 -05005001 rc = -EIO;
5002 goto out_free_mboxq;
James Smartd11e31d2009-06-10 17:23:06 -04005003 }
James Smartda0436e2009-05-22 14:51:39 -04005004
5005 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5006 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5007 kfree(mp);
James Smartff78d8f2011-12-13 13:21:35 -05005008 rc = 0;
5009
5010out_free_mboxq:
5011 mempool_free(mboxq, phba->mbox_mem_pool);
5012 return rc;
James Smartda0436e2009-05-22 14:51:39 -04005013}
5014
5015/**
5016 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5017 * @phba: pointer to lpfc hba data structure.
5018 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5019 * @vpd: pointer to the memory to hold resulting port vpd data.
5020 * @vpd_size: On input, the number of bytes allocated to @vpd.
5021 * On output, the number of data bytes in @vpd.
5022 *
5023 * This routine executes a READ_REV SLI4 mailbox command. In
5024 * addition, this routine gets the port vpd data.
5025 *
5026 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02005027 * 0 - successful
James Smartd439d282010-09-29 11:18:45 -04005028 * -ENOMEM - could not allocate memory.
James Smartda0436e2009-05-22 14:51:39 -04005029 **/
5030static int
5031lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5032 uint8_t *vpd, uint32_t *vpd_size)
5033{
5034 int rc = 0;
5035 uint32_t dma_size;
5036 struct lpfc_dmabuf *dmabuf;
5037 struct lpfc_mqe *mqe;
5038
5039 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5040 if (!dmabuf)
5041 return -ENOMEM;
5042
5043 /*
5044 * Get a DMA buffer for the vpd data resulting from the READ_REV
5045 * mailbox command.
5046 */
5047 dma_size = *vpd_size;
Joe Perches1aee3832014-09-03 12:56:12 -04005048 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
5049 &dmabuf->phys, GFP_KERNEL);
James Smartda0436e2009-05-22 14:51:39 -04005050 if (!dmabuf->virt) {
5051 kfree(dmabuf);
5052 return -ENOMEM;
5053 }
James Smartda0436e2009-05-22 14:51:39 -04005054
5055 /*
5056 * The SLI4 implementation of READ_REV conflicts at word1,
5057 * bits 31:16 and SLI4 adds vpd functionality not present
5058 * in SLI3. This code corrects the conflicts.
5059 */
5060 lpfc_read_rev(phba, mboxq);
5061 mqe = &mboxq->u.mqe;
5062 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5063 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5064 mqe->un.read_rev.word1 &= 0x0000FFFF;
5065 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5066 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5067
5068 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5069 if (rc) {
5070 dma_free_coherent(&phba->pcidev->dev, dma_size,
5071 dmabuf->virt, dmabuf->phys);
James Smartdef9c7a2009-12-21 17:02:28 -05005072 kfree(dmabuf);
James Smartda0436e2009-05-22 14:51:39 -04005073 return -EIO;
5074 }
5075
James Smartda0436e2009-05-22 14:51:39 -04005076 /*
5077 * The available vpd length cannot be bigger than the
5078 * DMA buffer passed to the port. Catch the less than
5079 * case and update the caller's size.
5080 */
5081 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5082 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5083
James Smartd7c47992010-06-08 18:31:54 -04005084 memcpy(vpd, dmabuf->virt, *vpd_size);
5085
James Smartda0436e2009-05-22 14:51:39 -04005086 dma_free_coherent(&phba->pcidev->dev, dma_size,
5087 dmabuf->virt, dmabuf->phys);
5088 kfree(dmabuf);
5089 return 0;
5090}
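/*
 * Typical use (a sketch based on lpfc_sli4_hba_setup() further below): the
 * caller supplies a buffer and its size, and vpd_size is updated to the
 * number of VPD bytes actually returned by the port.
 *
 *   uint32_t vpd_size = SLI4_PAGE_SIZE;
 *   uint8_t *vpd = kzalloc(vpd_size, GFP_KERNEL);
 *
 *   if (vpd && !lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size))
 *           ;  // parse at most vpd_size bytes of VPD from vpd[]
 */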
5091
5092/**
James Smartcd1c8302011-10-10 21:33:25 -04005093 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5094 * @phba: pointer to lpfc hba data structure.
5095 *
 5096 * This routine retrieves the SLI4 device physical port name that this
 5097 * PCI function is attached to.
5098 *
5099 * Return codes
Anatol Pomozov4907cb72012-09-01 10:31:09 -07005100 * 0 - successful
James Smartcd1c8302011-10-10 21:33:25 -04005101 * otherwise - failed to retrieve physical port name
5102 **/
5103static int
5104lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5105{
5106 LPFC_MBOXQ_t *mboxq;
James Smartcd1c8302011-10-10 21:33:25 -04005107 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5108 struct lpfc_controller_attribute *cntl_attr;
5109 struct lpfc_mbx_get_port_name *get_port_name;
5110 void *virtaddr = NULL;
5111 uint32_t alloclen, reqlen;
5112 uint32_t shdr_status, shdr_add_status;
5113 union lpfc_sli4_cfg_shdr *shdr;
5114 char cport_name = 0;
5115 int rc;
5116
5117 /* We assume nothing at this point */
5118 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5119 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5120
5121 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5122 if (!mboxq)
5123 return -ENOMEM;
James Smartcd1c8302011-10-10 21:33:25 -04005124 /* obtain link type and link number via READ_CONFIG */
James Smartff78d8f2011-12-13 13:21:35 -05005125 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5126 lpfc_sli4_read_config(phba);
5127 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5128 goto retrieve_ppname;
James Smartcd1c8302011-10-10 21:33:25 -04005129
5130 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5131 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5132 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5133 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5134 LPFC_SLI4_MBX_NEMBED);
5135 if (alloclen < reqlen) {
5136 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5137 "3084 Allocated DMA memory size (%d) is "
5138 "less than the requested DMA memory size "
5139 "(%d)\n", alloclen, reqlen);
5140 rc = -ENOMEM;
5141 goto out_free_mboxq;
5142 }
5143 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5144 virtaddr = mboxq->sge_array->addr[0];
5145 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5146 shdr = &mbx_cntl_attr->cfg_shdr;
5147 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5148 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5149 if (shdr_status || shdr_add_status || rc) {
5150 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5151 "3085 Mailbox x%x (x%x/x%x) failed, "
5152 "rc:x%x, status:x%x, add_status:x%x\n",
5153 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5154 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5155 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5156 rc, shdr_status, shdr_add_status);
5157 rc = -ENXIO;
5158 goto out_free_mboxq;
5159 }
5160 cntl_attr = &mbx_cntl_attr->cntl_attr;
5161 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5162 phba->sli4_hba.lnk_info.lnk_tp =
5163 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5164 phba->sli4_hba.lnk_info.lnk_no =
5165 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5166 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5167 "3086 lnk_type:%d, lnk_numb:%d\n",
5168 phba->sli4_hba.lnk_info.lnk_tp,
5169 phba->sli4_hba.lnk_info.lnk_no);
5170
5171retrieve_ppname:
5172 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5173 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5174 sizeof(struct lpfc_mbx_get_port_name) -
5175 sizeof(struct lpfc_sli4_cfg_mhdr),
5176 LPFC_SLI4_MBX_EMBED);
5177 get_port_name = &mboxq->u.mqe.un.get_port_name;
5178 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5179 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5180 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5181 phba->sli4_hba.lnk_info.lnk_tp);
5182 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5183 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5184 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5185 if (shdr_status || shdr_add_status || rc) {
5186 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5187 "3087 Mailbox x%x (x%x/x%x) failed: "
5188 "rc:x%x, status:x%x, add_status:x%x\n",
5189 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5190 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5191 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5192 rc, shdr_status, shdr_add_status);
5193 rc = -ENXIO;
5194 goto out_free_mboxq;
5195 }
5196 switch (phba->sli4_hba.lnk_info.lnk_no) {
5197 case LPFC_LINK_NUMBER_0:
5198 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5199 &get_port_name->u.response);
5200 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5201 break;
5202 case LPFC_LINK_NUMBER_1:
5203 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5204 &get_port_name->u.response);
5205 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5206 break;
5207 case LPFC_LINK_NUMBER_2:
5208 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5209 &get_port_name->u.response);
5210 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5211 break;
5212 case LPFC_LINK_NUMBER_3:
5213 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5214 &get_port_name->u.response);
5215 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5216 break;
5217 default:
5218 break;
5219 }
5220
5221 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5222 phba->Port[0] = cport_name;
5223 phba->Port[1] = '\0';
5224 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5225 "3091 SLI get port name: %s\n", phba->Port);
5226 }
5227
5228out_free_mboxq:
5229 if (rc != MBX_TIMEOUT) {
5230 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5231 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5232 else
5233 mempool_free(mboxq, phba->mbox_mem_pool);
5234 }
5235 return rc;
5236}
5237
5238/**
James Smartda0436e2009-05-22 14:51:39 -04005239 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5240 * @phba: pointer to lpfc hba data structure.
5241 *
5242 * This routine is called to explicitly arm the SLI4 device's completion and
5243 * event queues
5244 **/
5245static void
5246lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5247{
James Smart895427b2017-02-12 13:52:30 -08005248 int qidx;
James Smartda0436e2009-05-22 14:51:39 -04005249
5250 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
5251 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
James Smart895427b2017-02-12 13:52:30 -08005252 if (phba->sli4_hba.nvmels_cq)
5253 lpfc_sli4_cq_release(phba->sli4_hba.nvmels_cq,
5254 LPFC_QUEUE_REARM);
5255
5256 if (phba->sli4_hba.fcp_cq)
5257 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
5258 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[qidx],
5259 LPFC_QUEUE_REARM);
5260
5261 if (phba->sli4_hba.nvme_cq)
5262 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
5263 lpfc_sli4_cq_release(phba->sli4_hba.nvme_cq[qidx],
5264 LPFC_QUEUE_REARM);
James Smart1ba981f2014-02-20 09:56:45 -05005265
James Smartf38fa0b2014-04-04 13:52:21 -04005266 if (phba->cfg_fof)
James Smart1ba981f2014-02-20 09:56:45 -05005267 lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
5268
James Smart895427b2017-02-12 13:52:30 -08005269 if (phba->sli4_hba.hba_eq)
5270 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
5271 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[qidx],
5272 LPFC_QUEUE_REARM);
James Smart1ba981f2014-02-20 09:56:45 -05005273
5274 if (phba->cfg_fof)
5275 lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
James Smartda0436e2009-05-22 14:51:39 -04005276}
5277
5278/**
James Smart6d368e52011-05-24 11:44:12 -04005279 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5280 * @phba: Pointer to HBA context object.
5281 * @type: The resource extent type.
James Smartb76f2dc2011-07-22 18:37:42 -04005282 * @extnt_count: buffer to hold port available extent count.
5283 * @extnt_size: buffer to hold element count per extent.
James Smart6d368e52011-05-24 11:44:12 -04005284 *
James Smartb76f2dc2011-07-22 18:37:42 -04005285 * This function calls the port and retrieves the number of available
5286 * extents and their size for a particular extent type.
5287 *
5288 * Returns: 0 if successful. Nonzero otherwise.
James Smart6d368e52011-05-24 11:44:12 -04005289 **/
James Smartb76f2dc2011-07-22 18:37:42 -04005290int
James Smart6d368e52011-05-24 11:44:12 -04005291lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5292 uint16_t *extnt_count, uint16_t *extnt_size)
5293{
5294 int rc = 0;
5295 uint32_t length;
5296 uint32_t mbox_tmo;
5297 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5298 LPFC_MBOXQ_t *mbox;
5299
5300 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5301 if (!mbox)
5302 return -ENOMEM;
5303
5304 /* Find out how many extents are available for this resource type */
5305 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5306 sizeof(struct lpfc_sli4_cfg_mhdr));
5307 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5308 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5309 length, LPFC_SLI4_MBX_EMBED);
5310
5311 /* Send an extents count of 0 - the GET doesn't use it. */
5312 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5313 LPFC_SLI4_MBX_EMBED);
5314 if (unlikely(rc)) {
5315 rc = -EIO;
5316 goto err_exit;
5317 }
5318
5319 if (!phba->sli4_hba.intr_enable)
5320 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5321 else {
James Smarta183a152011-10-10 21:32:43 -04005322 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart6d368e52011-05-24 11:44:12 -04005323 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5324 }
5325 if (unlikely(rc)) {
5326 rc = -EIO;
5327 goto err_exit;
5328 }
5329
5330 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5331 if (bf_get(lpfc_mbox_hdr_status,
5332 &rsrc_info->header.cfg_shdr.response)) {
5333 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5334 "2930 Failed to get resource extents "
5335 "Status 0x%x Add'l Status 0x%x\n",
5336 bf_get(lpfc_mbox_hdr_status,
5337 &rsrc_info->header.cfg_shdr.response),
5338 bf_get(lpfc_mbox_hdr_add_status,
5339 &rsrc_info->header.cfg_shdr.response));
5340 rc = -EIO;
5341 goto err_exit;
5342 }
5343
5344 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5345 &rsrc_info->u.rsp);
5346 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5347 &rsrc_info->u.rsp);
James Smart8a9d2e82012-05-09 21:16:12 -04005348
5349 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5350 "3162 Retrieved extents type-%d from port: count:%d, "
5351 "size:%d\n", type, *extnt_count, *extnt_size);
5352
5353err_exit:
James Smart6d368e52011-05-24 11:44:12 -04005354 mempool_free(mbox, phba->mbox_mem_pool);
5355 return rc;
5356}
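/*
 * Typical use (sketch, mirroring lpfc_sli4_alloc_extent() below): query the
 * port for the extent geometry of one resource type before allocating.
 *
 *   uint16_t rsrc_cnt, rsrc_size;
 *
 *   if (!lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *                                       &rsrc_cnt, &rsrc_size))
 *           ;  // port can provide rsrc_cnt extents of rsrc_size XRIs each
 */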
5357
5358/**
5359 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5360 * @phba: Pointer to HBA context object.
5361 * @type: The extent type to check.
5362 *
5363 * This function reads the current available extents from the port and checks
5364 * if the extent count or extent size has changed since the last access.
5365 * Callers use this routine post port reset to understand if there is a
5366 * extent reprovisioning requirement.
5367 *
5368 * Returns:
5369 * -Error: error indicates problem.
5370 * 1: Extent count or size has changed.
5371 * 0: No changes.
5372 **/
5373static int
5374lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5375{
5376 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5377 uint16_t size_diff, rsrc_ext_size;
5378 int rc = 0;
5379 struct lpfc_rsrc_blks *rsrc_entry;
5380 struct list_head *rsrc_blk_list = NULL;
5381
5382 size_diff = 0;
5383 curr_ext_cnt = 0;
5384 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5385 &rsrc_ext_cnt,
5386 &rsrc_ext_size);
5387 if (unlikely(rc))
5388 return -EIO;
5389
5390 switch (type) {
5391 case LPFC_RSC_TYPE_FCOE_RPI:
5392 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5393 break;
5394 case LPFC_RSC_TYPE_FCOE_VPI:
5395 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5396 break;
5397 case LPFC_RSC_TYPE_FCOE_XRI:
5398 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5399 break;
5400 case LPFC_RSC_TYPE_FCOE_VFI:
5401 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5402 break;
5403 default:
5404 break;
5405 }
5406
5407 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5408 curr_ext_cnt++;
5409 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5410 size_diff++;
5411 }
5412
5413 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5414 rc = 1;
5415
5416 return rc;
5417}
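/*
 * Caller-side sketch of the three-way return contract documented above
 * (negative error / 1 = changed / 0 = unchanged); hypothetical caller:
 *
 *   rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI);
 *   if (rc < 0)
 *           return rc;  // mailbox or port failure
 *   else if (rc)
 *           ;  // extent count or size changed - reallocate extents
 *   else
 *           ;  // provisioning unchanged - nothing to do
 */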
5418
5419/**
 5420 * lpfc_sli4_cfg_post_extnts - Post an extent allocation request to the port.
 5421 * @phba: Pointer to HBA context object.
 5422 * @extnt_cnt: number of available extents to request.
 5423 * @type: the extent type (rpi, xri, vfi, vpi).
 5424 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
 5425 * @mbox: pointer to the caller's allocated mailbox structure.
 5426 *
 5427 * This function executes the extent allocation request. It also sizes
 5428 * the mailbox (embedded or non-embedded) needed to carry the request and
 5429 * the allocated extents. It is the caller's responsibility to evaluate
 5430 * the response.
5431 *
5432 * Returns:
5433 * -Error: Error value describes the condition found.
5434 * 0: if successful
5435 **/
5436static int
James Smart8a9d2e82012-05-09 21:16:12 -04005437lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
James Smart6d368e52011-05-24 11:44:12 -04005438 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5439{
5440 int rc = 0;
5441 uint32_t req_len;
5442 uint32_t emb_len;
5443 uint32_t alloc_len, mbox_tmo;
5444
5445 /* Calculate the total requested length of the dma memory */
James Smart8a9d2e82012-05-09 21:16:12 -04005446 req_len = extnt_cnt * sizeof(uint16_t);
James Smart6d368e52011-05-24 11:44:12 -04005447
5448 /*
5449 * Calculate the size of an embedded mailbox. The uint32_t
5450 * accounts for extents-specific word.
5451 */
5452 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5453 sizeof(uint32_t);
5454
5455 /*
5456 * Presume the allocation and response will fit into an embedded
5457 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5458 */
5459 *emb = LPFC_SLI4_MBX_EMBED;
5460 if (req_len > emb_len) {
James Smart8a9d2e82012-05-09 21:16:12 -04005461 req_len = extnt_cnt * sizeof(uint16_t) +
James Smart6d368e52011-05-24 11:44:12 -04005462 sizeof(union lpfc_sli4_cfg_shdr) +
5463 sizeof(uint32_t);
5464 *emb = LPFC_SLI4_MBX_NEMBED;
5465 }
5466
5467 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5468 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5469 req_len, *emb);
5470 if (alloc_len < req_len) {
5471 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smartb76f2dc2011-07-22 18:37:42 -04005472 "2982 Allocated DMA memory size (x%x) is "
James Smart6d368e52011-05-24 11:44:12 -04005473 "less than the requested DMA memory "
5474 "size (x%x)\n", alloc_len, req_len);
5475 return -ENOMEM;
5476 }
James Smart8a9d2e82012-05-09 21:16:12 -04005477 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
James Smart6d368e52011-05-24 11:44:12 -04005478 if (unlikely(rc))
5479 return -EIO;
5480
5481 if (!phba->sli4_hba.intr_enable)
5482 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5483 else {
James Smarta183a152011-10-10 21:32:43 -04005484 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart6d368e52011-05-24 11:44:12 -04005485 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5486 }
5487
5488 if (unlikely(rc))
5489 rc = -EIO;
5490 return rc;
5491}
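/*
 * Worked example of the embedded/non-embedded decision above, with
 * hypothetical sizes (the real values depend on MAILBOX_t and mbox_header):
 * suppose emb_len works out to 200 bytes. Requesting 16 extents needs
 * req_len = 16 * sizeof(uint16_t) = 32 bytes, which fits, so the request is
 * built LPFC_SLI4_MBX_EMBED. Requesting 256 extents needs 512 bytes, which
 * does not fit, so req_len is recomputed to include the config header plus
 * the extents word and the mailbox is built LPFC_SLI4_MBX_NEMBED with
 * external SGE memory.
 */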
5492
5493/**
5494 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5495 * @phba: Pointer to HBA context object.
5496 * @type: The resource extent type to allocate.
5497 *
5498 * This function allocates the number of elements for the specified
5499 * resource type.
5500 **/
5501static int
5502lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5503{
5504 bool emb = false;
5505 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5506 uint16_t rsrc_id, rsrc_start, j, k;
5507 uint16_t *ids;
5508 int i, rc;
5509 unsigned long longs;
5510 unsigned long *bmask;
5511 struct lpfc_rsrc_blks *rsrc_blks;
5512 LPFC_MBOXQ_t *mbox;
5513 uint32_t length;
5514 struct lpfc_id_range *id_array = NULL;
5515 void *virtaddr = NULL;
5516 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5517 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5518 struct list_head *ext_blk_list;
5519
5520 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5521 &rsrc_cnt,
5522 &rsrc_size);
5523 if (unlikely(rc))
5524 return -EIO;
5525
5526 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5527 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5528 "3009 No available Resource Extents "
5529 "for resource type 0x%x: Count: 0x%x, "
5530 "Size 0x%x\n", type, rsrc_cnt,
5531 rsrc_size);
5532 return -ENOMEM;
5533 }
5534
James Smart8a9d2e82012-05-09 21:16:12 -04005535 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5536 "2903 Post resource extents type-0x%x: "
5537 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
James Smart6d368e52011-05-24 11:44:12 -04005538
5539 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5540 if (!mbox)
5541 return -ENOMEM;
5542
James Smart8a9d2e82012-05-09 21:16:12 -04005543 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
James Smart6d368e52011-05-24 11:44:12 -04005544 if (unlikely(rc)) {
5545 rc = -EIO;
5546 goto err_exit;
5547 }
5548
5549 /*
5550 * Figure out where the response is located. Then get local pointers
5551 * to the response data. The port does not guarantee to respond to
 5552 * all extent count requests, so update the local variable with the
5553 * allocated count from the port.
5554 */
5555 if (emb == LPFC_SLI4_MBX_EMBED) {
5556 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5557 id_array = &rsrc_ext->u.rsp.id[0];
5558 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5559 } else {
5560 virtaddr = mbox->sge_array->addr[0];
5561 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5562 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5563 id_array = &n_rsrc->id;
5564 }
5565
5566 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5567 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5568
5569 /*
5570 * Based on the resource size and count, correct the base and max
5571 * resource values.
5572 */
5573 length = sizeof(struct lpfc_rsrc_blks);
5574 switch (type) {
5575 case LPFC_RSC_TYPE_FCOE_RPI:
5576 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5577 sizeof(unsigned long),
5578 GFP_KERNEL);
5579 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5580 rc = -ENOMEM;
5581 goto err_exit;
5582 }
5583 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5584 sizeof(uint16_t),
5585 GFP_KERNEL);
5586 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5587 kfree(phba->sli4_hba.rpi_bmask);
5588 rc = -ENOMEM;
5589 goto err_exit;
5590 }
5591
5592 /*
5593 * The next_rpi was initialized with the maximum available
5594 * count but the port may allocate a smaller number. Catch
5595 * that case and update the next_rpi.
5596 */
5597 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5598
5599 /* Initialize local ptrs for common extent processing later. */
5600 bmask = phba->sli4_hba.rpi_bmask;
5601 ids = phba->sli4_hba.rpi_ids;
5602 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5603 break;
5604 case LPFC_RSC_TYPE_FCOE_VPI:
5605 phba->vpi_bmask = kzalloc(longs *
5606 sizeof(unsigned long),
5607 GFP_KERNEL);
5608 if (unlikely(!phba->vpi_bmask)) {
5609 rc = -ENOMEM;
5610 goto err_exit;
5611 }
5612 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5613 sizeof(uint16_t),
5614 GFP_KERNEL);
5615 if (unlikely(!phba->vpi_ids)) {
5616 kfree(phba->vpi_bmask);
5617 rc = -ENOMEM;
5618 goto err_exit;
5619 }
5620
5621 /* Initialize local ptrs for common extent processing later. */
5622 bmask = phba->vpi_bmask;
5623 ids = phba->vpi_ids;
5624 ext_blk_list = &phba->lpfc_vpi_blk_list;
5625 break;
5626 case LPFC_RSC_TYPE_FCOE_XRI:
5627 phba->sli4_hba.xri_bmask = kzalloc(longs *
5628 sizeof(unsigned long),
5629 GFP_KERNEL);
5630 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5631 rc = -ENOMEM;
5632 goto err_exit;
5633 }
James Smart8a9d2e82012-05-09 21:16:12 -04005634 phba->sli4_hba.max_cfg_param.xri_used = 0;
James Smart6d368e52011-05-24 11:44:12 -04005635 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5636 sizeof(uint16_t),
5637 GFP_KERNEL);
5638 if (unlikely(!phba->sli4_hba.xri_ids)) {
5639 kfree(phba->sli4_hba.xri_bmask);
5640 rc = -ENOMEM;
5641 goto err_exit;
5642 }
5643
5644 /* Initialize local ptrs for common extent processing later. */
5645 bmask = phba->sli4_hba.xri_bmask;
5646 ids = phba->sli4_hba.xri_ids;
5647 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5648 break;
5649 case LPFC_RSC_TYPE_FCOE_VFI:
5650 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5651 sizeof(unsigned long),
5652 GFP_KERNEL);
5653 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5654 rc = -ENOMEM;
5655 goto err_exit;
5656 }
5657 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5658 sizeof(uint16_t),
5659 GFP_KERNEL);
5660 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5661 kfree(phba->sli4_hba.vfi_bmask);
5662 rc = -ENOMEM;
5663 goto err_exit;
5664 }
5665
5666 /* Initialize local ptrs for common extent processing later. */
5667 bmask = phba->sli4_hba.vfi_bmask;
5668 ids = phba->sli4_hba.vfi_ids;
5669 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5670 break;
5671 default:
5672 /* Unsupported Opcode. Fail call. */
5673 id_array = NULL;
5674 bmask = NULL;
5675 ids = NULL;
5676 ext_blk_list = NULL;
5677 goto err_exit;
5678 }
5679
5680 /*
5681 * Complete initializing the extent configuration with the
5682 * allocated ids assigned to this function. The bitmask serves
5683 * as an index into the array and manages the available ids. The
5684 * array just stores the ids communicated to the port via the wqes.
5685 */
5686 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5687 if ((i % 2) == 0)
5688 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5689 &id_array[k]);
5690 else
5691 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5692 &id_array[k]);
5693
5694 rsrc_blks = kzalloc(length, GFP_KERNEL);
5695 if (unlikely(!rsrc_blks)) {
5696 rc = -ENOMEM;
5697 kfree(bmask);
5698 kfree(ids);
5699 goto err_exit;
5700 }
5701 rsrc_blks->rsrc_start = rsrc_id;
5702 rsrc_blks->rsrc_size = rsrc_size;
5703 list_add_tail(&rsrc_blks->list, ext_blk_list);
5704 rsrc_start = rsrc_id;
James Smart895427b2017-02-12 13:52:30 -08005705 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
James Smart6d368e52011-05-24 11:44:12 -04005706 phba->sli4_hba.scsi_xri_start = rsrc_start +
James Smart895427b2017-02-12 13:52:30 -08005707 lpfc_sli4_get_iocb_cnt(phba);
5708 phba->sli4_hba.nvme_xri_start =
5709 phba->sli4_hba.scsi_xri_start +
5710 phba->sli4_hba.scsi_xri_max;
5711 }
James Smart6d368e52011-05-24 11:44:12 -04005712
5713 while (rsrc_id < (rsrc_start + rsrc_size)) {
5714 ids[j] = rsrc_id;
5715 rsrc_id++;
5716 j++;
5717 }
5718 /* Entire word processed. Get next word.*/
5719 if ((i % 2) == 1)
5720 k++;
5721 }
5722 err_exit:
5723 lpfc_sli4_mbox_cmd_free(phba, mbox);
5724 return rc;
5725}
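/*
 * Sketch of how the response id_array is unpacked above (hypothetical
 * values): with rsrc_cnt = 3 and rsrc_size = 16, the port might return the
 * extent base ids 0, 16 and 48 packed two per 32-bit word:
 *
 *   id_array[0]: word4_0 = 0,  word4_1 = 16
 *   id_array[1]: word4_0 = 48
 *
 * Each base id seeds one lpfc_rsrc_blks entry and contributes rsrc_size
 * consecutive ids (0-15, 16-31, 48-63) to the ids[] array, while the
 * bitmask indexes those ids for allocation.
 */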
5726
James Smart895427b2017-02-12 13:52:30 -08005727
5728
James Smart6d368e52011-05-24 11:44:12 -04005729/**
5730 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5731 * @phba: Pointer to HBA context object.
5732 * @type: the extent's type.
5733 *
5734 * This function deallocates all extents of a particular resource type.
5735 * SLI4 does not allow for deallocating a particular extent range. It
5736 * is the caller's responsibility to release all kernel memory resources.
5737 **/
5738static int
5739lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5740{
5741 int rc;
5742 uint32_t length, mbox_tmo = 0;
5743 LPFC_MBOXQ_t *mbox;
5744 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5745 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5746
5747 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5748 if (!mbox)
5749 return -ENOMEM;
5750
5751 /*
 5752 * This function sends an embedded mailbox because it only sends
5753 * the resource type. All extents of this type are released by the
5754 * port.
5755 */
5756 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5757 sizeof(struct lpfc_sli4_cfg_mhdr));
5758 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5759 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5760 length, LPFC_SLI4_MBX_EMBED);
5761
5762 /* Send an extents count of 0 - the dealloc doesn't use it. */
5763 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5764 LPFC_SLI4_MBX_EMBED);
5765 if (unlikely(rc)) {
5766 rc = -EIO;
5767 goto out_free_mbox;
5768 }
5769 if (!phba->sli4_hba.intr_enable)
5770 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5771 else {
James Smarta183a152011-10-10 21:32:43 -04005772 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart6d368e52011-05-24 11:44:12 -04005773 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5774 }
5775 if (unlikely(rc)) {
5776 rc = -EIO;
5777 goto out_free_mbox;
5778 }
5779
5780 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5781 if (bf_get(lpfc_mbox_hdr_status,
5782 &dealloc_rsrc->header.cfg_shdr.response)) {
5783 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5784 "2919 Failed to release resource extents "
5785 "for type %d - Status 0x%x Add'l Status 0x%x. "
5786 "Resource memory not released.\n",
5787 type,
5788 bf_get(lpfc_mbox_hdr_status,
5789 &dealloc_rsrc->header.cfg_shdr.response),
5790 bf_get(lpfc_mbox_hdr_add_status,
5791 &dealloc_rsrc->header.cfg_shdr.response));
5792 rc = -EIO;
5793 goto out_free_mbox;
5794 }
5795
5796 /* Release kernel memory resources for the specific type. */
5797 switch (type) {
5798 case LPFC_RSC_TYPE_FCOE_VPI:
5799 kfree(phba->vpi_bmask);
5800 kfree(phba->vpi_ids);
5801 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5802 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5803 &phba->lpfc_vpi_blk_list, list) {
5804 list_del_init(&rsrc_blk->list);
5805 kfree(rsrc_blk);
5806 }
James Smart16a3a202013-04-17 20:14:38 -04005807 phba->sli4_hba.max_cfg_param.vpi_used = 0;
James Smart6d368e52011-05-24 11:44:12 -04005808 break;
5809 case LPFC_RSC_TYPE_FCOE_XRI:
5810 kfree(phba->sli4_hba.xri_bmask);
5811 kfree(phba->sli4_hba.xri_ids);
James Smart6d368e52011-05-24 11:44:12 -04005812 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5813 &phba->sli4_hba.lpfc_xri_blk_list, list) {
5814 list_del_init(&rsrc_blk->list);
5815 kfree(rsrc_blk);
5816 }
5817 break;
5818 case LPFC_RSC_TYPE_FCOE_VFI:
5819 kfree(phba->sli4_hba.vfi_bmask);
5820 kfree(phba->sli4_hba.vfi_ids);
5821 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5822 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5823 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5824 list_del_init(&rsrc_blk->list);
5825 kfree(rsrc_blk);
5826 }
5827 break;
5828 case LPFC_RSC_TYPE_FCOE_RPI:
5829 /* RPI bitmask and physical id array are cleaned up earlier. */
5830 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5831 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5832 list_del_init(&rsrc_blk->list);
5833 kfree(rsrc_blk);
5834 }
5835 break;
5836 default:
5837 break;
5838 }
5839
5840 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5841
5842 out_free_mbox:
5843 mempool_free(mbox, phba->mbox_mem_pool);
5844 return rc;
5845}
5846
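/**
 * lpfc_set_features - Build a SET_FEATURES mailbox command
 * @phba: Pointer to HBA context object.
 * @mbox: pointer to the caller's allocated mailbox structure.
 * @feature: feature to enable (LPFC_SET_UE_RECOVERY or LPFC_SET_MDS_DIAGS).
 *
 * This routine only formats the mailbox; the caller is expected to issue it
 * and check the completion status.
 **/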
Baoyou Xiebd4b3e52016-09-25 13:44:55 +08005847static void
James Smart7bdedb32016-07-06 12:36:00 -07005848lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
5849 uint32_t feature)
James Smart65791f12016-07-06 12:35:56 -07005850{
James Smart65791f12016-07-06 12:35:56 -07005851 uint32_t len;
James Smart65791f12016-07-06 12:35:56 -07005852
James Smart65791f12016-07-06 12:35:56 -07005853 len = sizeof(struct lpfc_mbx_set_feature) -
5854 sizeof(struct lpfc_sli4_cfg_mhdr);
5855 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5856 LPFC_MBOX_OPCODE_SET_FEATURES, len,
5857 LPFC_SLI4_MBX_EMBED);
James Smart65791f12016-07-06 12:35:56 -07005858
James Smart7bdedb32016-07-06 12:36:00 -07005859 switch (feature) {
5860 case LPFC_SET_UE_RECOVERY:
5861 bf_set(lpfc_mbx_set_feature_UER,
5862 &mbox->u.mqe.un.set_feature, 1);
5863 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
5864 mbox->u.mqe.un.set_feature.param_len = 8;
5865 break;
5866 case LPFC_SET_MDS_DIAGS:
5867 bf_set(lpfc_mbx_set_feature_mds,
5868 &mbox->u.mqe.un.set_feature, 1);
5869 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
5870 &mbox->u.mqe.un.set_feature, 0);
5871 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
5872 mbox->u.mqe.un.set_feature.param_len = 8;
5873 break;
James Smart65791f12016-07-06 12:35:56 -07005874 }
James Smart7bdedb32016-07-06 12:36:00 -07005875
5876 return;
James Smart65791f12016-07-06 12:35:56 -07005877}
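/*
 * Usage sketch (assumed caller pattern; the routine above only formats the
 * command): allocate a mailbox, format it, then issue it and check status.
 *
 *   LPFC_MBOXQ_t *mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *
 *   if (mbox) {
 *           lpfc_set_features(phba, mbox, LPFC_SET_MDS_DIAGS);
 *           rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *   }
 */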
5878
James Smart6d368e52011-05-24 11:44:12 -04005879/**
5880 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
5881 * @phba: Pointer to HBA context object.
5882 *
5883 * This function allocates all SLI4 resource identifiers.
5884 **/
5885int
5886lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5887{
5888 int i, rc, error = 0;
5889 uint16_t count, base;
5890 unsigned long longs;
5891
James Smartff78d8f2011-12-13 13:21:35 -05005892 if (!phba->sli4_hba.rpi_hdrs_in_use)
5893 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
James Smart6d368e52011-05-24 11:44:12 -04005894 if (phba->sli4_hba.extents_in_use) {
5895 /*
5896 * The port supports resource extents. The XRI, VPI, VFI, RPI
5897 * resource extent count must be read and allocated before
5898 * provisioning the resource id arrays.
5899 */
5900 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5901 LPFC_IDX_RSRC_RDY) {
5902 /*
5903 * Extent-based resources are set - the driver could
5904 * be in a port reset. Figure out if any corrective
5905 * actions need to be taken.
5906 */
5907 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5908 LPFC_RSC_TYPE_FCOE_VFI);
5909 if (rc != 0)
5910 error++;
5911 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5912 LPFC_RSC_TYPE_FCOE_VPI);
5913 if (rc != 0)
5914 error++;
5915 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5916 LPFC_RSC_TYPE_FCOE_XRI);
5917 if (rc != 0)
5918 error++;
5919 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5920 LPFC_RSC_TYPE_FCOE_RPI);
5921 if (rc != 0)
5922 error++;
5923
5924 /*
5925 * It's possible that the number of resources
5926 * provided to this port instance changed between
5927 * resets. Detect this condition and reallocate
5928 * resources. Otherwise, there is no action.
5929 */
5930 if (error) {
5931 lpfc_printf_log(phba, KERN_INFO,
5932 LOG_MBOX | LOG_INIT,
5933 "2931 Detected extent resource "
5934 "change. Reallocating all "
5935 "extents.\n");
5936 rc = lpfc_sli4_dealloc_extent(phba,
5937 LPFC_RSC_TYPE_FCOE_VFI);
5938 rc = lpfc_sli4_dealloc_extent(phba,
5939 LPFC_RSC_TYPE_FCOE_VPI);
5940 rc = lpfc_sli4_dealloc_extent(phba,
5941 LPFC_RSC_TYPE_FCOE_XRI);
5942 rc = lpfc_sli4_dealloc_extent(phba,
5943 LPFC_RSC_TYPE_FCOE_RPI);
5944 } else
5945 return 0;
5946 }
5947
5948 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5949 if (unlikely(rc))
5950 goto err_exit;
5951
5952 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5953 if (unlikely(rc))
5954 goto err_exit;
5955
5956 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5957 if (unlikely(rc))
5958 goto err_exit;
5959
5960 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5961 if (unlikely(rc))
5962 goto err_exit;
5963 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5964 LPFC_IDX_RSRC_RDY);
5965 return rc;
5966 } else {
5967 /*
5968 * The port does not support resource extents. The XRI, VPI,
5969 * VFI, RPI resource ids were determined from READ_CONFIG.
5970 * Just allocate the bitmasks and provision the resource id
5971 * arrays. If a port reset is active, the resources don't
5972 * need any action - just exit.
5973 */
5974 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
James Smartff78d8f2011-12-13 13:21:35 -05005975 LPFC_IDX_RSRC_RDY) {
5976 lpfc_sli4_dealloc_resource_identifiers(phba);
5977 lpfc_sli4_remove_rpis(phba);
5978 }
James Smart6d368e52011-05-24 11:44:12 -04005979 /* RPIs. */
5980 count = phba->sli4_hba.max_cfg_param.max_rpi;
James Smart0a630c22013-01-03 15:44:09 -05005981 if (count <= 0) {
5982 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5983 "3279 Invalid provisioning of "
5984 "rpi:%d\n", count);
5985 rc = -EINVAL;
5986 goto err_exit;
5987 }
James Smart6d368e52011-05-24 11:44:12 -04005988 base = phba->sli4_hba.max_cfg_param.rpi_base;
5989 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5990 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5991 sizeof(unsigned long),
5992 GFP_KERNEL);
5993 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5994 rc = -ENOMEM;
5995 goto err_exit;
5996 }
5997 phba->sli4_hba.rpi_ids = kzalloc(count *
5998 sizeof(uint16_t),
5999 GFP_KERNEL);
6000 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6001 rc = -ENOMEM;
6002 goto free_rpi_bmask;
6003 }
6004
6005 for (i = 0; i < count; i++)
6006 phba->sli4_hba.rpi_ids[i] = base + i;
6007
6008 /* VPIs. */
6009 count = phba->sli4_hba.max_cfg_param.max_vpi;
James Smart0a630c22013-01-03 15:44:09 -05006010 if (count <= 0) {
6011 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6012 "3280 Invalid provisioning of "
6013 "vpi:%d\n", count);
6014 rc = -EINVAL;
6015 goto free_rpi_ids;
6016 }
James Smart6d368e52011-05-24 11:44:12 -04006017 base = phba->sli4_hba.max_cfg_param.vpi_base;
6018 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6019 phba->vpi_bmask = kzalloc(longs *
6020 sizeof(unsigned long),
6021 GFP_KERNEL);
6022 if (unlikely(!phba->vpi_bmask)) {
6023 rc = -ENOMEM;
6024 goto free_rpi_ids;
6025 }
6026 phba->vpi_ids = kzalloc(count *
6027 sizeof(uint16_t),
6028 GFP_KERNEL);
6029 if (unlikely(!phba->vpi_ids)) {
6030 rc = -ENOMEM;
6031 goto free_vpi_bmask;
6032 }
6033
6034 for (i = 0; i < count; i++)
6035 phba->vpi_ids[i] = base + i;
6036
6037 /* XRIs. */
6038 count = phba->sli4_hba.max_cfg_param.max_xri;
James Smart0a630c22013-01-03 15:44:09 -05006039 if (count <= 0) {
6040 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6041 "3281 Invalid provisioning of "
6042 "xri:%d\n", count);
6043 rc = -EINVAL;
6044 goto free_vpi_ids;
6045 }
James Smart6d368e52011-05-24 11:44:12 -04006046 base = phba->sli4_hba.max_cfg_param.xri_base;
6047 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6048 phba->sli4_hba.xri_bmask = kzalloc(longs *
6049 sizeof(unsigned long),
6050 GFP_KERNEL);
6051 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6052 rc = -ENOMEM;
6053 goto free_vpi_ids;
6054 }
James Smart41899be2012-03-01 22:34:19 -05006055 phba->sli4_hba.max_cfg_param.xri_used = 0;
James Smart6d368e52011-05-24 11:44:12 -04006056 phba->sli4_hba.xri_ids = kzalloc(count *
6057 sizeof(uint16_t),
6058 GFP_KERNEL);
6059 if (unlikely(!phba->sli4_hba.xri_ids)) {
6060 rc = -ENOMEM;
6061 goto free_xri_bmask;
6062 }
6063
6064 for (i = 0; i < count; i++)
6065 phba->sli4_hba.xri_ids[i] = base + i;
6066
6067 /* VFIs. */
6068 count = phba->sli4_hba.max_cfg_param.max_vfi;
James Smart0a630c22013-01-03 15:44:09 -05006069 if (count <= 0) {
6070 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6071 "3282 Invalid provisioning of "
6072 "vfi:%d\n", count);
6073 rc = -EINVAL;
6074 goto free_xri_ids;
6075 }
James Smart6d368e52011-05-24 11:44:12 -04006076 base = phba->sli4_hba.max_cfg_param.vfi_base;
6077 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6078 phba->sli4_hba.vfi_bmask = kzalloc(longs *
6079 sizeof(unsigned long),
6080 GFP_KERNEL);
6081 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6082 rc = -ENOMEM;
6083 goto free_xri_ids;
6084 }
6085 phba->sli4_hba.vfi_ids = kzalloc(count *
6086 sizeof(uint16_t),
6087 GFP_KERNEL);
6088 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6089 rc = -ENOMEM;
6090 goto free_vfi_bmask;
6091 }
6092
6093 for (i = 0; i < count; i++)
6094 phba->sli4_hba.vfi_ids[i] = base + i;
6095
6096 /*
6097 * Mark all resources ready. An HBA reset doesn't need
6098 * to reset the initialization.
6099 */
6100 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6101 LPFC_IDX_RSRC_RDY);
6102 return 0;
6103 }
6104
6105 free_vfi_bmask:
6106 kfree(phba->sli4_hba.vfi_bmask);
Roberto Sassucd60be42017-01-11 11:06:42 +01006107 phba->sli4_hba.vfi_bmask = NULL;
James Smart6d368e52011-05-24 11:44:12 -04006108 free_xri_ids:
6109 kfree(phba->sli4_hba.xri_ids);
Roberto Sassucd60be42017-01-11 11:06:42 +01006110 phba->sli4_hba.xri_ids = NULL;
James Smart6d368e52011-05-24 11:44:12 -04006111 free_xri_bmask:
6112 kfree(phba->sli4_hba.xri_bmask);
Roberto Sassucd60be42017-01-11 11:06:42 +01006113 phba->sli4_hba.xri_bmask = NULL;
James Smart6d368e52011-05-24 11:44:12 -04006114 free_vpi_ids:
6115 kfree(phba->vpi_ids);
Roberto Sassucd60be42017-01-11 11:06:42 +01006116 phba->vpi_ids = NULL;
James Smart6d368e52011-05-24 11:44:12 -04006117 free_vpi_bmask:
6118 kfree(phba->vpi_bmask);
Roberto Sassucd60be42017-01-11 11:06:42 +01006119 phba->vpi_bmask = NULL;
James Smart6d368e52011-05-24 11:44:12 -04006120 free_rpi_ids:
6121 kfree(phba->sli4_hba.rpi_ids);
Roberto Sassucd60be42017-01-11 11:06:42 +01006122 phba->sli4_hba.rpi_ids = NULL;
James Smart6d368e52011-05-24 11:44:12 -04006123 free_rpi_bmask:
6124 kfree(phba->sli4_hba.rpi_bmask);
Roberto Sassucd60be42017-01-11 11:06:42 +01006125 phba->sli4_hba.rpi_bmask = NULL;
James Smart6d368e52011-05-24 11:44:12 -04006126 err_exit:
6127 return rc;
6128}
6129
6130/**
6131 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6132 * @phba: Pointer to HBA context object.
6133 *
 6134 * This function releases all SLI4 resource identifiers, using extent
 6135 * deallocation when resource extents are in use.
6136 **/
6137int
6138lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6139{
6140 if (phba->sli4_hba.extents_in_use) {
6141 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6142 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6143 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6144 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6145 } else {
6146 kfree(phba->vpi_bmask);
James Smart16a3a202013-04-17 20:14:38 -04006147 phba->sli4_hba.max_cfg_param.vpi_used = 0;
James Smart6d368e52011-05-24 11:44:12 -04006148 kfree(phba->vpi_ids);
6149 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6150 kfree(phba->sli4_hba.xri_bmask);
6151 kfree(phba->sli4_hba.xri_ids);
James Smart6d368e52011-05-24 11:44:12 -04006152 kfree(phba->sli4_hba.vfi_bmask);
6153 kfree(phba->sli4_hba.vfi_ids);
6154 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6155 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6156 }
6157
6158 return 0;
6159}
6160
6161/**
James Smartb76f2dc2011-07-22 18:37:42 -04006162 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6163 * @phba: Pointer to HBA context object.
6164 * @type: The resource extent type.
 6165 * @extnt_cnt: buffer to hold port extent count response
6166 * @extnt_size: buffer to hold port extent size response.
6167 *
6168 * This function calls the port to read the host allocated extents
6169 * for a particular type.
6170 **/
6171int
6172lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6173 uint16_t *extnt_cnt, uint16_t *extnt_size)
6174{
6175 bool emb;
6176 int rc = 0;
6177 uint16_t curr_blks = 0;
6178 uint32_t req_len, emb_len;
6179 uint32_t alloc_len, mbox_tmo;
6180 struct list_head *blk_list_head;
6181 struct lpfc_rsrc_blks *rsrc_blk;
6182 LPFC_MBOXQ_t *mbox;
6183 void *virtaddr = NULL;
6184 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6185 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6186 union lpfc_sli4_cfg_shdr *shdr;
6187
6188 switch (type) {
6189 case LPFC_RSC_TYPE_FCOE_VPI:
6190 blk_list_head = &phba->lpfc_vpi_blk_list;
6191 break;
6192 case LPFC_RSC_TYPE_FCOE_XRI:
6193 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6194 break;
6195 case LPFC_RSC_TYPE_FCOE_VFI:
6196 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6197 break;
6198 case LPFC_RSC_TYPE_FCOE_RPI:
6199 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6200 break;
6201 default:
6202 return -EIO;
6203 }
6204
 6205 /* Count the number of extents currently allocated for this type. */
6206 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6207 if (curr_blks == 0) {
6208 /*
6209 * The GET_ALLOCATED mailbox does not return the size,
6210 * just the count. The size should be just the size
6211 * stored in the current allocated block and all sizes
6212 * for an extent type are the same so set the return
6213 * value now.
6214 */
6215 *extnt_size = rsrc_blk->rsrc_size;
6216 }
6217 curr_blks++;
6218 }
6219
James Smartb76f2dc2011-07-22 18:37:42 -04006220 /*
6221 * Calculate the size of an embedded mailbox. The uint32_t
6222 * accounts for extents-specific word.
6223 */
6224 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6225 sizeof(uint32_t);
6226
6227 /*
6228 * Presume the allocation and response will fit into an embedded
6229 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6230 */
6231 emb = LPFC_SLI4_MBX_EMBED;
6232 req_len = emb_len;
6233 if (req_len > emb_len) {
6234 req_len = curr_blks * sizeof(uint16_t) +
6235 sizeof(union lpfc_sli4_cfg_shdr) +
6236 sizeof(uint32_t);
6237 emb = LPFC_SLI4_MBX_NEMBED;
6238 }
6239
6240 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6241 if (!mbox)
6242 return -ENOMEM;
6243 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6244
6245 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6246 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6247 req_len, emb);
6248 if (alloc_len < req_len) {
6249 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6250 "2983 Allocated DMA memory size (x%x) is "
6251 "less than the requested DMA memory "
6252 "size (x%x)\n", alloc_len, req_len);
6253 rc = -ENOMEM;
6254 goto err_exit;
6255 }
6256 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6257 if (unlikely(rc)) {
6258 rc = -EIO;
6259 goto err_exit;
6260 }
6261
6262 if (!phba->sli4_hba.intr_enable)
6263 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6264 else {
James Smarta183a152011-10-10 21:32:43 -04006265 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smartb76f2dc2011-07-22 18:37:42 -04006266 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6267 }
6268
6269 if (unlikely(rc)) {
6270 rc = -EIO;
6271 goto err_exit;
6272 }
6273
6274 /*
6275 * Figure out where the response is located. Then get local pointers
6276 * to the response data. The port does not guarantee to respond to
 6277 * all extent count requests, so update the local variable with the
6278 * allocated count from the port.
6279 */
6280 if (emb == LPFC_SLI4_MBX_EMBED) {
6281 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6282 shdr = &rsrc_ext->header.cfg_shdr;
6283 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6284 } else {
6285 virtaddr = mbox->sge_array->addr[0];
6286 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6287 shdr = &n_rsrc->cfg_shdr;
6288 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6289 }
6290
6291 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6292 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6293 "2984 Failed to read allocated resources "
6294 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6295 type,
6296 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6297 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6298 rc = -EIO;
6299 goto err_exit;
6300 }
6301 err_exit:
6302 lpfc_sli4_mbox_cmd_free(phba, mbox);
6303 return rc;
6304}
6305
6306/**
James Smart895427b2017-02-12 13:52:30 -08006307 * lpfc_sli4_repost_sgl_list - Repost the buffer sgls to the port as blocks
James Smart8a9d2e82012-05-09 21:16:12 -04006308 * @phba: pointer to lpfc hba data structure.
 6310 * @sgl_list: linked list of sgl buffers to post
 6311 * @cnt: number of buffers in the linked list
James Smart8a9d2e82012-05-09 21:16:12 -04006312 *
James Smart895427b2017-02-12 13:52:30 -08006313 * This routine walks the list of buffers that have been allocated and
James Smart8a9d2e82012-05-09 21:16:12 -04006314 * reposts them to the port by using SGL block post. This is needed after a
 6315 * pci_function_reset/warm_start or start. It attempts to construct blocks
James Smart895427b2017-02-12 13:52:30 -08006316 * of buffer sgls that contain contiguous xris and uses the non-embedded
 6317 * SGL block post mailbox commands to post them to the port. For a single
James Smart8a9d2e82012-05-09 21:16:12 -04006318 * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL post
6319 * mailbox command for posting.
6320 *
6321 * Returns: 0 = success, non-zero failure.
6322 **/
6323static int
James Smart895427b2017-02-12 13:52:30 -08006324lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
6325 struct list_head *sgl_list, int cnt)
James Smart8a9d2e82012-05-09 21:16:12 -04006326{
6327 struct lpfc_sglq *sglq_entry = NULL;
6328 struct lpfc_sglq *sglq_entry_next = NULL;
6329 struct lpfc_sglq *sglq_entry_first = NULL;
James Smart895427b2017-02-12 13:52:30 -08006330 int status, total_cnt;
6331 int post_cnt = 0, num_posted = 0, block_cnt = 0;
James Smart8a9d2e82012-05-09 21:16:12 -04006332 int last_xritag = NO_XRI;
6333 LIST_HEAD(prep_sgl_list);
6334 LIST_HEAD(blck_sgl_list);
6335 LIST_HEAD(allc_sgl_list);
6336 LIST_HEAD(post_sgl_list);
6337 LIST_HEAD(free_sgl_list);
6338
James Smart38c20672013-03-01 16:37:44 -05006339 spin_lock_irq(&phba->hbalock);
James Smart895427b2017-02-12 13:52:30 -08006340 spin_lock(&phba->sli4_hba.sgl_list_lock);
6341 list_splice_init(sgl_list, &allc_sgl_list);
6342 spin_unlock(&phba->sli4_hba.sgl_list_lock);
James Smart38c20672013-03-01 16:37:44 -05006343 spin_unlock_irq(&phba->hbalock);
James Smart8a9d2e82012-05-09 21:16:12 -04006344
James Smart895427b2017-02-12 13:52:30 -08006345 total_cnt = cnt;
James Smart8a9d2e82012-05-09 21:16:12 -04006346 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6347 &allc_sgl_list, list) {
6348 list_del_init(&sglq_entry->list);
6349 block_cnt++;
6350 if ((last_xritag != NO_XRI) &&
6351 (sglq_entry->sli4_xritag != last_xritag + 1)) {
6352 /* a hole in xri block, form a sgl posting block */
6353 list_splice_init(&prep_sgl_list, &blck_sgl_list);
6354 post_cnt = block_cnt - 1;
6355 /* prepare list for next posting block */
6356 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6357 block_cnt = 1;
6358 } else {
6359 /* prepare list for next posting block */
6360 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6361 /* enough sgls for non-embed sgl mbox command */
6362 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6363 list_splice_init(&prep_sgl_list,
6364 &blck_sgl_list);
6365 post_cnt = block_cnt;
6366 block_cnt = 0;
6367 }
6368 }
6369 num_posted++;
6370
6371 /* keep track of last sgl's xritag */
6372 last_xritag = sglq_entry->sli4_xritag;
6373
James Smart895427b2017-02-12 13:52:30 -08006374 /* end of repost sgl list condition for buffers */
6375 if (num_posted == total_cnt) {
James Smart8a9d2e82012-05-09 21:16:12 -04006376 if (post_cnt == 0) {
6377 list_splice_init(&prep_sgl_list,
6378 &blck_sgl_list);
6379 post_cnt = block_cnt;
6380 } else if (block_cnt == 1) {
6381 status = lpfc_sli4_post_sgl(phba,
6382 sglq_entry->phys, 0,
6383 sglq_entry->sli4_xritag);
6384 if (!status) {
6385 /* successful, put sgl to posted list */
6386 list_add_tail(&sglq_entry->list,
6387 &post_sgl_list);
6388 } else {
6389 /* Failure, put sgl to free list */
6390 lpfc_printf_log(phba, KERN_WARNING,
6391 LOG_SLI,
James Smart895427b2017-02-12 13:52:30 -08006392 "3159 Failed to post "
James Smart8a9d2e82012-05-09 21:16:12 -04006393 "sgl, xritag:x%x\n",
6394 sglq_entry->sli4_xritag);
6395 list_add_tail(&sglq_entry->list,
6396 &free_sgl_list);
James Smart711ea882013-04-17 20:18:29 -04006397 total_cnt--;
James Smart8a9d2e82012-05-09 21:16:12 -04006398 }
6399 }
6400 }
6401
6402 /* continue until a nembed page worth of sgls */
6403 if (post_cnt == 0)
6404 continue;
6405
James Smart895427b2017-02-12 13:52:30 -08006406 /* post the buffer list sgls as a block */
6407 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
6408 post_cnt);
James Smart8a9d2e82012-05-09 21:16:12 -04006409
6410 if (!status) {
6411 /* success, put sgl list to posted sgl list */
6412 list_splice_init(&blck_sgl_list, &post_sgl_list);
6413 } else {
6414 /* Failure, put sgl list to free sgl list */
6415 sglq_entry_first = list_first_entry(&blck_sgl_list,
6416 struct lpfc_sglq,
6417 list);
6418 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
James Smart895427b2017-02-12 13:52:30 -08006419 "3160 Failed to post sgl-list, "
James Smart8a9d2e82012-05-09 21:16:12 -04006420 "xritag:x%x-x%x\n",
6421 sglq_entry_first->sli4_xritag,
6422 (sglq_entry_first->sli4_xritag +
6423 post_cnt - 1));
6424 list_splice_init(&blck_sgl_list, &free_sgl_list);
James Smart711ea882013-04-17 20:18:29 -04006425 total_cnt -= post_cnt;
James Smart8a9d2e82012-05-09 21:16:12 -04006426 }
6427
 6428 /* don't reset xritag due to hole in xri block */
6429 if (block_cnt == 0)
6430 last_xritag = NO_XRI;
6431
James Smart895427b2017-02-12 13:52:30 -08006432 /* reset sgl post count for next round of posting */
James Smart8a9d2e82012-05-09 21:16:12 -04006433 post_cnt = 0;
6434 }
6435
James Smart895427b2017-02-12 13:52:30 -08006436 /* free the sgls failed to post */
James Smart8a9d2e82012-05-09 21:16:12 -04006437 lpfc_free_sgl_list(phba, &free_sgl_list);
6438
James Smart895427b2017-02-12 13:52:30 -08006439 /* push sgls posted to the available list */
James Smart8a9d2e82012-05-09 21:16:12 -04006440 if (!list_empty(&post_sgl_list)) {
James Smart38c20672013-03-01 16:37:44 -05006441 spin_lock_irq(&phba->hbalock);
James Smart895427b2017-02-12 13:52:30 -08006442 spin_lock(&phba->sli4_hba.sgl_list_lock);
6443 list_splice_init(&post_sgl_list, sgl_list);
6444 spin_unlock(&phba->sli4_hba.sgl_list_lock);
James Smart38c20672013-03-01 16:37:44 -05006445 spin_unlock_irq(&phba->hbalock);
James Smart8a9d2e82012-05-09 21:16:12 -04006446 } else {
6447 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart895427b2017-02-12 13:52:30 -08006448 "3161 Failure to post sgl to port.\n");
James Smart8a9d2e82012-05-09 21:16:12 -04006449 return -EIO;
6450 }
James Smart895427b2017-02-12 13:52:30 -08006451
6452 /* return the number of XRIs actually posted */
6453 return total_cnt;
James Smart8a9d2e82012-05-09 21:16:12 -04006454}
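/*
 * Worked example of the block construction above (hypothetical xritags):
 * given buffers with xritags 10, 11, 12, 20, 21, the hole between 12 and 20
 * closes the first block, so the routine issues one non-embedded block post
 * for 10-12 and, at the end of the list, another for 20-21. A block that
 * collapses to a single sgl (block_cnt == 1) is posted through the embedded
 * lpfc_sli4_post_sgl() path instead. Any sgl that fails to post is moved to
 * free_sgl_list and subtracted from the returned XRI count.
 */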
6455
James Smart61bda8f2016-10-13 15:06:05 -07006456void
6457lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
6458{
6459 uint32_t len;
6460
6461 len = sizeof(struct lpfc_mbx_set_host_data) -
6462 sizeof(struct lpfc_sli4_cfg_mhdr);
6463 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6464 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
6465 LPFC_SLI4_MBX_EMBED);
6466
6467 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
James Smartb2fd1032016-12-19 15:07:21 -08006468 mbox->u.mqe.un.set_host_data.param_len =
6469 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
James Smart61bda8f2016-10-13 15:06:05 -07006470 snprintf(mbox->u.mqe.un.set_host_data.data,
6471 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
6472 "Linux %s v"LPFC_DRIVER_VERSION,
6473 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
6474}
6475
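/*
 * Illustrative sketch, not part of the driver: the typical calling sequence
 * for lpfc_set_host_data() - allocate a mailbox from the mempool, build the
 * SET_HOST_DATA command, issue it by polling and release the mailbox.  It
 * matches the use in lpfc_sli4_hba_setup() below.
 */
static int __maybe_unused lpfc_sketch_set_host_data(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	lpfc_set_host_data(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	mempool_free(mboxq, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
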
James Smart8a9d2e82012-05-09 21:16:12 -04006476/**
James Smartda0436e2009-05-22 14:51:39 -04006477 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
6478 * @phba: Pointer to HBA context object.
6479 *
6480 * This function is the main SLI4 device initialization PCI function. It is
6481 * called by the HBA initialization code, the HBA reset code, and the HBA
6482 * error attention handler code. Caller is not required to hold any
6483 * locks.
6484 **/
6485int
6486lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6487{
6488 int rc;
6489 LPFC_MBOXQ_t *mboxq;
6490 struct lpfc_mqe *mqe;
6491 uint8_t *vpd;
6492 uint32_t vpd_size;
6493 uint32_t ftr_rsp = 0;
6494 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6495 struct lpfc_vport *vport = phba->pport;
6496 struct lpfc_dmabuf *mp;
6497
6498 /* Perform a PCI function reset to start from clean */
6499 rc = lpfc_pci_function_reset(phba);
6500 if (unlikely(rc))
6501 return -ENODEV;
6502
6503	/* Check the HBA Host Status Register for readiness */
6504 rc = lpfc_sli4_post_status_check(phba);
6505 if (unlikely(rc))
6506 return -ENODEV;
6507 else {
6508 spin_lock_irq(&phba->hbalock);
6509 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
6510 spin_unlock_irq(&phba->hbalock);
6511 }
6512
6513 /*
6514 * Allocate a single mailbox container for initializing the
6515 * port.
6516 */
6517 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6518 if (!mboxq)
6519 return -ENOMEM;
6520
James Smartda0436e2009-05-22 14:51:39 -04006521 /* Issue READ_REV to collect vpd and FW information. */
James Smart49198b32010-04-06 15:04:33 -04006522 vpd_size = SLI4_PAGE_SIZE;
James Smartda0436e2009-05-22 14:51:39 -04006523 vpd = kzalloc(vpd_size, GFP_KERNEL);
6524 if (!vpd) {
6525 rc = -ENOMEM;
6526 goto out_free_mbox;
6527 }
6528
6529 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
James Smart76a95d72010-11-20 23:11:48 -05006530 if (unlikely(rc)) {
6531 kfree(vpd);
6532 goto out_free_mbox;
6533 }
James Smart572709e2013-07-15 18:32:43 -04006534
James Smartda0436e2009-05-22 14:51:39 -04006535 mqe = &mboxq->u.mqe;
James Smartf1126682009-06-10 17:22:44 -04006536 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
James Smartb5c53952016-03-31 14:12:30 -07006537 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
James Smart76a95d72010-11-20 23:11:48 -05006538 phba->hba_flag |= HBA_FCOE_MODE;
James Smartb5c53952016-03-31 14:12:30 -07006539 phba->fcp_embed_io = 0; /* SLI4 FC support only */
6540 } else {
James Smart76a95d72010-11-20 23:11:48 -05006541 phba->hba_flag &= ~HBA_FCOE_MODE;
James Smartb5c53952016-03-31 14:12:30 -07006542 }
James Smart45ed1192009-10-02 15:17:02 -04006543
6544 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
6545 LPFC_DCBX_CEE_MODE)
6546 phba->hba_flag |= HBA_FIP_SUPPORT;
6547 else
6548 phba->hba_flag &= ~HBA_FIP_SUPPORT;
6549
James Smart4f2e66c2012-05-09 21:17:07 -04006550 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
6551
James Smartc31098c2011-04-16 11:03:33 -04006552 if (phba->sli_rev != LPFC_SLI_REV4) {
James Smartda0436e2009-05-22 14:51:39 -04006553 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6554 "0376 READ_REV Error. SLI Level %d "
6555 "FCoE enabled %d\n",
James Smart76a95d72010-11-20 23:11:48 -05006556 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
James Smartda0436e2009-05-22 14:51:39 -04006557 rc = -EIO;
James Smart76a95d72010-11-20 23:11:48 -05006558 kfree(vpd);
6559 goto out_free_mbox;
James Smartda0436e2009-05-22 14:51:39 -04006560 }
James Smartcd1c8302011-10-10 21:33:25 -04006561
6562 /*
James Smartff78d8f2011-12-13 13:21:35 -05006563	 * Continue initialization with default values even if the driver failed
6564	 * to read the FCoE param config regions; only read the parameters if the
6565	 * board is FCoE.
6566 */
6567 if (phba->hba_flag & HBA_FCOE_MODE &&
6568 lpfc_sli4_read_fcoe_params(phba))
6569 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
6570 "2570 Failed to read FCoE parameters\n");
6571
6572 /*
James Smartcd1c8302011-10-10 21:33:25 -04006573	 * Retrieve the SLI4 device physical port name; failure to do so
6574	 * is considered non-fatal.
6575 */
6576 rc = lpfc_sli4_retrieve_pport_name(phba);
6577 if (!rc)
6578 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6579 "3080 Successful retrieving SLI4 device "
6580 "physical port name: %s.\n", phba->Port);
6581
James Smartda0436e2009-05-22 14:51:39 -04006582 /*
6583 * Evaluate the read rev and vpd data. Populate the driver
6584 * state with the results. If this routine fails, the failure
6585 * is not fatal as the driver will use generic values.
6586 */
6587 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
6588 if (unlikely(!rc)) {
6589 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6590 "0377 Error %d parsing vpd. "
6591 "Using defaults.\n", rc);
6592 rc = 0;
6593 }
James Smart76a95d72010-11-20 23:11:48 -05006594 kfree(vpd);
James Smartda0436e2009-05-22 14:51:39 -04006595
James Smartf1126682009-06-10 17:22:44 -04006596 /* Save information as VPD data */
6597 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
6598 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
6599 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
6600 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
6601 &mqe->un.read_rev);
6602 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
6603 &mqe->un.read_rev);
6604 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
6605 &mqe->un.read_rev);
6606 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
6607 &mqe->un.read_rev);
6608 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
6609 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
6610 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
6611 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
6612 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
6613 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
6614 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6615 "(%d):0380 READ_REV Status x%x "
6616 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
6617 mboxq->vport ? mboxq->vport->vpi : 0,
6618 bf_get(lpfc_mqe_status, mqe),
6619 phba->vpd.rev.opFwName,
6620 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
6621 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
James Smartda0436e2009-05-22 14:51:39 -04006622
James Smart572709e2013-07-15 18:32:43 -04006623 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
6624 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
6625 if (phba->pport->cfg_lun_queue_depth > rc) {
6626 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6627 "3362 LUN queue depth changed from %d to %d\n",
6628 phba->pport->cfg_lun_queue_depth, rc);
6629 phba->pport->cfg_lun_queue_depth = rc;
6630 }
6631
James Smart65791f12016-07-06 12:35:56 -07006632 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
James Smart7bdedb32016-07-06 12:36:00 -07006633 LPFC_SLI_INTF_IF_TYPE_0) {
6634 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
6635 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6636 if (rc == MBX_SUCCESS) {
6637 phba->hba_flag |= HBA_RECOVERABLE_UE;
6638 /* Set 1Sec interval to detect UE */
6639 phba->eratt_poll_interval = 1;
6640 phba->sli4_hba.ue_to_sr = bf_get(
6641 lpfc_mbx_set_feature_UESR,
6642 &mboxq->u.mqe.un.set_feature);
6643 phba->sli4_hba.ue_to_rp = bf_get(
6644 lpfc_mbx_set_feature_UERP,
6645 &mboxq->u.mqe.un.set_feature);
6646 }
6647 }
6648
6649 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
6650 /* Enable MDS Diagnostics only if the SLI Port supports it */
6651 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
6652 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6653 if (rc != MBX_SUCCESS)
6654 phba->mds_diags_support = 0;
6655 }
James Smart572709e2013-07-15 18:32:43 -04006656
James Smartda0436e2009-05-22 14:51:39 -04006657 /*
6658 * Discover the port's supported feature set and match it against the
6659	 * host's requests.
6660 */
6661 lpfc_request_features(phba, mboxq);
6662 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6663 if (unlikely(rc)) {
6664 rc = -EIO;
James Smart76a95d72010-11-20 23:11:48 -05006665 goto out_free_mbox;
James Smartda0436e2009-05-22 14:51:39 -04006666 }
6667
6668 /*
6669 * The port must support FCP initiator mode as this is the
6670 * only mode running in the host.
6671 */
6672 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
6673 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6674 "0378 No support for fcpi mode.\n");
6675 ftr_rsp++;
6676 }
James Smartfedd3b72011-02-16 12:39:24 -05006677 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
6678 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
6679 else
6680 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
James Smartda0436e2009-05-22 14:51:39 -04006681 /*
6682 * If the port cannot support the host's requested features
6683 * then turn off the global config parameters to disable the
6684 * feature in the driver. This is not a fatal error.
6685 */
James Smartbf086112011-08-21 21:48:13 -04006686 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
6687 if (phba->cfg_enable_bg) {
6688 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
6689 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
6690 else
6691 ftr_rsp++;
6692 }
James Smartda0436e2009-05-22 14:51:39 -04006693
6694 if (phba->max_vpi && phba->cfg_enable_npiv &&
6695 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6696 ftr_rsp++;
6697
6698 if (ftr_rsp) {
6699 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6700 "0379 Feature Mismatch Data: x%08x %08x "
6701 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
6702 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
6703 phba->cfg_enable_npiv, phba->max_vpi);
6704 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
6705 phba->cfg_enable_bg = 0;
6706 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6707 phba->cfg_enable_npiv = 0;
6708 }
6709
6710 /* These SLI3 features are assumed in SLI4 */
6711 spin_lock_irq(&phba->hbalock);
6712 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
6713 spin_unlock_irq(&phba->hbalock);
6714
James Smart6d368e52011-05-24 11:44:12 -04006715 /*
6716	 * Allocate all resources (xri, rpi, vpi, vfi) now. Subsequent
6717	 * calls depend on these resources to complete port setup.
6718 */
6719 rc = lpfc_sli4_alloc_resource_identifiers(phba);
6720 if (rc) {
6721 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6722 "2920 Failed to alloc Resource IDs "
6723 "rc = x%x\n", rc);
6724 goto out_free_mbox;
6725 }
6726
James Smart61bda8f2016-10-13 15:06:05 -07006727 lpfc_set_host_data(phba, mboxq);
6728
6729 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6730 if (rc) {
6731 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6732 "2134 Failed to set host os driver version %x",
6733 rc);
6734 }
6735
James Smartda0436e2009-05-22 14:51:39 -04006736 /* Read the port's service parameters. */
James Smart9f1177a2010-02-26 14:12:57 -05006737 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
6738 if (rc) {
6739 phba->link_state = LPFC_HBA_ERROR;
6740 rc = -ENOMEM;
James Smart76a95d72010-11-20 23:11:48 -05006741 goto out_free_mbox;
James Smart9f1177a2010-02-26 14:12:57 -05006742 }
6743
James Smartda0436e2009-05-22 14:51:39 -04006744 mboxq->vport = vport;
6745 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6746 mp = (struct lpfc_dmabuf *) mboxq->context1;
6747 if (rc == MBX_SUCCESS) {
6748 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
6749 rc = 0;
6750 }
6751
6752 /*
6753 * This memory was allocated by the lpfc_read_sparam routine. Release
6754 * it to the mbuf pool.
6755 */
6756 lpfc_mbuf_free(phba, mp->virt, mp->phys);
6757 kfree(mp);
6758 mboxq->context1 = NULL;
6759 if (unlikely(rc)) {
6760 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6761 "0382 READ_SPARAM command failed "
6762 "status %d, mbxStatus x%x\n",
6763 rc, bf_get(lpfc_mqe_status, mqe));
6764 phba->link_state = LPFC_HBA_ERROR;
6765 rc = -EIO;
James Smart76a95d72010-11-20 23:11:48 -05006766 goto out_free_mbox;
James Smartda0436e2009-05-22 14:51:39 -04006767 }
6768
James Smart05580562011-05-24 11:40:48 -04006769 lpfc_update_vport_wwn(vport);
James Smartda0436e2009-05-22 14:51:39 -04006770
6771 /* Update the fc_host data structures with new wwn. */
6772 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
6773 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
6774
James Smart895427b2017-02-12 13:52:30 -08006775 /* Create all the SLI4 queues */
6776 rc = lpfc_sli4_queue_create(phba);
6777 if (rc) {
6778 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6779 "3089 Failed to allocate queues\n");
6780 rc = -ENODEV;
6781 goto out_free_mbox;
6782 }
6783 /* Set up all the queues to the device */
6784 rc = lpfc_sli4_queue_setup(phba);
6785 if (unlikely(rc)) {
6786 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6787 "0381 Error %d during queue setup.\n ", rc);
6788 goto out_stop_timers;
6789 }
6790 /* Initialize the driver internal SLI layer lists. */
6791 lpfc_sli4_setup(phba);
6792 lpfc_sli4_queue_init(phba);
6793
6794 /* update host els xri-sgl sizes and mappings */
6795 rc = lpfc_sli4_els_sgl_update(phba);
James Smart8a9d2e82012-05-09 21:16:12 -04006796 if (unlikely(rc)) {
6797 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6798 "1400 Failed to update xri-sgl size and "
6799 "mapping: %d\n", rc);
James Smart895427b2017-02-12 13:52:30 -08006800 goto out_destroy_queue;
James Smartda0436e2009-05-22 14:51:39 -04006801 }
6802
James Smart8a9d2e82012-05-09 21:16:12 -04006803 /* register the els sgl pool to the port */
James Smart895427b2017-02-12 13:52:30 -08006804 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
6805 phba->sli4_hba.els_xri_cnt);
6806 if (unlikely(rc < 0)) {
James Smart8a9d2e82012-05-09 21:16:12 -04006807 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6808 "0582 Error %d during els sgl post "
6809 "operation\n", rc);
6810 rc = -ENODEV;
James Smart895427b2017-02-12 13:52:30 -08006811 goto out_destroy_queue;
6812 }
6813 phba->sli4_hba.els_xri_cnt = rc;
6814
James Smartf358dd02017-02-12 13:52:34 -08006815 if (phba->nvmet_support) {
6816 /* update host nvmet xri-sgl sizes and mappings */
6817 rc = lpfc_sli4_nvmet_sgl_update(phba);
6818 if (unlikely(rc)) {
6819 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6820 "6308 Failed to update nvmet-sgl size "
6821 "and mapping: %d\n", rc);
6822 goto out_destroy_queue;
6823 }
6824
6825 /* register the nvmet sgl pool to the port */
6826 rc = lpfc_sli4_repost_sgl_list(
6827 phba,
6828 &phba->sli4_hba.lpfc_nvmet_sgl_list,
6829 phba->sli4_hba.nvmet_xri_cnt);
6830 if (unlikely(rc < 0)) {
6831 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6832 "3117 Error %d during nvmet "
6833 "sgl post\n", rc);
6834 rc = -ENODEV;
6835 goto out_destroy_queue;
6836 }
6837 phba->sli4_hba.nvmet_xri_cnt = rc;
6838 /* todo: tgt: create targetport */
6839 } else {
James Smart895427b2017-02-12 13:52:30 -08006840 /* update host scsi xri-sgl sizes and mappings */
6841 rc = lpfc_sli4_scsi_sgl_update(phba);
6842 if (unlikely(rc)) {
6843 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6844 "6309 Failed to update scsi-sgl size "
6845 "and mapping: %d\n", rc);
6846 goto out_destroy_queue;
6847 }
6848
6849 /* update host nvme xri-sgl sizes and mappings */
6850 rc = lpfc_sli4_nvme_sgl_update(phba);
6851 if (unlikely(rc)) {
6852 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6853 "6082 Failed to update nvme-sgl size "
6854 "and mapping: %d\n", rc);
6855 goto out_destroy_queue;
6856 }
James Smart8a9d2e82012-05-09 21:16:12 -04006857 }
6858
James Smart895427b2017-02-12 13:52:30 -08006859 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
6860 /* register the allocated scsi sgl pool to the port */
6861 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
6862 if (unlikely(rc)) {
6863 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6864 "0383 Error %d during scsi sgl post "
6865 "operation\n", rc);
6866 /* Some Scsi buffers were moved to abort scsi list */
6867 /* A pci function reset will repost them */
6868 rc = -ENODEV;
6869 goto out_destroy_queue;
6870 }
James Smartda0436e2009-05-22 14:51:39 -04006871 }
6872
James Smart01649562017-02-12 13:52:32 -08006873 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
6874 (phba->nvmet_support == 0)) {
6875
6876 /* register the allocated nvme sgl pool to the port */
6877 rc = lpfc_repost_nvme_sgl_list(phba);
6878 if (unlikely(rc)) {
6879 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6880 "6116 Error %d during nvme sgl post "
6881 "operation\n", rc);
6882 /* Some NVME buffers were moved to abort nvme list */
6883 /* A pci function reset will repost them */
6884 rc = -ENODEV;
6885 goto out_destroy_queue;
6886 }
6887 }
6888
James Smartda0436e2009-05-22 14:51:39 -04006889 /* Post the rpi header region to the device. */
6890 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
6891 if (unlikely(rc)) {
6892 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6893 "0393 Error %d during rpi post operation\n",
6894 rc);
6895 rc = -ENODEV;
James Smart895427b2017-02-12 13:52:30 -08006896 goto out_destroy_queue;
James Smartda0436e2009-05-22 14:51:39 -04006897 }
James Smart97f2ecf2012-03-01 22:35:23 -05006898 lpfc_sli4_node_prep(phba);
James Smartda0436e2009-05-22 14:51:39 -04006899
James Smart895427b2017-02-12 13:52:30 -08006900 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
6901 if (phba->nvmet_support == 0) {
6902 /*
6903 * The FC Port needs to register FCFI (index 0)
6904 */
6905 lpfc_reg_fcfi(phba, mboxq);
6906 mboxq->vport = phba->pport;
6907 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6908 if (rc != MBX_SUCCESS)
6909 goto out_unset_queue;
6910 rc = 0;
6911 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
6912 &mboxq->u.mqe.un.reg_fcfi);
6913 }
6914 /* Check if the port is configured to be disabled */
6915 lpfc_sli_read_link_ste(phba);
James Smartda0436e2009-05-22 14:51:39 -04006916 }
6917
6918 /* Arm the CQs and then EQs on device */
6919 lpfc_sli4_arm_cqeq_intr(phba);
6920
6921 /* Indicate device interrupt mode */
6922 phba->sli4_hba.intr_enable = 1;
6923
6924 /* Allow asynchronous mailbox command to go through */
6925 spin_lock_irq(&phba->hbalock);
6926 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
6927 spin_unlock_irq(&phba->hbalock);
6928
6929 /* Post receive buffers to the device */
6930 lpfc_sli4_rb_setup(phba);
6931
James Smartfc2b9892010-02-26 14:15:29 -05006932 /* Reset HBA FCF states after HBA reset */
6933 phba->fcf.fcf_flag = 0;
6934 phba->fcf.current_rec.flag = 0;
6935
James Smartda0436e2009-05-22 14:51:39 -04006936 /* Start the ELS watchdog timer */
James Smart8fa38512009-07-19 10:01:03 -04006937 mod_timer(&vport->els_tmofunc,
James Smart256ec0d2013-04-17 20:14:58 -04006938 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
James Smartda0436e2009-05-22 14:51:39 -04006939
6940 /* Start heart beat timer */
6941 mod_timer(&phba->hb_tmofunc,
James Smart256ec0d2013-04-17 20:14:58 -04006942 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
James Smartda0436e2009-05-22 14:51:39 -04006943 phba->hb_outstanding = 0;
6944 phba->last_completion_time = jiffies;
6945
6946 /* Start error attention (ERATT) polling timer */
James Smart256ec0d2013-04-17 20:14:58 -04006947 mod_timer(&phba->eratt_poll,
James Smart65791f12016-07-06 12:35:56 -07006948 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
James Smartda0436e2009-05-22 14:51:39 -04006949
James Smart75baf692010-06-08 18:31:21 -04006950 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
6951 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
6952 rc = pci_enable_pcie_error_reporting(phba->pcidev);
6953 if (!rc) {
6954 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6955 "2829 This device supports "
6956 "Advanced Error Reporting (AER)\n");
6957 spin_lock_irq(&phba->hbalock);
6958 phba->hba_flag |= HBA_AER_ENABLED;
6959 spin_unlock_irq(&phba->hbalock);
6960 } else {
6961 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6962 "2830 This device does not support "
6963 "Advanced Error Reporting (AER)\n");
6964 phba->cfg_aer_support = 0;
6965 }
James Smart0a96e972011-07-22 18:37:28 -04006966 rc = 0;
James Smart75baf692010-06-08 18:31:21 -04006967 }
6968
James Smartda0436e2009-05-22 14:51:39 -04006969 /*
6970 * The port is ready, set the host's link state to LINK_DOWN
6971 * in preparation for link interrupts.
6972 */
James Smartda0436e2009-05-22 14:51:39 -04006973 spin_lock_irq(&phba->hbalock);
6974 phba->link_state = LPFC_LINK_DOWN;
6975 spin_unlock_irq(&phba->hbalock);
James Smart026abb82011-12-13 13:20:45 -05006976 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
6977 (phba->hba_flag & LINK_DISABLED)) {
6978 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6979 "3103 Adapter Link is disabled.\n");
6980 lpfc_down_link(phba, mboxq);
6981 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6982 if (rc != MBX_SUCCESS) {
6983 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6984 "3104 Adapter failed to issue "
6985 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
6986 goto out_unset_queue;
6987 }
6988 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
James Smart1b511972011-12-13 13:23:09 -05006989 /* don't perform init_link on SLI4 FC port loopback test */
6990 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
6991 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
6992 if (rc)
6993 goto out_unset_queue;
6994 }
James Smart5350d872011-10-10 21:33:49 -04006995 }
6996 mempool_free(mboxq, phba->mbox_mem_pool);
6997 return rc;
James Smart76a95d72010-11-20 23:11:48 -05006998out_unset_queue:
James Smartda0436e2009-05-22 14:51:39 -04006999 /* Unset all the queues set up in this routine when error out */
James Smart5350d872011-10-10 21:33:49 -04007000 lpfc_sli4_queue_unset(phba);
7001out_destroy_queue:
7002 lpfc_sli4_queue_destroy(phba);
James Smartda0436e2009-05-22 14:51:39 -04007003out_stop_timers:
James Smart5350d872011-10-10 21:33:49 -04007004 lpfc_stop_hba_timers(phba);
James Smartda0436e2009-05-22 14:51:39 -04007005out_free_mbox:
7006 mempool_free(mboxq, phba->mbox_mem_pool);
7007 return rc;
7008}
James Smarte59058c2008-08-24 21:49:00 -04007009
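/*
 * Illustrative sketch, not part of the driver: the goto error-unwind pattern
 * used by lpfc_sli4_hba_setup() above.  Each failure path jumps to a label
 * that tears down what has already been set up and then falls through the
 * remaining cleanup labels; the real function has more steps and labels than
 * this simplified outline.
 */
static int __maybe_unused lpfc_sketch_setup_unwind(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	rc = lpfc_sli4_queue_create(phba);
	if (rc)
		goto out_free_mbox;

	rc = lpfc_sli4_queue_setup(phba);
	if (rc)
		goto out_destroy_queue;

	/* further setup steps would continue here */
	mempool_free(mboxq, phba->mbox_mem_pool);
	return 0;

out_destroy_queue:
	lpfc_sli4_queue_destroy(phba);
out_free_mbox:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}
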
7010/**
James Smart3621a712009-04-06 18:47:14 -04007011 * lpfc_mbox_timeout - Timeout call back function for mbox timer
James Smarte59058c2008-08-24 21:49:00 -04007012 * @ptr: context object - pointer to hba structure.
dea31012005-04-17 16:05:31 -05007013 *
James Smarte59058c2008-08-24 21:49:00 -04007014 * This is the callback function for the mailbox timer. The mailbox
7015 * timer is armed when a new mailbox command is issued and the timer
7016 * is deleted when the mailbox completes. The function is called by
7017 * the kernel timer code when a mailbox does not complete within the
7018 * expected time. This function wakes up the worker thread to
7019 * process the mailbox timeout and returns. All the processing is
7020 * done by the worker thread function lpfc_mbox_timeout_handler.
7021 **/
dea31012005-04-17 16:05:31 -05007022void
7023lpfc_mbox_timeout(unsigned long ptr)
7024{
James Smart92d7f7b2007-06-17 19:56:38 -05007025 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
dea31012005-04-17 16:05:31 -05007026 unsigned long iflag;
James Smart2e0fef82007-06-17 19:56:36 -05007027 uint32_t tmo_posted;
dea31012005-04-17 16:05:31 -05007028
James Smart2e0fef82007-06-17 19:56:36 -05007029 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
James Smart92d7f7b2007-06-17 19:56:38 -05007030 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
James Smart2e0fef82007-06-17 19:56:36 -05007031 if (!tmo_posted)
7032 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7033 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7034
James Smart5e9d9b82008-06-14 22:52:53 -04007035 if (!tmo_posted)
7036 lpfc_worker_wake_up(phba);
7037 return;
dea31012005-04-17 16:05:31 -05007038}
7039
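/*
 * Illustrative sketch, not part of the driver: how the mailbox timer that
 * fires lpfc_mbox_timeout() is armed when a command is issued.  It mirrors
 * the timeout setup in lpfc_sli_issue_mbox_s3() below; the timer is removed
 * again once the mailbox command completes.
 */
static void __maybe_unused lpfc_sketch_arm_mbox_timer(struct lpfc_hba *phba,
						      LPFC_MBOXQ_t *pmbox)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long timeout;

	/* per-command timeout in seconds, converted to jiffies */
	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 1000);
	mod_timer(&psli->mbox_tmo, jiffies + timeout);
}
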
James Smarte8d3c3b2013-10-10 12:21:30 -04007040/**
7041 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
7042 * are pending
7043 * @phba: Pointer to HBA context object.
7044 *
7045 * This function checks if any mailbox completions are present on the mailbox
7046 * completion queue.
7047 **/
Nicholas Krause3bb11fc2015-08-31 16:48:13 -04007048static bool
James Smarte8d3c3b2013-10-10 12:21:30 -04007049lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7050{
7051
7052 uint32_t idx;
7053 struct lpfc_queue *mcq;
7054 struct lpfc_mcqe *mcqe;
7055 bool pending_completions = false;
7056
7057 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7058 return false;
7059
7060 /* Check for completions on mailbox completion queue */
7061
7062 mcq = phba->sli4_hba.mbx_cq;
7063 idx = mcq->hba_index;
7064 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
7065 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
7066 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
7067 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
7068 pending_completions = true;
7069 break;
7070 }
7071 idx = (idx + 1) % mcq->entry_count;
7072 if (mcq->hba_index == idx)
7073 break;
7074 }
7075 return pending_completions;
7076
7077}
7078
7079/**
7080 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
7081 * that were missed.
7082 * @phba: Pointer to HBA context object.
7083 *
7084 * For SLI4, it is possible to miss an interrupt. As such, mbox completions
7085 * may be missed, causing erroneous mailbox timeouts to occur. This function
7086 * checks to see if mbox completions are on the mailbox completion queue
7087 * and will process all the completions associated with the eq for the
7088 * mailbox completion queue.
7089 **/
7090bool
7091lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7092{
7093
7094 uint32_t eqidx;
7095 struct lpfc_queue *fpeq = NULL;
7096 struct lpfc_eqe *eqe;
7097 bool mbox_pending;
7098
7099 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7100 return false;
7101
7102 /* Find the eq associated with the mcq */
7103
7104 if (phba->sli4_hba.hba_eq)
James Smart895427b2017-02-12 13:52:30 -08007105 for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++)
James Smarte8d3c3b2013-10-10 12:21:30 -04007106 if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
7107 phba->sli4_hba.mbx_cq->assoc_qid) {
7108 fpeq = phba->sli4_hba.hba_eq[eqidx];
7109 break;
7110 }
7111 if (!fpeq)
7112 return false;
7113
7114 /* Turn off interrupts from this EQ */
7115
7116 lpfc_sli4_eq_clr_intr(fpeq);
7117
7118 /* Check to see if a mbox completion is pending */
7119
7120 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7121
7122 /*
7123 * If a mbox completion is pending, process all the events on EQ
7124 * associated with the mbox completion queue (this could include
7125 * mailbox commands, async events, els commands, receive queue data
7126 * and fcp commands)
7127 */
7128
7129 if (mbox_pending)
7130 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
7131 lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
7132 fpeq->EQ_processed++;
7133 }
7134
7135 /* Always clear and re-arm the EQ */
7136
7137 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
7138
7139 return mbox_pending;
7140
7141}
James Smarte59058c2008-08-24 21:49:00 -04007142
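/*
 * Illustrative sketch, not part of the driver: how a timeout path uses the
 * routine above to filter out false mailbox timeouts caused by a missed
 * interrupt, exactly as lpfc_mbox_timeout_handler() does below.
 */
static void __maybe_unused lpfc_sketch_check_false_mbox_timeout(
	struct lpfc_hba *phba)
{
	/* If the mailbox really completed, process it and skip recovery */
	if (lpfc_sli4_process_missed_mbox_completions(phba))
		return;

	/* otherwise continue with genuine timeout handling */
}
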
7143/**
James Smart3621a712009-04-06 18:47:14 -04007144 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
James Smarte59058c2008-08-24 21:49:00 -04007145 * @phba: Pointer to HBA context object.
7146 *
7147 * This function is called from worker thread when a mailbox command times out.
7148 * The caller is not required to hold any locks. This function will reset the
7149 * HBA and recover all the pending commands.
7150 **/
dea31012005-04-17 16:05:31 -05007151void
7152lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7153{
James Smart2e0fef82007-06-17 19:56:36 -05007154 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
James Smarteb016562014-09-03 12:58:06 -04007155 MAILBOX_t *mb = NULL;
7156
James Smart1dcb58e2007-04-25 09:51:30 -04007157 struct lpfc_sli *psli = &phba->sli;
dea31012005-04-17 16:05:31 -05007158
James Smarte8d3c3b2013-10-10 12:21:30 -04007159 /* If the mailbox completed, process the completion and return */
7160 if (lpfc_sli4_process_missed_mbox_completions(phba))
7161 return;
7162
James Smarteb016562014-09-03 12:58:06 -04007163 if (pmbox != NULL)
7164 mb = &pmbox->u.mb;
James Smarta257bf92009-04-06 18:48:10 -04007165 /* Check the pmbox pointer first. There is a race condition
7166 * between the mbox timeout handler getting executed in the
7167 * worklist and the mailbox actually completing. When this
7168 * race condition occurs, the mbox_active will be NULL.
7169 */
7170 spin_lock_irq(&phba->hbalock);
7171 if (pmbox == NULL) {
7172 lpfc_printf_log(phba, KERN_WARNING,
7173 LOG_MBOX | LOG_SLI,
7174 "0353 Active Mailbox cleared - mailbox timeout "
7175 "exiting\n");
7176 spin_unlock_irq(&phba->hbalock);
7177 return;
7178 }
7179
dea31012005-04-17 16:05:31 -05007180 /* Mbox cmd <mbxCommand> timeout */
James Smarted957682007-06-17 19:56:37 -05007181 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04007182 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
James Smart92d7f7b2007-06-17 19:56:38 -05007183 mb->mbxCommand,
7184 phba->pport->port_state,
7185 phba->sli.sli_flag,
7186 phba->sli.mbox_active);
James Smarta257bf92009-04-06 18:48:10 -04007187 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05007188
James Smart1dcb58e2007-04-25 09:51:30 -04007189 /* Setting state unknown so lpfc_sli_abort_iocb_ring
7190 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
Lucas De Marchi25985ed2011-03-30 22:57:33 -03007191 * it to fail all outstanding SCSI IO.
James Smart1dcb58e2007-04-25 09:51:30 -04007192 */
James Smart2e0fef82007-06-17 19:56:36 -05007193 spin_lock_irq(&phba->pport->work_port_lock);
7194 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
7195 spin_unlock_irq(&phba->pport->work_port_lock);
7196 spin_lock_irq(&phba->hbalock);
7197 phba->link_state = LPFC_LINK_UNKNOWN;
James Smartf4b4c682009-05-22 14:53:12 -04007198 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
James Smart2e0fef82007-06-17 19:56:36 -05007199 spin_unlock_irq(&phba->hbalock);
James Smart1dcb58e2007-04-25 09:51:30 -04007200
James Smartdb55fba2014-04-04 13:52:02 -04007201 lpfc_sli_abort_fcp_rings(phba);
James Smart1dcb58e2007-04-25 09:51:30 -04007202
7203 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smart76bb24e2007-10-27 13:38:00 -04007204 "0345 Resetting board due to mailbox timeout\n");
James Smart3772a992009-05-22 14:50:54 -04007205
7206 /* Reset the HBA device */
7207 lpfc_reset_hba(phba);
dea31012005-04-17 16:05:31 -05007208}
7209
James Smarte59058c2008-08-24 21:49:00 -04007210/**
James Smart3772a992009-05-22 14:50:54 -04007211 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
James Smarte59058c2008-08-24 21:49:00 -04007212 * @phba: Pointer to HBA context object.
7213 * @pmbox: Pointer to mailbox object.
7214 * @flag: Flag indicating how the mailbox need to be processed.
7215 *
7216 * This function is called by discovery code and HBA management code
James Smart3772a992009-05-22 14:50:54 -04007217 * to submit a mailbox command to firmware with SLI-3 interface spec. This
7218 * function gets the hbalock to protect the data structures.
James Smarte59058c2008-08-24 21:49:00 -04007219 * The mailbox command can be submitted in polling mode, in which case
7220 * this function will wait in a polling loop for the completion of the
7221 * mailbox.
7222 * If the mailbox is submitted in no_wait mode (not polling) the
7223 * function will submit the command and return immediately without waiting
7224 * for the mailbox completion. The no_wait mode is supported only when the
7225 * HBA is in SLI2/SLI3 mode with interrupts enabled.
7226 * The SLI interface allows only one mailbox pending at a time. If the
7227 * mailbox is issued in polling mode and there is already a mailbox
7228 * pending, then the function will return an error. If the mailbox is issued
7229 * in NO_WAIT mode and there is a mailbox pending already, the function
7230 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
7231 * The SLI layer owns the mailbox object until the completion of the mailbox
7232 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
7233 * return codes the caller owns the mailbox command after the return of
7234 * the function.
7235 **/
James Smart3772a992009-05-22 14:50:54 -04007236static int
7237lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
7238 uint32_t flag)
dea31012005-04-17 16:05:31 -05007239{
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007240 MAILBOX_t *mbx;
James Smart2e0fef82007-06-17 19:56:36 -05007241 struct lpfc_sli *psli = &phba->sli;
dea31012005-04-17 16:05:31 -05007242 uint32_t status, evtctr;
James Smart9940b972011-03-11 16:06:12 -05007243 uint32_t ha_copy, hc_copy;
dea31012005-04-17 16:05:31 -05007244 int i;
James Smart09372822008-01-11 01:52:54 -05007245 unsigned long timeout;
dea31012005-04-17 16:05:31 -05007246 unsigned long drvr_flag = 0;
James Smart34b02dc2008-08-24 21:49:55 -04007247 uint32_t word0, ldata;
dea31012005-04-17 16:05:31 -05007248 void __iomem *to_slim;
James Smart58da1ff2008-04-07 10:15:56 -04007249 int processing_queue = 0;
7250
7251 spin_lock_irqsave(&phba->hbalock, drvr_flag);
7252 if (!pmbox) {
James Smart8568a4d2009-07-19 10:01:16 -04007253 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
James Smart58da1ff2008-04-07 10:15:56 -04007254 /* processing mbox queue from intr_handler */
James Smart3772a992009-05-22 14:50:54 -04007255 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7256 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7257 return MBX_SUCCESS;
7258 }
James Smart58da1ff2008-04-07 10:15:56 -04007259 processing_queue = 1;
James Smart58da1ff2008-04-07 10:15:56 -04007260 pmbox = lpfc_mbox_get(phba);
7261 if (!pmbox) {
7262 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7263 return MBX_SUCCESS;
7264 }
7265 }
dea31012005-04-17 16:05:31 -05007266
James Smarted957682007-06-17 19:56:37 -05007267 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
James Smart92d7f7b2007-06-17 19:56:38 -05007268 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
James Smarted957682007-06-17 19:56:37 -05007269 if(!pmbox->vport) {
James Smart58da1ff2008-04-07 10:15:56 -04007270 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
James Smarted957682007-06-17 19:56:37 -05007271 lpfc_printf_log(phba, KERN_ERR,
James Smart92d7f7b2007-06-17 19:56:38 -05007272 LOG_MBOX | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04007273 "1806 Mbox x%x failed. No vport\n",
James Smart3772a992009-05-22 14:50:54 -04007274 pmbox->u.mb.mbxCommand);
James Smarted957682007-06-17 19:56:37 -05007275 dump_stack();
James Smart58da1ff2008-04-07 10:15:56 -04007276 goto out_not_finished;
James Smarted957682007-06-17 19:56:37 -05007277 }
7278 }
7279
Linas Vepstas8d63f372007-02-14 14:28:36 -06007280 /* If the PCI channel is in offline state, do not post mbox. */
James Smart58da1ff2008-04-07 10:15:56 -04007281 if (unlikely(pci_channel_offline(phba->pcidev))) {
7282 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7283 goto out_not_finished;
7284 }
Linas Vepstas8d63f372007-02-14 14:28:36 -06007285
James Smarta257bf92009-04-06 18:48:10 -04007286 /* If HBA has a deferred error attention, fail the iocb. */
7287 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
7288 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7289 goto out_not_finished;
7290 }
7291
dea31012005-04-17 16:05:31 -05007292 psli = &phba->sli;
James Smart92d7f7b2007-06-17 19:56:38 -05007293
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007294 mbx = &pmbox->u.mb;
dea31012005-04-17 16:05:31 -05007295 status = MBX_SUCCESS;
7296
James Smart2e0fef82007-06-17 19:56:36 -05007297 if (phba->link_state == LPFC_HBA_ERROR) {
7298 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
Jamie Wellnitz41415862006-02-28 19:25:27 -05007299
7300 /* Mbox command <mbxCommand> cannot issue */
James Smart3772a992009-05-22 14:50:54 -04007301 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7302 "(%d):0311 Mailbox command x%x cannot "
7303 "issue Data: x%x x%x\n",
7304 pmbox->vport ? pmbox->vport->vpi : 0,
7305 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
James Smart58da1ff2008-04-07 10:15:56 -04007306 goto out_not_finished;
Jamie Wellnitz41415862006-02-28 19:25:27 -05007307 }
7308
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007309 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
James Smart9940b972011-03-11 16:06:12 -05007310 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
7311 !(hc_copy & HC_MBINT_ENA)) {
7312 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7313 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smart3772a992009-05-22 14:50:54 -04007314 "(%d):2528 Mailbox command x%x cannot "
7315 "issue Data: x%x x%x\n",
7316 pmbox->vport ? pmbox->vport->vpi : 0,
7317 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
James Smart9940b972011-03-11 16:06:12 -05007318 goto out_not_finished;
7319 }
James Smart92908312006-03-07 15:04:13 -05007320 }
7321
dea31012005-04-17 16:05:31 -05007322 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7323 /* Polling for a mbox command when another one is already active
7324 * is not allowed in SLI. Also, the driver must have established
7325 * SLI2 mode to queue and process multiple mbox commands.
7326 */
7327
7328 if (flag & MBX_POLL) {
James Smart2e0fef82007-06-17 19:56:36 -05007329 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05007330
7331 /* Mbox command <mbxCommand> cannot issue */
James Smart3772a992009-05-22 14:50:54 -04007332 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7333 "(%d):2529 Mailbox command x%x "
7334 "cannot issue Data: x%x x%x\n",
7335 pmbox->vport ? pmbox->vport->vpi : 0,
7336 pmbox->u.mb.mbxCommand,
7337 psli->sli_flag, flag);
James Smart58da1ff2008-04-07 10:15:56 -04007338 goto out_not_finished;
dea31012005-04-17 16:05:31 -05007339 }
7340
James Smart3772a992009-05-22 14:50:54 -04007341 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
James Smart2e0fef82007-06-17 19:56:36 -05007342 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05007343 /* Mbox command <mbxCommand> cannot issue */
James Smart3772a992009-05-22 14:50:54 -04007344 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7345 "(%d):2530 Mailbox command x%x "
7346 "cannot issue Data: x%x x%x\n",
7347 pmbox->vport ? pmbox->vport->vpi : 0,
7348 pmbox->u.mb.mbxCommand,
7349 psli->sli_flag, flag);
James Smart58da1ff2008-04-07 10:15:56 -04007350 goto out_not_finished;
dea31012005-04-17 16:05:31 -05007351 }
7352
dea31012005-04-17 16:05:31 -05007353 /* Another mailbox command is still being processed, queue this
7354 * command to be processed later.
7355 */
7356 lpfc_mbox_put(phba, pmbox);
7357
7358 /* Mbox cmd issue - BUSY */
James Smarted957682007-06-17 19:56:37 -05007359 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04007360 "(%d):0308 Mbox cmd issue - BUSY Data: "
James Smart92d7f7b2007-06-17 19:56:38 -05007361 "x%x x%x x%x x%x\n",
James Smart92d7f7b2007-06-17 19:56:38 -05007362 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007363 mbx->mbxCommand, phba->pport->port_state,
James Smart92d7f7b2007-06-17 19:56:38 -05007364 psli->sli_flag, flag);
dea31012005-04-17 16:05:31 -05007365
7366 psli->slistat.mbox_busy++;
James Smart2e0fef82007-06-17 19:56:36 -05007367 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05007368
James Smart858c9f62007-06-17 19:56:39 -05007369 if (pmbox->vport) {
7370 lpfc_debugfs_disc_trc(pmbox->vport,
7371 LPFC_DISC_TRC_MBOX_VPORT,
7372 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007373 (uint32_t)mbx->mbxCommand,
7374 mbx->un.varWords[0], mbx->un.varWords[1]);
James Smart858c9f62007-06-17 19:56:39 -05007375 }
7376 else {
7377 lpfc_debugfs_disc_trc(phba->pport,
7378 LPFC_DISC_TRC_MBOX,
7379 "MBOX Bsy: cmd:x%x mb:x%x x%x",
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007380 (uint32_t)mbx->mbxCommand,
7381 mbx->un.varWords[0], mbx->un.varWords[1]);
James Smart858c9f62007-06-17 19:56:39 -05007382 }
7383
James Smart2e0fef82007-06-17 19:56:36 -05007384 return MBX_BUSY;
dea31012005-04-17 16:05:31 -05007385 }
7386
dea31012005-04-17 16:05:31 -05007387 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7388
7389 /* If we are not polling, we MUST be in SLI2 mode */
7390 if (flag != MBX_POLL) {
James Smart3772a992009-05-22 14:50:54 -04007391 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007392 (mbx->mbxCommand != MBX_KILL_BOARD)) {
dea31012005-04-17 16:05:31 -05007393 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
James Smart2e0fef82007-06-17 19:56:36 -05007394 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05007395 /* Mbox command <mbxCommand> cannot issue */
James Smart3772a992009-05-22 14:50:54 -04007396 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7397 "(%d):2531 Mailbox command x%x "
7398 "cannot issue Data: x%x x%x\n",
7399 pmbox->vport ? pmbox->vport->vpi : 0,
7400 pmbox->u.mb.mbxCommand,
7401 psli->sli_flag, flag);
James Smart58da1ff2008-04-07 10:15:56 -04007402 goto out_not_finished;
dea31012005-04-17 16:05:31 -05007403 }
7404 /* timeout active mbox command */
James Smart256ec0d2013-04-17 20:14:58 -04007405 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7406 1000);
7407 mod_timer(&psli->mbox_tmo, jiffies + timeout);
dea31012005-04-17 16:05:31 -05007408 }
7409
7410 /* Mailbox cmd <cmd> issue */
James Smarted957682007-06-17 19:56:37 -05007411 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04007412 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
James Smart92d7f7b2007-06-17 19:56:38 -05007413 "x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04007414 pmbox->vport ? pmbox->vport->vpi : 0,
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007415 mbx->mbxCommand, phba->pport->port_state,
James Smart92d7f7b2007-06-17 19:56:38 -05007416 psli->sli_flag, flag);
dea31012005-04-17 16:05:31 -05007417
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007418 if (mbx->mbxCommand != MBX_HEARTBEAT) {
James Smart858c9f62007-06-17 19:56:39 -05007419 if (pmbox->vport) {
7420 lpfc_debugfs_disc_trc(pmbox->vport,
7421 LPFC_DISC_TRC_MBOX_VPORT,
7422 "MBOX Send vport: cmd:x%x mb:x%x x%x",
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007423 (uint32_t)mbx->mbxCommand,
7424 mbx->un.varWords[0], mbx->un.varWords[1]);
James Smart858c9f62007-06-17 19:56:39 -05007425 }
7426 else {
7427 lpfc_debugfs_disc_trc(phba->pport,
7428 LPFC_DISC_TRC_MBOX,
7429 "MBOX Send: cmd:x%x mb:x%x x%x",
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007430 (uint32_t)mbx->mbxCommand,
7431 mbx->un.varWords[0], mbx->un.varWords[1]);
James Smart858c9f62007-06-17 19:56:39 -05007432 }
7433 }
7434
dea31012005-04-17 16:05:31 -05007435 psli->slistat.mbox_cmd++;
7436 evtctr = psli->slistat.mbox_event;
7437
7438 /* next set own bit for the adapter and copy over command word */
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007439 mbx->mbxOwner = OWN_CHIP;
dea31012005-04-17 16:05:31 -05007440
James Smart3772a992009-05-22 14:50:54 -04007441 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
James Smart7a470272010-03-15 11:25:20 -04007442 /* Populate mbox extension offset word. */
7443 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007444 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
James Smart7a470272010-03-15 11:25:20 -04007445 = (uint8_t *)phba->mbox_ext
7446 - (uint8_t *)phba->mbox;
7447 }
7448
7449 /* Copy the mailbox extension data */
7450 if (pmbox->in_ext_byte_len && pmbox->context2) {
7451 lpfc_sli_pcimem_bcopy(pmbox->context2,
7452 (uint8_t *)phba->mbox_ext,
7453 pmbox->in_ext_byte_len);
7454 }
7455 /* Copy command data to host SLIM area */
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007456 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
dea31012005-04-17 16:05:31 -05007457 } else {
James Smart7a470272010-03-15 11:25:20 -04007458 /* Populate mbox extension offset word. */
7459 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007460 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
James Smart7a470272010-03-15 11:25:20 -04007461 = MAILBOX_HBA_EXT_OFFSET;
7462
7463 /* Copy the mailbox extension data */
James Smart895427b2017-02-12 13:52:30 -08007464 if (pmbox->in_ext_byte_len && pmbox->context2)
James Smart7a470272010-03-15 11:25:20 -04007465 lpfc_memcpy_to_slim(phba->MBslimaddr +
7466 MAILBOX_HBA_EXT_OFFSET,
7467 pmbox->context2, pmbox->in_ext_byte_len);
7468
James Smart895427b2017-02-12 13:52:30 -08007469 if (mbx->mbxCommand == MBX_CONFIG_PORT)
dea31012005-04-17 16:05:31 -05007470 /* copy command data into host mbox for cmpl */
James Smart895427b2017-02-12 13:52:30 -08007471 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
7472 MAILBOX_CMD_SIZE);
dea31012005-04-17 16:05:31 -05007473
7474 /* First copy mbox command data to HBA SLIM, skip past first
7475 word */
7476 to_slim = phba->MBslimaddr + sizeof (uint32_t);
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007477 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
dea31012005-04-17 16:05:31 -05007478 MAILBOX_CMD_SIZE - sizeof (uint32_t));
7479
7480 /* Next copy over first word, with mbxOwner set */
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007481 ldata = *((uint32_t *)mbx);
dea31012005-04-17 16:05:31 -05007482 to_slim = phba->MBslimaddr;
7483 writel(ldata, to_slim);
7484 readl(to_slim); /* flush */
7485
James Smart895427b2017-02-12 13:52:30 -08007486 if (mbx->mbxCommand == MBX_CONFIG_PORT)
dea31012005-04-17 16:05:31 -05007487 /* switch over to host mailbox */
James Smart3772a992009-05-22 14:50:54 -04007488 psli->sli_flag |= LPFC_SLI_ACTIVE;
dea31012005-04-17 16:05:31 -05007489 }
7490
7491 wmb();
dea31012005-04-17 16:05:31 -05007492
7493 switch (flag) {
7494 case MBX_NOWAIT:
James Smart09372822008-01-11 01:52:54 -05007495 /* Set up reference to mailbox command */
dea31012005-04-17 16:05:31 -05007496 psli->mbox_active = pmbox;
James Smart09372822008-01-11 01:52:54 -05007497 /* Interrupt board to do it */
7498 writel(CA_MBATT, phba->CAregaddr);
7499 readl(phba->CAregaddr); /* flush */
7500 /* Don't wait for it to finish, just return */
dea31012005-04-17 16:05:31 -05007501 break;
7502
7503 case MBX_POLL:
James Smart09372822008-01-11 01:52:54 -05007504 /* Set up null reference to mailbox command */
dea31012005-04-17 16:05:31 -05007505 psli->mbox_active = NULL;
James Smart09372822008-01-11 01:52:54 -05007506 /* Interrupt board to do it */
7507 writel(CA_MBATT, phba->CAregaddr);
7508 readl(phba->CAregaddr); /* flush */
7509
James Smart3772a992009-05-22 14:50:54 -04007510 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea31012005-04-17 16:05:31 -05007511 /* First read mbox status word */
James Smart34b02dc2008-08-24 21:49:55 -04007512 word0 = *((uint32_t *)phba->mbox);
dea31012005-04-17 16:05:31 -05007513 word0 = le32_to_cpu(word0);
7514 } else {
7515 /* First read mbox status word */
James Smart9940b972011-03-11 16:06:12 -05007516 if (lpfc_readl(phba->MBslimaddr, &word0)) {
7517 spin_unlock_irqrestore(&phba->hbalock,
7518 drvr_flag);
7519 goto out_not_finished;
7520 }
dea31012005-04-17 16:05:31 -05007521 }
7522
7523 /* Read the HBA Host Attention Register */
James Smart9940b972011-03-11 16:06:12 -05007524 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7525 spin_unlock_irqrestore(&phba->hbalock,
7526 drvr_flag);
7527 goto out_not_finished;
7528 }
James Smarta183a152011-10-10 21:32:43 -04007529 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7530 1000) + jiffies;
James Smart09372822008-01-11 01:52:54 -05007531 i = 0;
dea31012005-04-17 16:05:31 -05007532 /* Wait for command to complete */
Jamie Wellnitz41415862006-02-28 19:25:27 -05007533 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
7534 (!(ha_copy & HA_MBATT) &&
James Smart2e0fef82007-06-17 19:56:36 -05007535 (phba->link_state > LPFC_WARM_START))) {
James Smart09372822008-01-11 01:52:54 -05007536 if (time_after(jiffies, timeout)) {
dea31012005-04-17 16:05:31 -05007537 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
James Smart2e0fef82007-06-17 19:56:36 -05007538 spin_unlock_irqrestore(&phba->hbalock,
dea31012005-04-17 16:05:31 -05007539 drvr_flag);
James Smart58da1ff2008-04-07 10:15:56 -04007540 goto out_not_finished;
dea31012005-04-17 16:05:31 -05007541 }
7542
7543 /* Check if we took a mbox interrupt while we were
7544 polling */
7545 if (((word0 & OWN_CHIP) != OWN_CHIP)
7546 && (evtctr != psli->slistat.mbox_event))
7547 break;
7548
James Smart09372822008-01-11 01:52:54 -05007549 if (i++ > 10) {
7550 spin_unlock_irqrestore(&phba->hbalock,
7551 drvr_flag);
7552 msleep(1);
7553 spin_lock_irqsave(&phba->hbalock, drvr_flag);
7554 }
dea31012005-04-17 16:05:31 -05007555
James Smart3772a992009-05-22 14:50:54 -04007556 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea31012005-04-17 16:05:31 -05007557 /* First copy command data */
James Smart34b02dc2008-08-24 21:49:55 -04007558 word0 = *((uint32_t *)phba->mbox);
dea31012005-04-17 16:05:31 -05007559 word0 = le32_to_cpu(word0);
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007560 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
dea31012005-04-17 16:05:31 -05007561 MAILBOX_t *slimmb;
James Smart34b02dc2008-08-24 21:49:55 -04007562 uint32_t slimword0;
dea31012005-04-17 16:05:31 -05007563 /* Check real SLIM for any errors */
7564 slimword0 = readl(phba->MBslimaddr);
7565 slimmb = (MAILBOX_t *) & slimword0;
7566 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
7567 && slimmb->mbxStatus) {
7568 psli->sli_flag &=
James Smart3772a992009-05-22 14:50:54 -04007569 ~LPFC_SLI_ACTIVE;
dea31012005-04-17 16:05:31 -05007570 word0 = slimword0;
7571 }
7572 }
7573 } else {
7574 /* First copy command data */
7575 word0 = readl(phba->MBslimaddr);
7576 }
7577 /* Read the HBA Host Attention Register */
James Smart9940b972011-03-11 16:06:12 -05007578 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7579 spin_unlock_irqrestore(&phba->hbalock,
7580 drvr_flag);
7581 goto out_not_finished;
7582 }
dea31012005-04-17 16:05:31 -05007583 }
7584
James Smart3772a992009-05-22 14:50:54 -04007585 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea31012005-04-17 16:05:31 -05007586 /* copy results back to user */
James Smart2ea259e2017-02-12 13:52:27 -08007587 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
7588 MAILBOX_CMD_SIZE);
James Smart7a470272010-03-15 11:25:20 -04007589 /* Copy the mailbox extension data */
7590 if (pmbox->out_ext_byte_len && pmbox->context2) {
7591 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
7592 pmbox->context2,
7593 pmbox->out_ext_byte_len);
7594 }
dea31012005-04-17 16:05:31 -05007595 } else {
7596 /* First copy command data */
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007597 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
James Smart2ea259e2017-02-12 13:52:27 -08007598 MAILBOX_CMD_SIZE);
James Smart7a470272010-03-15 11:25:20 -04007599 /* Copy the mailbox extension data */
7600 if (pmbox->out_ext_byte_len && pmbox->context2) {
7601 lpfc_memcpy_from_slim(pmbox->context2,
7602 phba->MBslimaddr +
7603 MAILBOX_HBA_EXT_OFFSET,
7604 pmbox->out_ext_byte_len);
dea31012005-04-17 16:05:31 -05007605 }
7606 }
7607
7608 writel(HA_MBATT, phba->HAregaddr);
7609 readl(phba->HAregaddr); /* flush */
7610
7611 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
Randy Dunlapbf07bde2013-02-22 10:23:54 -08007612 status = mbx->mbxStatus;
dea31012005-04-17 16:05:31 -05007613 }
7614
James Smart2e0fef82007-06-17 19:56:36 -05007615 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7616 return status;
James Smart58da1ff2008-04-07 10:15:56 -04007617
7618out_not_finished:
7619 if (processing_queue) {
James Smartda0436e2009-05-22 14:51:39 -04007620 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
James Smart58da1ff2008-04-07 10:15:56 -04007621 lpfc_mbox_cmpl_put(phba, pmbox);
7622 }
7623 return MBX_NOT_FINISHED;
dea31012005-04-17 16:05:31 -05007624}
7625
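/*
 * Illustrative sketch, not part of the driver: the polling way to submit a
 * mailbox through lpfc_sli_issue_mbox().  With MBX_POLL the call returns
 * only after the command finishes (or times out), so the caller releases
 * the mailbox itself.  With MBX_NOWAIT the SLI layer keeps ownership of the
 * mailbox on MBX_BUSY or MBX_SUCCESS, per the rules documented above the
 * function, and the caller must not free it in those cases.
 */
static int __maybe_unused lpfc_sketch_issue_mbox_poll(struct lpfc_hba *phba,
						      LPFC_MBOXQ_t *mboxq)
{
	int rc;

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
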
James Smarte59058c2008-08-24 21:49:00 -04007626/**
James Smartf1126682009-06-10 17:22:44 -04007627 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
7628 * @phba: Pointer to HBA context object.
7629 *
7630 * The function blocks the posting of SLI4 asynchronous mailbox commands from
7631 * the driver internal pending mailbox queue. It will then try to wait out the
7632 * possible outstanding mailbox command before returning.
7633 *
7634 * Returns:
7635 * 0 - the outstanding mailbox command completed; otherwise, the wait for
7636 * the outstanding mailbox command timed out.
7637 **/
7638static int
7639lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
7640{
7641 struct lpfc_sli *psli = &phba->sli;
James Smartf1126682009-06-10 17:22:44 -04007642 int rc = 0;
James Smarta183a152011-10-10 21:32:43 -04007643 unsigned long timeout = 0;
James Smartf1126682009-06-10 17:22:44 -04007644
7645 /* Mark the asynchronous mailbox command posting as blocked */
7646 spin_lock_irq(&phba->hbalock);
7647 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
James Smartf1126682009-06-10 17:22:44 -04007648 /* Determine how long we might wait for the active mailbox
7649 * command to be gracefully completed by firmware.
7650 */
James Smarta183a152011-10-10 21:32:43 -04007651 if (phba->sli.mbox_active)
7652 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
7653 phba->sli.mbox_active) *
7654 1000) + jiffies;
7655 spin_unlock_irq(&phba->hbalock);
7656
James Smarte8d3c3b2013-10-10 12:21:30 -04007657 /* Make sure the mailbox is really active */
7658 if (timeout)
7659 lpfc_sli4_process_missed_mbox_completions(phba);
7660
James Smartf1126682009-06-10 17:22:44 -04007661	/* Wait for the outstanding mailbox command to complete */
7662 while (phba->sli.mbox_active) {
7663 /* Check active mailbox complete status every 2ms */
7664 msleep(2);
7665 if (time_after(jiffies, timeout)) {
7666			/* Timeout, mark the outstanding cmd as not complete */
7667 rc = 1;
7668 break;
7669 }
7670 }
7671
7672	/* Cannot cleanly block async mailbox command, fail it */
7673 if (rc) {
7674 spin_lock_irq(&phba->hbalock);
7675 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7676 spin_unlock_irq(&phba->hbalock);
7677 }
7678 return rc;
7679}
7680
7681/**
7682 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
7683 * @phba: Pointer to HBA context object.
7684 *
7685 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
7686 * commands from the driver internal pending mailbox queue. It makes sure
7687 * that there is no outstanding mailbox command before resuming posting
7688 * asynchronous mailbox commands. If, for any reason, there is an outstanding
7689 * mailbox command, it will try to wait it out before resuming asynchronous
7690 * mailbox command posting.
7691 **/
7692static void
7693lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
7694{
7695 struct lpfc_sli *psli = &phba->sli;
7696
7697 spin_lock_irq(&phba->hbalock);
7698 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7699 /* Asynchronous mailbox posting is not blocked, do nothing */
7700 spin_unlock_irq(&phba->hbalock);
7701 return;
7702 }
7703
7704	/* An outstanding synchronous mailbox command is guaranteed to be done,
7705	 * whether successful or timed out; after a timeout the outstanding
7706	 * mailbox command is always removed, so just unblock async mailbox
7707	 * command posting and resume
7708 */
7709 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7710 spin_unlock_irq(&phba->hbalock);
7711
7712	/* wake up worker thread to post asynchronous mailbox command */
7713 lpfc_worker_wake_up(phba);
7714}
7715
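/*
 * Illustrative sketch, not part of the driver: the intended pairing of the
 * two helpers above.  A caller that must quiesce asynchronous mailbox
 * posting brackets the critical work with block/unblock; the commented-out
 * middle step stands in for whatever driver-specific work needs the mailbox
 * channel quiet.
 */
static int __maybe_unused lpfc_sketch_quiesce_async_mbox(struct lpfc_hba *phba)
{
	/* Stop async mailbox posting and wait out any active command */
	if (lpfc_sli4_async_mbox_block(phba))
		return -EIO;	/* active command never completed */

	/* ... perform work that requires the mailbox channel to be quiet ... */

	/* Resume async mailbox posting */
	lpfc_sli4_async_mbox_unblock(phba);
	return 0;
}
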
7716/**
James Smart2d843ed2012-09-29 11:29:06 -04007717 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
7718 * @phba: Pointer to HBA context object.
7719 * @mboxq: Pointer to mailbox object.
7720 *
7721 * The function waits for the bootstrap mailbox register ready bit from
7722 * the port for up to the regular mailbox command timeout value.
7723 *
7724 * 0 - no timeout on waiting for bootstrap mailbox register ready.
7725 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
7726 **/
7727static int
7728lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7729{
7730 uint32_t db_ready;
7731 unsigned long timeout;
7732 struct lpfc_register bmbx_reg;
7733
7734 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7735 * 1000) + jiffies;
7736
7737 do {
7738 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7739 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7740 if (!db_ready)
7741 msleep(2);
7742
7743 if (time_after(jiffies, timeout))
7744 return MBXERR_ERROR;
7745 } while (!db_ready);
7746
7747 return 0;
7748}
7749
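/*
 * Illustrative sketch, not part of the driver: the bootstrap mailbox
 * handshake that lpfc_sli4_post_sync_mbox() below drives with the helper
 * above - every doorbell write to the BMBX register is followed by a wait
 * for the ready bit before the next step.
 */
static int __maybe_unused lpfc_sketch_bmbx_handshake(struct lpfc_hba *phba,
						     LPFC_MBOXQ_t *mboxq)
{
	struct dma_address *dma_address = &phba->sli4_hba.bmbx.dma_address;
	int rc;

	/* the port must be idle before touching the bootstrap mailbox */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		return rc;

	/* post the high half of the mailbox DMA address, wait for ready */
	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		return rc;

	/* post the low half and wait for the port to consume it */
	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
	return lpfc_sli4_wait_bmbx_ready(phba, mboxq);
}
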
7750/**
James Smartda0436e2009-05-22 14:51:39 -04007751 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
7752 * @phba: Pointer to HBA context object.
7753 * @mboxq: Pointer to mailbox object.
7754 *
7755 * The function posts a mailbox to the port. The mailbox is expected
7756 * to be completely filled in and ready for the port to operate on it.
7757 * This routine executes a synchronous completion operation on the
7758 * mailbox by polling for its completion.
7759 *
7760 * The caller must not be holding any locks when calling this routine.
7761 *
7762 * Returns:
7763 * MBX_SUCCESS - mailbox posted successfully
7764 * Any of the MBX error values.
7765 **/
7766static int
7767lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7768{
7769 int rc = MBX_SUCCESS;
7770 unsigned long iflag;
James Smartda0436e2009-05-22 14:51:39 -04007771 uint32_t mcqe_status;
7772 uint32_t mbx_cmnd;
James Smartda0436e2009-05-22 14:51:39 -04007773 struct lpfc_sli *psli = &phba->sli;
7774 struct lpfc_mqe *mb = &mboxq->u.mqe;
7775 struct lpfc_bmbx_create *mbox_rgn;
7776 struct dma_address *dma_address;
James Smartda0436e2009-05-22 14:51:39 -04007777
7778 /*
7779 * Only one mailbox can be active to the bootstrap mailbox region
7780 * at a time and there is no queueing provided.
7781 */
7782 spin_lock_irqsave(&phba->hbalock, iflag);
7783 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7784 spin_unlock_irqrestore(&phba->hbalock, iflag);
7785 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04007786 "(%d):2532 Mailbox command x%x (x%x/x%x) "
James Smartda0436e2009-05-22 14:51:39 -04007787 "cannot issue Data: x%x x%x\n",
7788 mboxq->vport ? mboxq->vport->vpi : 0,
7789 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04007790 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7791 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04007792 psli->sli_flag, MBX_POLL);
7793 return MBXERR_ERROR;
7794 }
7795 /* The server grabs the token and owns it until release */
7796 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7797 phba->sli.mbox_active = mboxq;
7798 spin_unlock_irqrestore(&phba->hbalock, iflag);
7799
James Smart2d843ed2012-09-29 11:29:06 -04007800	/* wait for the bootstrap mbox register to become ready */
7801 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7802 if (rc)
7803 goto exit;
7804
James Smartda0436e2009-05-22 14:51:39 -04007805 /*
7806 * Initialize the bootstrap memory region to avoid stale data areas
7807 * in the mailbox post. Then copy the caller's mailbox contents to
7808 * the bmbx mailbox region.
7809 */
7810 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
7811 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
7812 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
7813 sizeof(struct lpfc_mqe));
7814
7815 /* Post the high mailbox dma address to the port and wait for ready. */
7816 dma_address = &phba->sli4_hba.bmbx.dma_address;
7817 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
7818
James Smart2d843ed2012-09-29 11:29:06 -04007819	/* wait for the bootstrap mbox register hi-address write to complete */
7820 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7821 if (rc)
7822 goto exit;
James Smartda0436e2009-05-22 14:51:39 -04007823
7824 /* Post the low mailbox dma address to the port. */
7825 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
James Smartda0436e2009-05-22 14:51:39 -04007826
James Smart2d843ed2012-09-29 11:29:06 -04007827	/* wait for the bootstrap mbox register low-address write to complete */
7828 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7829 if (rc)
7830 goto exit;
James Smartda0436e2009-05-22 14:51:39 -04007831
7832 /*
7833 * Read the CQ to ensure the mailbox has completed.
7834 * If so, update the mailbox status so that the upper layers
7835 * can complete the request normally.
7836 */
7837 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
7838 sizeof(struct lpfc_mqe));
7839 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
7840 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
7841 sizeof(struct lpfc_mcqe));
7842 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
James Smart05580562011-05-24 11:40:48 -04007843 /*
7844 * When the CQE status indicates a failure and the mailbox status
7845 * indicates success then copy the CQE status into the mailbox status
7846 * (and prefix it with x4000).
7847 */
James Smartda0436e2009-05-22 14:51:39 -04007848 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
James Smart05580562011-05-24 11:40:48 -04007849 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
7850 bf_set(lpfc_mqe_status, mb,
7851 (LPFC_MBX_ERROR_RANGE | mcqe_status));
James Smartda0436e2009-05-22 14:51:39 -04007852 rc = MBXERR_ERROR;
James Smartd7c47992010-06-08 18:31:54 -04007853 } else
7854 lpfc_sli4_swap_str(phba, mboxq);
James Smartda0436e2009-05-22 14:51:39 -04007855
7856 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04007857 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
James Smartda0436e2009-05-22 14:51:39 -04007858 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
7859 " x%x x%x CQ: x%x x%x x%x x%x\n",
James Smarta183a152011-10-10 21:32:43 -04007860 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7861 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7862 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04007863 bf_get(lpfc_mqe_status, mb),
7864 mb->un.mb_words[0], mb->un.mb_words[1],
7865 mb->un.mb_words[2], mb->un.mb_words[3],
7866 mb->un.mb_words[4], mb->un.mb_words[5],
7867 mb->un.mb_words[6], mb->un.mb_words[7],
7868 mb->un.mb_words[8], mb->un.mb_words[9],
7869 mb->un.mb_words[10], mb->un.mb_words[11],
7870 mb->un.mb_words[12], mboxq->mcqe.word0,
7871 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
7872 mboxq->mcqe.trailer);
7873exit:
7874	/* We are holding the token; no lock needed when releasing it */
7875 spin_lock_irqsave(&phba->hbalock, iflag);
7876 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7877 phba->sli.mbox_active = NULL;
7878 spin_unlock_irqrestore(&phba->hbalock, iflag);
7879 return rc;
7880}
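
/*
 * Illustrative sketch only (not part of the driver): when interrupts are
 * enabled, a synchronous (polled) mailbox issue is expected to be bracketed
 * by the async block/unblock helpers above, which is what
 * lpfc_sli_issue_mbox_s4() below does. The example function name is made up;
 * the lpfc routines it calls are the ones defined in this file.
 */
static int lpfc_example_sync_mbox_issue(struct lpfc_hba *phba,
					LPFC_MBOXQ_t *mboxq)
{
	int rc;

	/* Stop posting of asynchronous mailbox commands first */
	rc = lpfc_sli4_async_mbox_block(phba);
	if (rc)
		return MBXERR_ERROR;

	/* Poll the bootstrap mailbox for completion */
	rc = lpfc_sli4_post_sync_mbox(phba, mboxq);

	/* Resume asynchronous mailbox posting whether or not we succeeded */
	lpfc_sli4_async_mbox_unblock(phba);
	return rc;
}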
7881
7882/**
7883 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
7884 * @phba: Pointer to HBA context object.
7885 * @mboxq: Pointer to mailbox object.
7886 * @flag: Flag indicating how the mailbox needs to be processed.
7887 *
7888 * This function is called by discovery code and HBA management code to submit
7889 * a mailbox command to firmware with SLI-4 interface spec.
7890 *
7891 * Return codes: the caller owns the mailbox command after the return of the
7892 * function.
7893 **/
7894static int
7895lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
7896 uint32_t flag)
7897{
7898 struct lpfc_sli *psli = &phba->sli;
7899 unsigned long iflags;
7900 int rc;
7901
James Smartb76f2dc2011-07-22 18:37:42 -04007902	/* dump the issued mailbox command if idiag dump is set up */
7903 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
7904
James Smart8fa38512009-07-19 10:01:03 -04007905 rc = lpfc_mbox_dev_check(phba);
7906 if (unlikely(rc)) {
7907 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04007908 "(%d):2544 Mailbox command x%x (x%x/x%x) "
James Smart8fa38512009-07-19 10:01:03 -04007909 "cannot issue Data: x%x x%x\n",
7910 mboxq->vport ? mboxq->vport->vpi : 0,
7911 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04007912 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7913 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smart8fa38512009-07-19 10:01:03 -04007914 psli->sli_flag, flag);
7915 goto out_not_finished;
7916 }
7917
James Smartda0436e2009-05-22 14:51:39 -04007918 /* Detect polling mode and jump to a handler */
7919 if (!phba->sli4_hba.intr_enable) {
7920 if (flag == MBX_POLL)
7921 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7922 else
7923 rc = -EIO;
7924 if (rc != MBX_SUCCESS)
James Smart05580562011-05-24 11:40:48 -04007925 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
James Smartda0436e2009-05-22 14:51:39 -04007926 "(%d):2541 Mailbox command x%x "
James Smartcc459f12012-05-09 21:18:30 -04007927 "(x%x/x%x) failure: "
7928 "mqe_sta: x%x mcqe_sta: x%x/x%x "
7929				"Data: x%x x%x\n",
James Smartda0436e2009-05-22 14:51:39 -04007930 mboxq->vport ? mboxq->vport->vpi : 0,
7931 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04007932 lpfc_sli_config_mbox_subsys_get(phba,
7933 mboxq),
7934 lpfc_sli_config_mbox_opcode_get(phba,
7935 mboxq),
James Smartcc459f12012-05-09 21:18:30 -04007936 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7937 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7938 bf_get(lpfc_mcqe_ext_status,
7939 &mboxq->mcqe),
James Smartda0436e2009-05-22 14:51:39 -04007940 psli->sli_flag, flag);
7941 return rc;
7942 } else if (flag == MBX_POLL) {
James Smartf1126682009-06-10 17:22:44 -04007943 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7944 "(%d):2542 Try to issue mailbox command "
James Smarta183a152011-10-10 21:32:43 -04007945				"x%x (x%x/x%x) synchronously ahead of async "
James Smartf1126682009-06-10 17:22:44 -04007946 "mailbox command queue: x%x x%x\n",
James Smartda0436e2009-05-22 14:51:39 -04007947 mboxq->vport ? mboxq->vport->vpi : 0,
7948 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04007949 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7950 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04007951 psli->sli_flag, flag);
James Smartf1126682009-06-10 17:22:44 -04007952 /* Try to block the asynchronous mailbox posting */
7953 rc = lpfc_sli4_async_mbox_block(phba);
7954 if (!rc) {
7955 /* Successfully blocked, now issue sync mbox cmd */
7956 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7957 if (rc != MBX_SUCCESS)
James Smartcc459f12012-05-09 21:18:30 -04007958 lpfc_printf_log(phba, KERN_WARNING,
James Smarta183a152011-10-10 21:32:43 -04007959 LOG_MBOX | LOG_SLI,
James Smartcc459f12012-05-09 21:18:30 -04007960 "(%d):2597 Sync Mailbox command "
7961 "x%x (x%x/x%x) failure: "
7962 "mqe_sta: x%x mcqe_sta: x%x/x%x "
7963						"Data: x%x x%x\n",
7964 mboxq->vport ? mboxq->vport->vpi : 0,
James Smarta183a152011-10-10 21:32:43 -04007965 mboxq->u.mb.mbxCommand,
7966 lpfc_sli_config_mbox_subsys_get(phba,
7967 mboxq),
7968 lpfc_sli_config_mbox_opcode_get(phba,
7969 mboxq),
James Smartcc459f12012-05-09 21:18:30 -04007970 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7971 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7972 bf_get(lpfc_mcqe_ext_status,
7973 &mboxq->mcqe),
James Smarta183a152011-10-10 21:32:43 -04007974 psli->sli_flag, flag);
James Smartf1126682009-06-10 17:22:44 -04007975 /* Unblock the async mailbox posting afterward */
7976 lpfc_sli4_async_mbox_unblock(phba);
7977 }
7978 return rc;
James Smartda0436e2009-05-22 14:51:39 -04007979 }
7980
7981	/* Now, interrupt mode asynchronous mailbox command */
7982 rc = lpfc_mbox_cmd_check(phba, mboxq);
7983 if (rc) {
7984 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04007985 "(%d):2543 Mailbox command x%x (x%x/x%x) "
James Smartda0436e2009-05-22 14:51:39 -04007986 "cannot issue Data: x%x x%x\n",
7987 mboxq->vport ? mboxq->vport->vpi : 0,
7988 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04007989 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7990 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04007991 psli->sli_flag, flag);
7992 goto out_not_finished;
7993 }
James Smartda0436e2009-05-22 14:51:39 -04007994
7995 /* Put the mailbox command to the driver internal FIFO */
7996 psli->slistat.mbox_busy++;
7997 spin_lock_irqsave(&phba->hbalock, iflags);
7998 lpfc_mbox_put(phba, mboxq);
7999 spin_unlock_irqrestore(&phba->hbalock, iflags);
8000 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8001 "(%d):0354 Mbox cmd issue - Enqueue Data: "
James Smarta183a152011-10-10 21:32:43 -04008002 "x%x (x%x/x%x) x%x x%x x%x\n",
James Smartda0436e2009-05-22 14:51:39 -04008003 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8004 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
James Smarta183a152011-10-10 21:32:43 -04008005 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8006 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04008007 phba->pport->port_state,
8008 psli->sli_flag, MBX_NOWAIT);
8009 /* Wake up worker thread to transport mailbox command from head */
8010 lpfc_worker_wake_up(phba);
8011
8012 return MBX_BUSY;
8013
8014out_not_finished:
8015 return MBX_NOT_FINISHED;
8016}
8017
8018/**
8019 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
8020 * @phba: Pointer to HBA context object.
8021 *
8022 * This function is called by the worker thread to send a mailbox command to
8023 * SLI4 HBA firmware.
8024 *
8025 **/
8026int
8027lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8028{
8029 struct lpfc_sli *psli = &phba->sli;
8030 LPFC_MBOXQ_t *mboxq;
8031 int rc = MBX_SUCCESS;
8032 unsigned long iflags;
8033 struct lpfc_mqe *mqe;
8034 uint32_t mbx_cmnd;
8035
8036	/* Check interrupt mode before posting async mailbox command */
8037 if (unlikely(!phba->sli4_hba.intr_enable))
8038 return MBX_NOT_FINISHED;
8039
8040 /* Check for mailbox command service token */
8041 spin_lock_irqsave(&phba->hbalock, iflags);
8042 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8043 spin_unlock_irqrestore(&phba->hbalock, iflags);
8044 return MBX_NOT_FINISHED;
8045 }
8046 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8047 spin_unlock_irqrestore(&phba->hbalock, iflags);
8048 return MBX_NOT_FINISHED;
8049 }
8050 if (unlikely(phba->sli.mbox_active)) {
8051 spin_unlock_irqrestore(&phba->hbalock, iflags);
8052 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8053 "0384 There is pending active mailbox cmd\n");
8054 return MBX_NOT_FINISHED;
8055 }
8056 /* Take the mailbox command service token */
8057 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8058
8059 /* Get the next mailbox command from head of queue */
8060 mboxq = lpfc_mbox_get(phba);
8061
8062	/* If no more mailbox commands are waiting to post, we're done */
8063 if (!mboxq) {
8064 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8065 spin_unlock_irqrestore(&phba->hbalock, iflags);
8066 return MBX_SUCCESS;
8067 }
8068 phba->sli.mbox_active = mboxq;
8069 spin_unlock_irqrestore(&phba->hbalock, iflags);
8070
8071 /* Check device readiness for posting mailbox command */
8072 rc = lpfc_mbox_dev_check(phba);
8073 if (unlikely(rc))
8074 /* Driver clean routine will clean up pending mailbox */
8075 goto out_not_finished;
8076
8077 /* Prepare the mbox command to be posted */
8078 mqe = &mboxq->u.mqe;
8079 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8080
8081 /* Start timer for the mbox_tmo and log some mailbox post messages */
8082 mod_timer(&psli->mbox_tmo, (jiffies +
James Smart256ec0d2013-04-17 20:14:58 -04008083 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
James Smartda0436e2009-05-22 14:51:39 -04008084
8085 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04008086 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
James Smartda0436e2009-05-22 14:51:39 -04008087 "x%x x%x\n",
8088 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
James Smarta183a152011-10-10 21:32:43 -04008089 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8090 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04008091 phba->pport->port_state, psli->sli_flag);
8092
8093 if (mbx_cmnd != MBX_HEARTBEAT) {
8094 if (mboxq->vport) {
8095 lpfc_debugfs_disc_trc(mboxq->vport,
8096 LPFC_DISC_TRC_MBOX_VPORT,
8097 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8098 mbx_cmnd, mqe->un.mb_words[0],
8099 mqe->un.mb_words[1]);
8100 } else {
8101 lpfc_debugfs_disc_trc(phba->pport,
8102 LPFC_DISC_TRC_MBOX,
8103 "MBOX Send: cmd:x%x mb:x%x x%x",
8104 mbx_cmnd, mqe->un.mb_words[0],
8105 mqe->un.mb_words[1]);
8106 }
8107 }
8108 psli->slistat.mbox_cmd++;
8109
8110 /* Post the mailbox command to the port */
8111 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8112 if (rc != MBX_SUCCESS) {
8113 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04008114 "(%d):2533 Mailbox command x%x (x%x/x%x) "
James Smartda0436e2009-05-22 14:51:39 -04008115 "cannot issue Data: x%x x%x\n",
8116 mboxq->vport ? mboxq->vport->vpi : 0,
8117 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04008118 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8119 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04008120 psli->sli_flag, MBX_NOWAIT);
8121 goto out_not_finished;
8122 }
8123
8124 return rc;
8125
8126out_not_finished:
8127 spin_lock_irqsave(&phba->hbalock, iflags);
James Smartd7069f02012-03-01 22:36:29 -05008128 if (phba->sli.mbox_active) {
8129 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8130 __lpfc_mbox_cmpl_put(phba, mboxq);
8131 /* Release the token */
8132 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8133 phba->sli.mbox_active = NULL;
8134 }
James Smartda0436e2009-05-22 14:51:39 -04008135 spin_unlock_irqrestore(&phba->hbalock, iflags);
8136
8137 return MBX_NOT_FINISHED;
8138}
8139
8140/**
8141 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8142 * @phba: Pointer to HBA context object.
8143 * @pmbox: Pointer to mailbox object.
8144 * @flag: Flag indicating how the mailbox needs to be processed.
8145 *
8146 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine,
8147 * using the API jump table function pointer in the lpfc_hba struct.
8148 *
8149 * Return codes: the caller owns the mailbox command after the return of the
8150 * function.
8151 **/
8152int
8153lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8154{
8155 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
8156}
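
/*
 * Illustrative caller sketch only (not part of the driver): shows how the
 * generic wrapper above is typically driven in polled mode. The example
 * function name is made up; lpfc_heart_beat() and the mailbox mempool are
 * existing lpfc facilities, and the caller owns the mailbox again on return
 * from MBX_POLL (except on MBX_TIMEOUT).
 */
static int lpfc_example_poll_heartbeat(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Build the MBX_HEARTBEAT command in the mailbox */
	lpfc_heart_beat(phba, mboxq);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);

	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}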
8157
8158/**
Lucas De Marchi25985ed2011-03-30 22:57:33 -03008159 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
James Smartda0436e2009-05-22 14:51:39 -04008160 * @phba: The hba struct for which this call is being executed.
8161 * @dev_grp: The HBA PCI-Device group number.
8162 *
8163 * This routine sets up the mbox interface API function jump table in @phba
8164 * struct.
8165 * Returns: 0 - success, -ENODEV - failure.
8166 **/
8167int
8168lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8169{
8170
8171 switch (dev_grp) {
8172 case LPFC_PCI_DEV_LP:
8173 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8174 phba->lpfc_sli_handle_slow_ring_event =
8175 lpfc_sli_handle_slow_ring_event_s3;
8176 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8177 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8178 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8179 break;
8180 case LPFC_PCI_DEV_OC:
8181 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8182 phba->lpfc_sli_handle_slow_ring_event =
8183 lpfc_sli_handle_slow_ring_event_s4;
8184 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8185 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
8186 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
8187 break;
8188 default:
8189 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8190 "1420 Invalid HBA PCI-device group: 0x%x\n",
8191 dev_grp);
8192 return -ENODEV;
8193 break;
8194 }
8195 return 0;
8196}
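
/*
 * Illustrative sketch only (not part of the driver): the jump table set up
 * above is filled once at probe time based on the PCI device group, after
 * which callers dispatch through the phba function pointers (or the
 * lpfc_sli_issue_mbox() wrapper) without caring about the SLI revision.
 * The example function name is made up.
 */
static int lpfc_example_mbox_api_dispatch(struct lpfc_hba *phba,
					  LPFC_MBOXQ_t *pmbox)
{
	int rc;

	/* LPFC_PCI_DEV_OC selects the SLI-4 (_s4) entry points */
	rc = lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (rc)
		return rc;

	/* Resolves to lpfc_sli_issue_mbox_s4() through the jump table */
	return phba->lpfc_sli_issue_mbox(phba, pmbox, MBX_NOWAIT);
}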
8197
8198/**
James Smart3621a712009-04-06 18:47:14 -04008199 * __lpfc_sli_ringtx_put - Add an iocb to the txq
James Smarte59058c2008-08-24 21:49:00 -04008200 * @phba: Pointer to HBA context object.
8201 * @pring: Pointer to driver SLI ring object.
8202 * @piocb: Pointer to the command iocb to be added.
8203 *
8204 * This function is called with hbalock held to add a command
8205 * iocb to the txq when the SLI layer cannot submit the command iocb
8206 * to the ring.
8207 **/
James Smart2a9bf3d2010-06-07 15:24:45 -04008208void
James Smart92d7f7b2007-06-17 19:56:38 -05008209__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
James Smart2e0fef82007-06-17 19:56:36 -05008210 struct lpfc_iocbq *piocb)
dea31012005-04-17 16:05:31 -05008211{
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01008212 lockdep_assert_held(&phba->hbalock);
dea31012005-04-17 16:05:31 -05008213 /* Insert the caller's iocb in the txq tail for later processing. */
8214 list_add_tail(&piocb->list, &pring->txq);
dea31012005-04-17 16:05:31 -05008215}
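
/*
 * Illustrative caller sketch only (not part of the driver): the helper above
 * asserts hbalock through lockdep, so a caller that is not already in a
 * locked ring path must take the lock itself. The example function name is
 * made up.
 */
static void lpfc_example_defer_iocb(struct lpfc_hba *phba,
				    struct lpfc_sli_ring *pring,
				    struct lpfc_iocbq *piocb)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_ringtx_put(phba, pring, piocb);	/* queue for later */
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}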
8216
James Smarte59058c2008-08-24 21:49:00 -04008217/**
James Smart3621a712009-04-06 18:47:14 -04008218 * lpfc_sli_next_iocb - Get the next iocb in the txq
James Smarte59058c2008-08-24 21:49:00 -04008219 * @phba: Pointer to HBA context object.
8220 * @pring: Pointer to driver SLI ring object.
8221 * @piocb: Pointer to address of newly added command iocb.
8222 *
8223 * This function is called with hbalock held before a new
8224 * iocb is submitted to the firmware. It checks the
8225 * txq so that iocbs queued there are flushed to the firmware before
8226 * new iocbs are submitted to the firmware.
8227 * If there are iocbs in the txq which need to be submitted
8228 * to the firmware, lpfc_sli_next_iocb returns the first element
8229 * of the txq after dequeuing it from the txq.
8230 * If the txq is empty, the function returns the original
8231 * *piocb and sets *piocb to NULL. The caller needs to check
8232 * *piocb to find out if there are more commands in the txq.
8233 **/
dea31012005-04-17 16:05:31 -05008234static struct lpfc_iocbq *
8235lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
James Smart2e0fef82007-06-17 19:56:36 -05008236 struct lpfc_iocbq **piocb)
dea31012005-04-17 16:05:31 -05008237{
8238 struct lpfc_iocbq * nextiocb;
8239
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01008240 lockdep_assert_held(&phba->hbalock);
8241
dea31012005-04-17 16:05:31 -05008242 nextiocb = lpfc_sli_ringtx_get(phba, pring);
8243 if (!nextiocb) {
8244 nextiocb = *piocb;
8245 *piocb = NULL;
8246 }
8247
8248 return nextiocb;
8249}
8250
James Smarte59058c2008-08-24 21:49:00 -04008251/**
James Smart3772a992009-05-22 14:50:54 -04008252 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
James Smarte59058c2008-08-24 21:49:00 -04008253 * @phba: Pointer to HBA context object.
James Smart3772a992009-05-22 14:50:54 -04008254 * @ring_number: SLI ring number to issue iocb on.
James Smarte59058c2008-08-24 21:49:00 -04008255 * @piocb: Pointer to command iocb.
8256 * @flag: Flag indicating if this command can be put into txq.
8257 *
James Smart3772a992009-05-22 14:50:54 -04008258 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
8259 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
8260 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
8261 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
8262 * this function allows only iocbs for posting buffers. This function finds
8263 * next available slot in the command ring and posts the command to the
8264 * available slot and writes the port attention register to request HBA start
8265 * processing new iocb. If there is no slot available in the ring and
8266 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
8267 * the function returns IOCB_BUSY.
James Smarte59058c2008-08-24 21:49:00 -04008268 *
James Smart3772a992009-05-22 14:50:54 -04008269 * This function is called with hbalock held. The function will return success
8270 * after it successfully submits the iocb to firmware or after adding it to the
8271 * txq.
James Smarte59058c2008-08-24 21:49:00 -04008272 **/
James Smart98c9ea52007-10-27 13:37:33 -04008273static int
James Smart3772a992009-05-22 14:50:54 -04008274__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
dea31012005-04-17 16:05:31 -05008275 struct lpfc_iocbq *piocb, uint32_t flag)
8276{
8277 struct lpfc_iocbq *nextiocb;
8278 IOCB_t *iocb;
James Smart895427b2017-02-12 13:52:30 -08008279 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
dea31012005-04-17 16:05:31 -05008280
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01008281 lockdep_assert_held(&phba->hbalock);
8282
James Smart92d7f7b2007-06-17 19:56:38 -05008283 if (piocb->iocb_cmpl && (!piocb->vport) &&
8284 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
8285 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
8286 lpfc_printf_log(phba, KERN_ERR,
8287 LOG_SLI | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04008288 "1807 IOCB x%x failed. No vport\n",
James Smart92d7f7b2007-06-17 19:56:38 -05008289 piocb->iocb.ulpCommand);
8290 dump_stack();
8291 return IOCB_ERROR;
8292 }
8293
8294
Linas Vepstas8d63f372007-02-14 14:28:36 -06008295 /* If the PCI channel is in offline state, do not post iocbs. */
8296 if (unlikely(pci_channel_offline(phba->pcidev)))
8297 return IOCB_ERROR;
8298
James Smarta257bf92009-04-06 18:48:10 -04008299 /* If HBA has a deferred error attention, fail the iocb. */
8300 if (unlikely(phba->hba_flag & DEFER_ERATT))
8301 return IOCB_ERROR;
8302
dea31012005-04-17 16:05:31 -05008303 /*
8304 * We should never get an IOCB if we are in a < LINK_DOWN state
8305 */
James Smart2e0fef82007-06-17 19:56:36 -05008306 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
dea31012005-04-17 16:05:31 -05008307 return IOCB_ERROR;
8308
8309 /*
8310	 * Check to see if we are blocking IOCB processing because of an
James Smart0b727fe2007-10-27 13:37:25 -04008311 * outstanding event.
dea31012005-04-17 16:05:31 -05008312 */
James Smart0b727fe2007-10-27 13:37:25 -04008313 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
dea31012005-04-17 16:05:31 -05008314 goto iocb_busy;
8315
James Smart2e0fef82007-06-17 19:56:36 -05008316 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
dea31012005-04-17 16:05:31 -05008317 /*
James Smart2680eea2007-04-25 09:52:55 -04008318 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
dea31012005-04-17 16:05:31 -05008319 * can be issued if the link is not up.
8320 */
8321 switch (piocb->iocb.ulpCommand) {
James Smart84774a42008-08-24 21:50:06 -04008322 case CMD_GEN_REQUEST64_CR:
8323 case CMD_GEN_REQUEST64_CX:
8324 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
8325 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
James Smart6a9c52c2009-10-02 15:16:51 -04008326 FC_RCTL_DD_UNSOL_CMD) ||
James Smart84774a42008-08-24 21:50:06 -04008327 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
8328 MENLO_TRANSPORT_TYPE))
8329
8330 goto iocb_busy;
8331 break;
dea31012005-04-17 16:05:31 -05008332 case CMD_QUE_RING_BUF_CN:
8333 case CMD_QUE_RING_BUF64_CN:
dea31012005-04-17 16:05:31 -05008334 /*
8335 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
8336 * completion, iocb_cmpl MUST be 0.
8337 */
8338 if (piocb->iocb_cmpl)
8339 piocb->iocb_cmpl = NULL;
8340 /*FALLTHROUGH*/
8341 case CMD_CREATE_XRI_CR:
James Smart2680eea2007-04-25 09:52:55 -04008342 case CMD_CLOSE_XRI_CN:
8343 case CMD_CLOSE_XRI_CX:
dea31012005-04-17 16:05:31 -05008344 break;
8345 default:
8346 goto iocb_busy;
8347 }
8348
8349 /*
8350 * For FCP commands, we must be in a state where we can process link
8351 * attention events.
8352 */
James Smart895427b2017-02-12 13:52:30 -08008353 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
James Smart92d7f7b2007-06-17 19:56:38 -05008354 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
dea31012005-04-17 16:05:31 -05008355 goto iocb_busy;
James Smart92d7f7b2007-06-17 19:56:38 -05008356 }
dea31012005-04-17 16:05:31 -05008357
dea31012005-04-17 16:05:31 -05008358 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
8359 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
8360 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
8361
8362 if (iocb)
8363 lpfc_sli_update_ring(phba, pring);
8364 else
8365 lpfc_sli_update_full_ring(phba, pring);
8366
8367 if (!piocb)
8368 return IOCB_SUCCESS;
8369
8370 goto out_busy;
8371
8372 iocb_busy:
8373 pring->stats.iocb_cmd_delay++;
8374
8375 out_busy:
8376
8377 if (!(flag & SLI_IOCB_RET_IOCB)) {
James Smart92d7f7b2007-06-17 19:56:38 -05008378 __lpfc_sli_ringtx_put(phba, pring, piocb);
dea31012005-04-17 16:05:31 -05008379 return IOCB_SUCCESS;
8380 }
8381
8382 return IOCB_BUSY;
8383}
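
/*
 * Illustrative caller sketch only (not part of the driver): the lockless _s3
 * issue routine above must be entered with hbalock held; the generic
 * lpfc_sli_issue_iocb() wrapper dispatches to it through the
 * __lpfc_sli_issue_iocb jump-table entry under the lock. The example
 * function name is made up.
 */
static int lpfc_example_issue_els_iocb(struct lpfc_hba *phba,
				       struct lpfc_iocbq *piocb)
{
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);
	/* SLI_IOCB_RET_IOCB: return IOCB_BUSY instead of queueing to the txq */
	rc = __lpfc_sli_issue_iocb_s3(phba, LPFC_ELS_RING, piocb,
				      SLI_IOCB_RET_IOCB);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return rc;
}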
8384
James Smart3772a992009-05-22 14:50:54 -04008385/**
James Smart4f774512009-05-22 14:52:35 -04008386 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
8387 * @phba: Pointer to HBA context object.
8388 * @piocbq: Pointer to command iocb.
8389 * @sglq: Pointer to the scatter gather queue object.
8390 *
8391 * This routine converts the bpl or bde that is in the IOCB
8392 * to a sgl list for the sli4 hardware. The physical address
8393 * of the bpl/bde is converted back to a virtual address.
8394 * If the IOCB contains a BPL then the list of BDE's is
8395 * converted to sli4_sge's. If the IOCB contains a single
8396 * BDE then it is converted to a single sli_sge.
8397 * The IOCB is still in cpu endianess so the contents of
8398 * the bpl can be used without byte swapping.
8399 *
8400 * Returns valid XRI = Success, NO_XRI = Failure.
8401**/
8402static uint16_t
8403lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
8404 struct lpfc_sglq *sglq)
8405{
8406 uint16_t xritag = NO_XRI;
8407 struct ulp_bde64 *bpl = NULL;
8408 struct ulp_bde64 bde;
8409 struct sli4_sge *sgl = NULL;
James Smart1b511972011-12-13 13:23:09 -05008410 struct lpfc_dmabuf *dmabuf;
James Smart4f774512009-05-22 14:52:35 -04008411 IOCB_t *icmd;
8412 int numBdes = 0;
8413 int i = 0;
James Smart63e801c2010-11-20 23:14:19 -05008414 uint32_t offset = 0; /* accumulated offset in the sg request list */
8415 int inbound = 0; /* number of sg reply entries inbound from firmware */
James Smart4f774512009-05-22 14:52:35 -04008416
8417 if (!piocbq || !sglq)
8418 return xritag;
8419
8420 sgl = (struct sli4_sge *)sglq->sgl;
8421 icmd = &piocbq->iocb;
James Smart6b5151f2012-01-18 16:24:06 -05008422 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
8423 return sglq->sli4_xritag;
James Smart4f774512009-05-22 14:52:35 -04008424 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8425 numBdes = icmd->un.genreq64.bdl.bdeSize /
8426 sizeof(struct ulp_bde64);
8427 /* The addrHigh and addrLow fields within the IOCB
8428 * have not been byteswapped yet so there is no
8429 * need to swap them back.
8430 */
James Smart1b511972011-12-13 13:23:09 -05008431 if (piocbq->context3)
8432 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
8433 else
8434 return xritag;
James Smart4f774512009-05-22 14:52:35 -04008435
James Smart1b511972011-12-13 13:23:09 -05008436 bpl = (struct ulp_bde64 *)dmabuf->virt;
James Smart4f774512009-05-22 14:52:35 -04008437 if (!bpl)
8438 return xritag;
8439
8440 for (i = 0; i < numBdes; i++) {
8441 /* Should already be byte swapped. */
James Smart28baac72010-02-12 14:42:03 -05008442 sgl->addr_hi = bpl->addrHigh;
8443 sgl->addr_lo = bpl->addrLow;
8444
James Smart05580562011-05-24 11:40:48 -04008445 sgl->word2 = le32_to_cpu(sgl->word2);
James Smart4f774512009-05-22 14:52:35 -04008446 if ((i+1) == numBdes)
8447 bf_set(lpfc_sli4_sge_last, sgl, 1);
8448 else
8449 bf_set(lpfc_sli4_sge_last, sgl, 0);
James Smart28baac72010-02-12 14:42:03 -05008450 /* swap the size field back to the cpu so we
8451 * can assign it to the sgl.
8452 */
8453 bde.tus.w = le32_to_cpu(bpl->tus.w);
8454 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
James Smart63e801c2010-11-20 23:14:19 -05008455 /* The offsets in the sgl need to be accumulated
8456 * separately for the request and reply lists.
8457 * The request is always first, the reply follows.
8458 */
8459 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
8460 /* add up the reply sg entries */
8461 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
8462 inbound++;
8463 /* first inbound? reset the offset */
8464 if (inbound == 1)
8465 offset = 0;
8466 bf_set(lpfc_sli4_sge_offset, sgl, offset);
James Smartf9bb2da2011-10-10 21:34:11 -04008467 bf_set(lpfc_sli4_sge_type, sgl,
8468 LPFC_SGE_TYPE_DATA);
James Smart63e801c2010-11-20 23:14:19 -05008469 offset += bde.tus.f.bdeSize;
8470 }
James Smart546fc852011-03-11 16:06:29 -05008471 sgl->word2 = cpu_to_le32(sgl->word2);
James Smart4f774512009-05-22 14:52:35 -04008472 bpl++;
8473 sgl++;
8474 }
8475 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
8476 /* The addrHigh and addrLow fields of the BDE have not
8477 * been byteswapped yet so they need to be swapped
8478 * before putting them in the sgl.
8479 */
8480 sgl->addr_hi =
8481 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
8482 sgl->addr_lo =
8483 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
James Smart05580562011-05-24 11:40:48 -04008484 sgl->word2 = le32_to_cpu(sgl->word2);
James Smart4f774512009-05-22 14:52:35 -04008485 bf_set(lpfc_sli4_sge_last, sgl, 1);
8486 sgl->word2 = cpu_to_le32(sgl->word2);
James Smart28baac72010-02-12 14:42:03 -05008487 sgl->sge_len =
8488 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
James Smart4f774512009-05-22 14:52:35 -04008489 }
8490 return sglq->sli4_xritag;
8491}
8492
8493/**
James Smart4f774512009-05-22 14:52:35 -04008494 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
8495 * @phba: Pointer to HBA context object.
8496 * @iocbq: Pointer to command iocb.
8497 * @wqe: Pointer to the work queue entry.
8498 *
8499 * This routine converts the iocb command to its Work Queue Entry
8500 * equivalent. The wqe pointer should not have any fields set when
8501 * this routine is called because it will memcpy over them.
8502 * This routine does not set the CQ_ID or the WQEC bits in the
8503 * wqe.
8504 *
8505 * Returns: 0 = Success, IOCB_ERROR = Failure.
8506 **/
8507static int
8508lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8509 union lpfc_wqe *wqe)
8510{
James Smart5ffc2662009-11-18 15:39:44 -05008511 uint32_t xmit_len = 0, total_len = 0;
James Smart4f774512009-05-22 14:52:35 -04008512 uint8_t ct = 0;
8513 uint32_t fip;
8514 uint32_t abort_tag;
8515 uint8_t command_type = ELS_COMMAND_NON_FIP;
8516 uint8_t cmnd;
8517 uint16_t xritag;
James Smartdcf2a4e2010-09-29 11:18:53 -04008518 uint16_t abrt_iotag;
8519 struct lpfc_iocbq *abrtiocbq;
James Smart4f774512009-05-22 14:52:35 -04008520 struct ulp_bde64 *bpl = NULL;
James Smartf0d9bcc2010-10-22 11:07:09 -04008521 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
James Smart5ffc2662009-11-18 15:39:44 -05008522 int numBdes, i;
8523 struct ulp_bde64 bde;
James Smartc31098c2011-04-16 11:03:33 -04008524 struct lpfc_nodelist *ndlp;
James Smartff78d8f2011-12-13 13:21:35 -05008525 uint32_t *pcmd;
James Smart1b511972011-12-13 13:23:09 -05008526 uint32_t if_type;
James Smart4f774512009-05-22 14:52:35 -04008527
James Smart45ed1192009-10-02 15:17:02 -04008528 fip = phba->hba_flag & HBA_FIP_SUPPORT;
James Smart4f774512009-05-22 14:52:35 -04008529 /* The fcp commands will set command type */
James Smart0c287582009-06-10 17:22:56 -04008530 if (iocbq->iocb_flag & LPFC_IO_FCP)
James Smart4f774512009-05-22 14:52:35 -04008531 command_type = FCP_COMMAND;
James Smartc8685952009-11-18 15:39:16 -05008532 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
James Smart0c287582009-06-10 17:22:56 -04008533 command_type = ELS_COMMAND_FIP;
8534 else
8535 command_type = ELS_COMMAND_NON_FIP;
8536
James Smartb5c53952016-03-31 14:12:30 -07008537 if (phba->fcp_embed_io)
8538 memset(wqe, 0, sizeof(union lpfc_wqe128));
James Smart4f774512009-05-22 14:52:35 -04008539 /* Some of the fields are in the right position already */
8540 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
James Smartf0d9bcc2010-10-22 11:07:09 -04008541 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
James Smart28d7f3d2014-05-21 08:05:28 -04008542 wqe->generic.wqe_com.word10 = 0;
James Smartb5c53952016-03-31 14:12:30 -07008543
8544 abort_tag = (uint32_t) iocbq->iotag;
8545 xritag = iocbq->sli4_xritag;
James Smart4f774512009-05-22 14:52:35 -04008546 /* words0-2 bpl convert bde */
8547 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
James Smart5ffc2662009-11-18 15:39:44 -05008548 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8549 sizeof(struct ulp_bde64);
James Smart4f774512009-05-22 14:52:35 -04008550 bpl = (struct ulp_bde64 *)
8551 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
8552 if (!bpl)
8553 return IOCB_ERROR;
8554
8555 /* Should already be byte swapped. */
8556 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
8557 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
8558 /* swap the size field back to the cpu so we
8559 * can assign it to the sgl.
8560 */
8561 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
James Smart5ffc2662009-11-18 15:39:44 -05008562 xmit_len = wqe->generic.bde.tus.f.bdeSize;
8563 total_len = 0;
8564 for (i = 0; i < numBdes; i++) {
8565 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
8566 total_len += bde.tus.f.bdeSize;
8567 }
James Smart4f774512009-05-22 14:52:35 -04008568 } else
James Smart5ffc2662009-11-18 15:39:44 -05008569 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
James Smart4f774512009-05-22 14:52:35 -04008570
8571 iocbq->iocb.ulpIoTag = iocbq->iotag;
8572 cmnd = iocbq->iocb.ulpCommand;
8573
8574 switch (iocbq->iocb.ulpCommand) {
8575 case CMD_ELS_REQUEST64_CR:
James Smart93d1379e2012-05-09 21:19:34 -04008576 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
8577 ndlp = iocbq->context_un.ndlp;
8578 else
8579 ndlp = (struct lpfc_nodelist *)iocbq->context1;
James Smart4f774512009-05-22 14:52:35 -04008580 if (!iocbq->iocb.ulpLe) {
8581 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8582 "2007 Only Limited Edition cmd Format"
8583 " supported 0x%x\n",
8584 iocbq->iocb.ulpCommand);
8585 return IOCB_ERROR;
8586 }
James Smartff78d8f2011-12-13 13:21:35 -05008587
James Smart5ffc2662009-11-18 15:39:44 -05008588 wqe->els_req.payload_len = xmit_len;
James Smart4f774512009-05-22 14:52:35 -04008589		/* Els_request64 has a TMO */
8590 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
8591 iocbq->iocb.ulpTimeout);
8592 /* Need a VF for word 4 set the vf bit*/
8593 bf_set(els_req64_vf, &wqe->els_req, 0);
8594 /* And a VFID for word 12 */
8595 bf_set(els_req64_vfid, &wqe->els_req, 0);
James Smart4f774512009-05-22 14:52:35 -04008596 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
James Smartf0d9bcc2010-10-22 11:07:09 -04008597 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8598 iocbq->iocb.ulpContext);
8599 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
8600 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
James Smart4f774512009-05-22 14:52:35 -04008601 /* CCP CCPE PV PRI in word10 were set in the memcpy */
James Smartff78d8f2011-12-13 13:21:35 -05008602 if (command_type == ELS_COMMAND_FIP)
James Smartc8685952009-11-18 15:39:16 -05008603 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
8604 >> LPFC_FIP_ELS_ID_SHIFT);
James Smartff78d8f2011-12-13 13:21:35 -05008605 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8606 iocbq->context2)->virt);
James Smart1b511972011-12-13 13:23:09 -05008607 if_type = bf_get(lpfc_sli_intf_if_type,
8608 &phba->sli4_hba.sli_intf);
8609 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
James Smartff78d8f2011-12-13 13:21:35 -05008610 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
James Smartcb69f7d2011-12-13 13:21:57 -05008611 *pcmd == ELS_CMD_SCR ||
James Smart6b5151f2012-01-18 16:24:06 -05008612 *pcmd == ELS_CMD_FDISC ||
James Smartbdcd2b92012-03-01 22:33:52 -05008613 *pcmd == ELS_CMD_LOGO ||
James Smartff78d8f2011-12-13 13:21:35 -05008614 *pcmd == ELS_CMD_PLOGI)) {
8615 bf_set(els_req64_sp, &wqe->els_req, 1);
8616 bf_set(els_req64_sid, &wqe->els_req,
8617 iocbq->vport->fc_myDID);
James Smart939723a2012-05-09 21:19:03 -04008618 if ((*pcmd == ELS_CMD_FLOGI) &&
8619 !(phba->fc_topology ==
8620 LPFC_TOPOLOGY_LOOP))
8621 bf_set(els_req64_sid, &wqe->els_req, 0);
James Smartff78d8f2011-12-13 13:21:35 -05008622 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
8623 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
James Smarta7dd9c02012-05-09 21:16:50 -04008624 phba->vpi_ids[iocbq->vport->vpi]);
James Smart3ef6d242012-01-18 16:23:48 -05008625 } else if (pcmd && iocbq->context1) {
James Smartff78d8f2011-12-13 13:21:35 -05008626 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
8627 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8628 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8629 }
James Smartc8685952009-11-18 15:39:16 -05008630 }
James Smart6d368e52011-05-24 11:44:12 -04008631 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
8632 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
James Smartf0d9bcc2010-10-22 11:07:09 -04008633 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
8634 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
8635 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
8636 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
8637 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8638 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
James Smartaf227412013-10-10 12:23:10 -04008639 wqe->els_req.max_response_payload_len = total_len - xmit_len;
James Smart7851fe22011-07-22 18:36:52 -04008640 break;
James Smart5ffc2662009-11-18 15:39:44 -05008641 case CMD_XMIT_SEQUENCE64_CX:
James Smartf0d9bcc2010-10-22 11:07:09 -04008642 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
8643 iocbq->iocb.un.ulpWord[3]);
8644 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
James Smart7851fe22011-07-22 18:36:52 -04008645 iocbq->iocb.unsli3.rcvsli3.ox_id);
James Smart5ffc2662009-11-18 15:39:44 -05008646 /* The entire sequence is transmitted for this IOCB */
8647 xmit_len = total_len;
8648 cmnd = CMD_XMIT_SEQUENCE64_CR;
James Smart1b511972011-12-13 13:23:09 -05008649 if (phba->link_flag & LS_LOOPBACK_MODE)
8650 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
James Smart4f774512009-05-22 14:52:35 -04008651 case CMD_XMIT_SEQUENCE64_CR:
James Smartf0d9bcc2010-10-22 11:07:09 -04008652 /* word3 iocb=io_tag32 wqe=reserved */
8653 wqe->xmit_sequence.rsvd3 = 0;
James Smart4f774512009-05-22 14:52:35 -04008654 /* word4 relative_offset memcpy */
8655 /* word5 r_ctl/df_ctl memcpy */
James Smartf0d9bcc2010-10-22 11:07:09 -04008656 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
8657 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
8658 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
8659 LPFC_WQE_IOD_WRITE);
8660 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
8661 LPFC_WQE_LENLOC_WORD12);
8662 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
James Smart5ffc2662009-11-18 15:39:44 -05008663 wqe->xmit_sequence.xmit_len = xmit_len;
8664 command_type = OTHER_COMMAND;
James Smart7851fe22011-07-22 18:36:52 -04008665 break;
James Smart4f774512009-05-22 14:52:35 -04008666 case CMD_XMIT_BCAST64_CN:
James Smartf0d9bcc2010-10-22 11:07:09 -04008667 /* word3 iocb=iotag32 wqe=seq_payload_len */
8668 wqe->xmit_bcast64.seq_payload_len = xmit_len;
James Smart4f774512009-05-22 14:52:35 -04008669 /* word4 iocb=rsvd wqe=rsvd */
8670 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
8671 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
James Smartf0d9bcc2010-10-22 11:07:09 -04008672 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
James Smart4f774512009-05-22 14:52:35 -04008673 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
James Smartf0d9bcc2010-10-22 11:07:09 -04008674 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
8675 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
8676 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
8677 LPFC_WQE_LENLOC_WORD3);
8678 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
James Smart7851fe22011-07-22 18:36:52 -04008679 break;
James Smart4f774512009-05-22 14:52:35 -04008680 case CMD_FCP_IWRITE64_CR:
8681 command_type = FCP_COMMAND_DATA_OUT;
James Smartf0d9bcc2010-10-22 11:07:09 -04008682 /* word3 iocb=iotag wqe=payload_offset_len */
8683 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
James Smart0ba4b212013-10-10 12:22:38 -04008684 bf_set(payload_offset_len, &wqe->fcp_iwrite,
8685 xmit_len + sizeof(struct fcp_rsp));
8686 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
8687 0);
James Smartf0d9bcc2010-10-22 11:07:09 -04008688 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8689 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8690 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
8691 iocbq->iocb.ulpFCP2Rcvy);
8692 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
8693 /* Always open the exchange */
James Smartf0d9bcc2010-10-22 11:07:09 -04008694 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
8695 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
8696 LPFC_WQE_LENLOC_WORD4);
James Smartf0d9bcc2010-10-22 11:07:09 -04008697 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
James Smartacd68592012-01-18 16:25:09 -05008698 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
James Smart1ba981f2014-02-20 09:56:45 -05008699 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8700 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
James Smartc92c8412016-07-06 12:36:05 -07008701 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
8702 if (iocbq->priority) {
8703 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
8704 (iocbq->priority << 1));
8705 } else {
James Smart1ba981f2014-02-20 09:56:45 -05008706 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
8707 (phba->cfg_XLanePriority << 1));
8708 }
8709 }
James Smartb5c53952016-03-31 14:12:30 -07008710 /* Note, word 10 is already initialized to 0 */
8711
8712 if (phba->fcp_embed_io) {
8713 struct lpfc_scsi_buf *lpfc_cmd;
8714 struct sli4_sge *sgl;
8715 union lpfc_wqe128 *wqe128;
8716 struct fcp_cmnd *fcp_cmnd;
8717 uint32_t *ptr;
8718
8719 /* 128 byte wqe support here */
8720 wqe128 = (union lpfc_wqe128 *)wqe;
8721
8722 lpfc_cmd = iocbq->context1;
8723 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8724 fcp_cmnd = lpfc_cmd->fcp_cmnd;
8725
8726 /* Word 0-2 - FCP_CMND */
8727 wqe128->generic.bde.tus.f.bdeFlags =
8728 BUFF_TYPE_BDE_IMMED;
8729 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8730 wqe128->generic.bde.addrHigh = 0;
8731 wqe128->generic.bde.addrLow = 88; /* Word 22 */
8732
8733 bf_set(wqe_wqes, &wqe128->fcp_iwrite.wqe_com, 1);
8734
8735 /* Word 22-29 FCP CMND Payload */
8736 ptr = &wqe128->words[22];
8737 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8738 }
James Smart7851fe22011-07-22 18:36:52 -04008739 break;
James Smartf0d9bcc2010-10-22 11:07:09 -04008740 case CMD_FCP_IREAD64_CR:
8741 /* word3 iocb=iotag wqe=payload_offset_len */
8742 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
James Smart0ba4b212013-10-10 12:22:38 -04008743 bf_set(payload_offset_len, &wqe->fcp_iread,
8744 xmit_len + sizeof(struct fcp_rsp));
8745 bf_set(cmd_buff_len, &wqe->fcp_iread,
8746 0);
James Smartf0d9bcc2010-10-22 11:07:09 -04008747 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8748 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8749 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
8750 iocbq->iocb.ulpFCP2Rcvy);
8751 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
James Smart4f774512009-05-22 14:52:35 -04008752 /* Always open the exchange */
James Smartf0d9bcc2010-10-22 11:07:09 -04008753 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
8754 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
8755 LPFC_WQE_LENLOC_WORD4);
James Smartf0d9bcc2010-10-22 11:07:09 -04008756 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
James Smartacd68592012-01-18 16:25:09 -05008757 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
James Smart1ba981f2014-02-20 09:56:45 -05008758 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8759 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
James Smartc92c8412016-07-06 12:36:05 -07008760 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
8761 if (iocbq->priority) {
8762 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
8763 (iocbq->priority << 1));
8764 } else {
James Smart1ba981f2014-02-20 09:56:45 -05008765 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
8766 (phba->cfg_XLanePriority << 1));
8767 }
8768 }
James Smartb5c53952016-03-31 14:12:30 -07008769 /* Note, word 10 is already initialized to 0 */
8770
8771 if (phba->fcp_embed_io) {
8772 struct lpfc_scsi_buf *lpfc_cmd;
8773 struct sli4_sge *sgl;
8774 union lpfc_wqe128 *wqe128;
8775 struct fcp_cmnd *fcp_cmnd;
8776 uint32_t *ptr;
8777
8778 /* 128 byte wqe support here */
8779 wqe128 = (union lpfc_wqe128 *)wqe;
8780
8781 lpfc_cmd = iocbq->context1;
8782 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8783 fcp_cmnd = lpfc_cmd->fcp_cmnd;
8784
8785 /* Word 0-2 - FCP_CMND */
8786 wqe128->generic.bde.tus.f.bdeFlags =
8787 BUFF_TYPE_BDE_IMMED;
8788 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8789 wqe128->generic.bde.addrHigh = 0;
8790 wqe128->generic.bde.addrLow = 88; /* Word 22 */
8791
8792 bf_set(wqe_wqes, &wqe128->fcp_iread.wqe_com, 1);
8793
8794 /* Word 22-29 FCP CMND Payload */
8795 ptr = &wqe128->words[22];
8796 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8797 }
James Smart7851fe22011-07-22 18:36:52 -04008798 break;
James Smartf1126682009-06-10 17:22:44 -04008799 case CMD_FCP_ICMND64_CR:
James Smart0ba4b212013-10-10 12:22:38 -04008800 /* word3 iocb=iotag wqe=payload_offset_len */
8801 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8802 bf_set(payload_offset_len, &wqe->fcp_icmd,
8803 xmit_len + sizeof(struct fcp_rsp));
8804 bf_set(cmd_buff_len, &wqe->fcp_icmd,
8805 0);
James Smartf0d9bcc2010-10-22 11:07:09 -04008806 /* word3 iocb=IO_TAG wqe=reserved */
James Smartf0d9bcc2010-10-22 11:07:09 -04008807 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
James Smartf1126682009-06-10 17:22:44 -04008808 /* Always open the exchange */
James Smartf0d9bcc2010-10-22 11:07:09 -04008809 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
8810 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
8811 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
8812 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
8813 LPFC_WQE_LENLOC_NONE);
James Smart2a94aea2012-09-29 11:30:31 -04008814 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
8815 iocbq->iocb.ulpFCP2Rcvy);
James Smart1ba981f2014-02-20 09:56:45 -05008816 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8817 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
James Smartc92c8412016-07-06 12:36:05 -07008818 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
8819 if (iocbq->priority) {
8820 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
8821 (iocbq->priority << 1));
8822 } else {
James Smart1ba981f2014-02-20 09:56:45 -05008823 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
8824 (phba->cfg_XLanePriority << 1));
8825 }
8826 }
James Smartb5c53952016-03-31 14:12:30 -07008827 /* Note, word 10 is already initialized to 0 */
8828
8829 if (phba->fcp_embed_io) {
8830 struct lpfc_scsi_buf *lpfc_cmd;
8831 struct sli4_sge *sgl;
8832 union lpfc_wqe128 *wqe128;
8833 struct fcp_cmnd *fcp_cmnd;
8834 uint32_t *ptr;
8835
8836 /* 128 byte wqe support here */
8837 wqe128 = (union lpfc_wqe128 *)wqe;
8838
8839 lpfc_cmd = iocbq->context1;
8840 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8841 fcp_cmnd = lpfc_cmd->fcp_cmnd;
8842
8843 /* Word 0-2 - FCP_CMND */
8844 wqe128->generic.bde.tus.f.bdeFlags =
8845 BUFF_TYPE_BDE_IMMED;
8846 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8847 wqe128->generic.bde.addrHigh = 0;
8848 wqe128->generic.bde.addrLow = 88; /* Word 22 */
8849
8850 bf_set(wqe_wqes, &wqe128->fcp_icmd.wqe_com, 1);
8851
8852 /* Word 22-29 FCP CMND Payload */
8853 ptr = &wqe128->words[22];
8854 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8855 }
James Smart7851fe22011-07-22 18:36:52 -04008856 break;
James Smart4f774512009-05-22 14:52:35 -04008857 case CMD_GEN_REQUEST64_CR:
James Smart63e801c2010-11-20 23:14:19 -05008858 /* For this command calculate the xmit length of the
8859 * request bde.
8860 */
8861 xmit_len = 0;
8862 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8863 sizeof(struct ulp_bde64);
8864 for (i = 0; i < numBdes; i++) {
James Smart63e801c2010-11-20 23:14:19 -05008865 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
James Smart546fc852011-03-11 16:06:29 -05008866 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
8867 break;
James Smart63e801c2010-11-20 23:14:19 -05008868 xmit_len += bde.tus.f.bdeSize;
8869 }
James Smartf0d9bcc2010-10-22 11:07:09 -04008870 /* word3 iocb=IO_TAG wqe=request_payload_len */
8871 wqe->gen_req.request_payload_len = xmit_len;
8872 /* word4 iocb=parameter wqe=relative_offset memcpy */
8873 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
James Smart4f774512009-05-22 14:52:35 -04008874 /* word6 context tag copied in memcpy */
8875 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
8876 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8877 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8878 "2015 Invalid CT %x command 0x%x\n",
8879 ct, iocbq->iocb.ulpCommand);
8880 return IOCB_ERROR;
8881 }
James Smartf0d9bcc2010-10-22 11:07:09 -04008882 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
8883 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
8884 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
8885 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
8886 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
8887 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
8888 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8889 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
James Smartaf227412013-10-10 12:23:10 -04008890 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
James Smart4f774512009-05-22 14:52:35 -04008891 command_type = OTHER_COMMAND;
James Smart7851fe22011-07-22 18:36:52 -04008892 break;
James Smart4f774512009-05-22 14:52:35 -04008893 case CMD_XMIT_ELS_RSP64_CX:
James Smartc31098c2011-04-16 11:03:33 -04008894 ndlp = (struct lpfc_nodelist *)iocbq->context1;
James Smart4f774512009-05-22 14:52:35 -04008895 /* words0-2 BDE memcpy */
James Smartf0d9bcc2010-10-22 11:07:09 -04008896 /* word3 iocb=iotag32 wqe=response_payload_len */
8897 wqe->xmit_els_rsp.response_payload_len = xmit_len;
James Smart939723a2012-05-09 21:19:03 -04008898 /* word4 */
8899 wqe->xmit_els_rsp.word4 = 0;
James Smart4f774512009-05-22 14:52:35 -04008900 /* word5 iocb=rsvd wge=did */
8901 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
James Smart939723a2012-05-09 21:19:03 -04008902 iocbq->iocb.un.xseq64.xmit_els_remoteID);
8903
8904 if_type = bf_get(lpfc_sli_intf_if_type,
8905 &phba->sli4_hba.sli_intf);
8906 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8907 if (iocbq->vport->fc_flag & FC_PT2PT) {
8908 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8909 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8910 iocbq->vport->fc_myDID);
8911 if (iocbq->vport->fc_myDID == Fabric_DID) {
8912 bf_set(wqe_els_did,
8913 &wqe->xmit_els_rsp.wqe_dest, 0);
8914 }
8915 }
8916 }
James Smartf0d9bcc2010-10-22 11:07:09 -04008917 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
8918 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8919 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
8920 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
James Smart7851fe22011-07-22 18:36:52 -04008921 iocbq->iocb.unsli3.rcvsli3.ox_id);
James Smart4f774512009-05-22 14:52:35 -04008922 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
James Smartf0d9bcc2010-10-22 11:07:09 -04008923 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
James Smart6d368e52011-05-24 11:44:12 -04008924 phba->vpi_ids[iocbq->vport->vpi]);
James Smartf0d9bcc2010-10-22 11:07:09 -04008925 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
8926 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
8927 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
8928 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
8929 LPFC_WQE_LENLOC_WORD3);
8930 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
James Smart6d368e52011-05-24 11:44:12 -04008931 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
8932 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
James Smartff78d8f2011-12-13 13:21:35 -05008933 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8934 iocbq->context2)->virt);
8935 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
James Smart939723a2012-05-09 21:19:03 -04008936 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8937 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
James Smartff78d8f2011-12-13 13:21:35 -05008938 iocbq->vport->fc_myDID);
James Smart939723a2012-05-09 21:19:03 -04008939 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
8940 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
James Smartff78d8f2011-12-13 13:21:35 -05008941 phba->vpi_ids[phba->pport->vpi]);
8942 }
James Smart4f774512009-05-22 14:52:35 -04008943 command_type = OTHER_COMMAND;
James Smart7851fe22011-07-22 18:36:52 -04008944 break;
James Smart4f774512009-05-22 14:52:35 -04008945 case CMD_CLOSE_XRI_CN:
8946 case CMD_ABORT_XRI_CN:
8947 case CMD_ABORT_XRI_CX:
8948		/* words 0-2 memcpy should be 0 (reserved) */
8949 /* port will send abts */
James Smartdcf2a4e2010-09-29 11:18:53 -04008950 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
8951 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
8952 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
8953 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
8954 } else
8955 fip = 0;
8956
8957 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
James Smart4f774512009-05-22 14:52:35 -04008958 /*
James Smartdcf2a4e2010-09-29 11:18:53 -04008959 * The link is down, or the command was ELS_FIP
8960 * so the fw does not need to send abts
James Smart4f774512009-05-22 14:52:35 -04008961 * on the wire.
8962 */
8963 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
8964 else
8965 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
8966 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
James Smartf0d9bcc2010-10-22 11:07:09 -04008967 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
8968 wqe->abort_cmd.rsrvd5 = 0;
8969 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
James Smart4f774512009-05-22 14:52:35 -04008970 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8971 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
James Smart4f774512009-05-22 14:52:35 -04008972 /*
8973 * The abort handler will send us CMD_ABORT_XRI_CN or
8974 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
8975 */
James Smartf0d9bcc2010-10-22 11:07:09 -04008976 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
8977 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
8978 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
8979 LPFC_WQE_LENLOC_NONE);
James Smart4f774512009-05-22 14:52:35 -04008980 cmnd = CMD_ABORT_XRI_CX;
8981 command_type = OTHER_COMMAND;
8982 xritag = 0;
James Smart7851fe22011-07-22 18:36:52 -04008983 break;
James Smart6669f9b2009-10-02 15:16:45 -04008984 case CMD_XMIT_BLS_RSP64_CX:
James Smart6b5151f2012-01-18 16:24:06 -05008985 ndlp = (struct lpfc_nodelist *)iocbq->context1;
James Smart546fc852011-03-11 16:06:29 -05008986 /* As BLS ABTS RSP WQE is very different from other WQEs,
James Smart6669f9b2009-10-02 15:16:45 -04008987 * we re-construct this WQE here based on information in
8988 * iocbq from scratch.
8989 */
8990 memset(wqe, 0, sizeof(union lpfc_wqe));
James Smart5ffc2662009-11-18 15:39:44 -05008991 /* The OX_ID is the same regardless of who sent the ABTS to the CT exchange */
James Smart6669f9b2009-10-02 15:16:45 -04008992 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
James Smart546fc852011-03-11 16:06:29 -05008993 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
8994 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
James Smart5ffc2662009-11-18 15:39:44 -05008995 LPFC_ABTS_UNSOL_INT) {
8996 /* ABTS sent by initiator to CT exchange, the
8997 * RX_ID field will be filled with the newly
8998 * allocated responder XRI.
8999 */
9000 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9001 iocbq->sli4_xritag);
9002 } else {
9003 /* ABTS sent by responder to CT exchange, the
9004 * RX_ID field will be filled with the responder
9005 * RX_ID from ABTS.
9006 */
9007 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
James Smart546fc852011-03-11 16:06:29 -05009008 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
James Smart5ffc2662009-11-18 15:39:44 -05009009 }
James Smart6669f9b2009-10-02 15:16:45 -04009010 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9011 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
James Smart6b5151f2012-01-18 16:24:06 -05009012
9013 /* Use CT=VPI */
9014 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9015 ndlp->nlp_DID);
9016 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9017 iocbq->iocb.ulpContext);
9018 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
James Smart6669f9b2009-10-02 15:16:45 -04009019 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
James Smart6b5151f2012-01-18 16:24:06 -05009020 phba->vpi_ids[phba->pport->vpi]);
James Smartf0d9bcc2010-10-22 11:07:09 -04009021 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9022 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9023 LPFC_WQE_LENLOC_NONE);
James Smart6669f9b2009-10-02 15:16:45 -04009024 /* Overwrite the pre-set command type with OTHER_COMMAND */
9025 command_type = OTHER_COMMAND;
James Smart546fc852011-03-11 16:06:29 -05009026 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9027 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9028 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9029 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9030 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9031 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9032 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9033 }
9034
James Smart7851fe22011-07-22 18:36:52 -04009035 break;
James Smart4f774512009-05-22 14:52:35 -04009036 case CMD_XRI_ABORTED_CX:
9037 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
James Smart4f774512009-05-22 14:52:35 -04009038 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9039 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9040 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9041 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9042 default:
9043 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9044 "2014 Invalid command 0x%x\n",
9045 iocbq->iocb.ulpCommand);
9046 return IOCB_ERROR;
James Smart7851fe22011-07-22 18:36:52 -04009047 break;
James Smart4f774512009-05-22 14:52:35 -04009048 }
James Smart6d368e52011-05-24 11:44:12 -04009049
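	/* Record the requested T10 PI (DIF) operation, if any, in the WQE,
	 * then clear the DIF flags on the iocb so they are not acted on again.
	 */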
James Smart8012cc32012-10-31 14:44:49 -04009050 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9051 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9052 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9053 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9054 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9055 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9056 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9057 LPFC_IO_DIF_INSERT);
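	/* Finally fill in the fields common to all WQEs: XRI, request tag,
	 * abort tag, command type and opcode, class of service and CQ ID.
	 */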
James Smartf0d9bcc2010-10-22 11:07:09 -04009058 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9059 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9060 wqe->generic.wqe_com.abort_tag = abort_tag;
9061 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9062 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9063 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9064 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
James Smart4f774512009-05-22 14:52:35 -04009065 return 0;
9066}
9067
9068/**
9069 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9070 * @phba: Pointer to HBA context object.
9071 * @ring_number: SLI ring number to issue iocb on.
9072 * @piocb: Pointer to command iocb.
9073 * @flag: Flag indicating if this command can be put into txq.
9074 *
9075 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9076 * an iocb command to an HBA with SLI-4 interface spec.
9077 *
9078 * This function is called with hbalock held. The function will return success
9079 * after it successfully submits the iocb to firmware or after adding it
9080 * to the txq.
9081 **/
9082static int
9083__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9084 struct lpfc_iocbq *piocb, uint32_t flag)
9085{
9086 struct lpfc_sglq *sglq;
James Smartb5c53952016-03-31 14:12:30 -07009087 union lpfc_wqe *wqe;
9088 union lpfc_wqe128 wqe128;
James Smart1ba981f2014-02-20 09:56:45 -05009089 struct lpfc_queue *wq;
James Smart895427b2017-02-12 13:52:30 -08009090 struct lpfc_sli_ring *pring;
James Smart4f774512009-05-22 14:52:35 -04009091
James Smart895427b2017-02-12 13:52:30 -08009092 /* Get the WQ */
9093 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9094 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9095 if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS)))
9096 wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
9097 else
9098 wq = phba->sli4_hba.oas_wq;
9099 } else {
9100 wq = phba->sli4_hba.els_wq;
9101 }
9102
9103 /* Get corresponding ring */
9104 pring = wq->pring;
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +01009105
James Smartb5c53952016-03-31 14:12:30 -07009106 /*
9107 * The WQE can be either 64 or 128 bytes,
9108 * so allocate space on the stack assuming the largest.
9109 */
9110 wqe = (union lpfc_wqe *)&wqe128;
9111
James Smart895427b2017-02-12 13:52:30 -08009112 lockdep_assert_held(&phba->hbalock);
9113
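	/* No XRI assigned yet: abort/close commands need no sglq; any other
	 * command must get an ELS sglq, otherwise it is queued on the txq
	 * (or IOCB_BUSY is returned if the caller does not allow queueing).
	 */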
James Smart4f774512009-05-22 14:52:35 -04009114 if (piocb->sli4_xritag == NO_XRI) {
9115 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
James Smart6b5151f2012-01-18 16:24:06 -05009116 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
James Smart4f774512009-05-22 14:52:35 -04009117 sglq = NULL;
9118 else {
James Smart0e9bb8d2013-03-01 16:35:12 -05009119 if (!list_empty(&pring->txq)) {
James Smart2a9bf3d2010-06-07 15:24:45 -04009120 if (!(flag & SLI_IOCB_RET_IOCB)) {
9121 __lpfc_sli_ringtx_put(phba,
9122 pring, piocb);
9123 return IOCB_SUCCESS;
9124 } else {
9125 return IOCB_BUSY;
9126 }
9127 } else {
James Smart895427b2017-02-12 13:52:30 -08009128 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
James Smart2a9bf3d2010-06-07 15:24:45 -04009129 if (!sglq) {
9130 if (!(flag & SLI_IOCB_RET_IOCB)) {
9131 __lpfc_sli_ringtx_put(phba,
9132 pring,
9133 piocb);
9134 return IOCB_SUCCESS;
9135 } else
9136 return IOCB_BUSY;
9137 }
9138 }
James Smart4f774512009-05-22 14:52:35 -04009139 }
James Smart2ea259e2017-02-12 13:52:27 -08009140 } else if (piocb->iocb_flag & LPFC_IO_FCP)
James Smart6d368e52011-05-24 11:44:12 -04009141 /* These IO's already have an XRI and a mapped sgl. */
9142 sglq = NULL;
James Smart2ea259e2017-02-12 13:52:27 -08009143 else {
James Smart6d368e52011-05-24 11:44:12 -04009144 /*
9145 * This is a continuation of a command (CX), so this
James Smart4f774512009-05-22 14:52:35 -04009146 * sglq is on the active list
9147 */
James Smartedccdc12013-01-03 15:43:45 -05009148 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
James Smart4f774512009-05-22 14:52:35 -04009149 if (!sglq)
9150 return IOCB_ERROR;
9151 }
9152
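	/* Bind the sglq's XRI to this iocb and convert the command's BPL
	 * into the SGL format the SLI4 port expects.
	 */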
9153 if (sglq) {
James Smart6d368e52011-05-24 11:44:12 -04009154 piocb->sli4_lxritag = sglq->sli4_lxritag;
James Smart2a9bf3d2010-06-07 15:24:45 -04009155 piocb->sli4_xritag = sglq->sli4_xritag;
James Smart2a9bf3d2010-06-07 15:24:45 -04009156 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
James Smart4f774512009-05-22 14:52:35 -04009157 return IOCB_ERROR;
9158 }
9159
James Smartb5c53952016-03-31 14:12:30 -07009160 if (lpfc_sli4_iocb2wqe(phba, piocb, wqe))
James Smart4f774512009-05-22 14:52:35 -04009161 return IOCB_ERROR;
9162
James Smart895427b2017-02-12 13:52:30 -08009163 if (lpfc_sli4_wq_put(wq, wqe))
9164 return IOCB_ERROR;
James Smart4f774512009-05-22 14:52:35 -04009165 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
9166
9167 return 0;
9168}
9169
9170/**
James Smart3772a992009-05-22 14:50:54 -04009171 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
9172 *
9173 * This routine wraps the actual lockless version, calling the issue IOCB
9174 * function pointer from the lpfc_hba struct.
9175 *
9176 * Return codes:
James Smartb5c53952016-03-31 14:12:30 -07009177 * IOCB_ERROR - Error
9178 * IOCB_SUCCESS - Success
9179 * IOCB_BUSY - Busy
James Smart3772a992009-05-22 14:50:54 -04009180 **/
James Smart2a9bf3d2010-06-07 15:24:45 -04009181int
James Smart3772a992009-05-22 14:50:54 -04009182__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9183 struct lpfc_iocbq *piocb, uint32_t flag)
9184{
9185 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9186}
9187
9188/**
Lucas De Marchi25985ed2011-03-30 22:57:33 -03009189 * lpfc_sli_api_table_setup - Set up sli api function jump table
James Smart3772a992009-05-22 14:50:54 -04009190 * @phba: The hba struct for which this call is being executed.
9191 * @dev_grp: The HBA PCI-Device group number.
9192 *
9193 * This routine sets up the SLI interface API function jump table in @phba
9194 * struct.
9195 * Returns: 0 - success, -ENODEV - failure.
9196 **/
9197int
9198lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9199{
9200
9201 switch (dev_grp) {
9202 case LPFC_PCI_DEV_LP:
9203 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
9204 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
9205 break;
James Smart4f774512009-05-22 14:52:35 -04009206 case LPFC_PCI_DEV_OC:
9207 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
9208 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
9209 break;
James Smart3772a992009-05-22 14:50:54 -04009210 default:
9211 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9212 "1419 Invalid HBA PCI-device group: 0x%x\n",
9213 dev_grp);
9214 return -ENODEV;
9215 break;
9216 }
9217 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
9218 return 0;
9219}
James Smart92d7f7b2007-06-17 19:56:38 -05009220
James Smarta1efe162015-05-21 13:55:20 -04009221/**
James Smart895427b2017-02-12 13:52:30 -08009222 * lpfc_sli4_calc_ring - Calculates which ring to use
James Smarta1efe162015-05-21 13:55:20 -04009223 * @phba: Pointer to HBA context object.
James Smarta1efe162015-05-21 13:55:20 -04009224 * @piocb: Pointer to command iocb.
9225 *
James Smart895427b2017-02-12 13:52:30 -08009226 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
9227 * hba_wqidx, thus we need to calculate the corresponding ring.
James Smarta1efe162015-05-21 13:55:20 -04009228 * Since ABORTS must go on the same WQ as the command they are
James Smart895427b2017-02-12 13:52:30 -08009229 * aborting, we use the command's hba_wqidx.
James Smarta1efe162015-05-21 13:55:20 -04009230 */
James Smart895427b2017-02-12 13:52:30 -08009231struct lpfc_sli_ring *
9232lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
James Smart9bd2bff52014-09-03 12:57:30 -04009233{
James Smart895427b2017-02-12 13:52:30 -08009234 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
James Smart8b0dff12015-05-22 10:42:38 -04009235 if (!(phba->cfg_fof) ||
James Smart895427b2017-02-12 13:52:30 -08009236 (!(piocb->iocb_flag & LPFC_IO_FOF))) {
James Smart8b0dff12015-05-22 10:42:38 -04009237 if (unlikely(!phba->sli4_hba.fcp_wq))
James Smart895427b2017-02-12 13:52:30 -08009238 return NULL;
James Smart8b0dff12015-05-22 10:42:38 -04009239 /*
James Smart895427b2017-02-12 13:52:30 -08009240 * for abort iocb hba_wqidx should already
James Smart8b0dff12015-05-22 10:42:38 -04009241 * be setup based on what work queue we used.
9242 */
9243 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
James Smart895427b2017-02-12 13:52:30 -08009244 piocb->hba_wqidx =
James Smart8b0dff12015-05-22 10:42:38 -04009245 lpfc_sli4_scmd_to_wqidx_distr(phba,
9246 piocb->context1);
James Smart895427b2017-02-12 13:52:30 -08009247 return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
James Smart8b0dff12015-05-22 10:42:38 -04009248 } else {
9249 if (unlikely(!phba->sli4_hba.oas_wq))
James Smart895427b2017-02-12 13:52:30 -08009250 return NULL;
9251 piocb->hba_wqidx = 0;
9252 return phba->sli4_hba.oas_wq->pring;
James Smart9bd2bff52014-09-03 12:57:30 -04009253 }
James Smart895427b2017-02-12 13:52:30 -08009254 } else {
9255 if (unlikely(!phba->sli4_hba.els_wq))
9256 return NULL;
9257 piocb->hba_wqidx = 0;
9258 return phba->sli4_hba.els_wq->pring;
James Smart9bd2bff52014-09-03 12:57:30 -04009259 }
James Smart9bd2bff52014-09-03 12:57:30 -04009260}
9261
James Smarte59058c2008-08-24 21:49:00 -04009262/**
James Smart3621a712009-04-06 18:47:14 -04009263 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
James Smarte59058c2008-08-24 21:49:00 -04009264 * @phba: Pointer to HBA context object.
9265 * @ring_number: SLI ring number to issue iocb on.
9266 * @piocb: Pointer to command iocb.
9267 * @flag: Flag indicating if this command can be put into txq.
9268 *
9269 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb. It
9270 * takes the appropriate lock (the hbalock for SLI3, the ring_lock for
9271 * SLI4), calls __lpfc_sli_issue_iocb, and returns whatever that
9272 * routine returns. This wrapper is used by
9273 * callers which do not already hold the lock.
9274 **/
James Smart92d7f7b2007-06-17 19:56:38 -05009275int
James Smart3772a992009-05-22 14:50:54 -04009276lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
James Smart92d7f7b2007-06-17 19:56:38 -05009277 struct lpfc_iocbq *piocb, uint32_t flag)
9278{
James Smart895427b2017-02-12 13:52:30 -08009279 struct lpfc_hba_eq_hdl *hba_eq_hdl;
James Smart2a76a282012-08-03 12:35:54 -04009280 struct lpfc_sli_ring *pring;
James Smartba20c852012-08-03 12:36:52 -04009281 struct lpfc_queue *fpeq;
9282 struct lpfc_eqe *eqe;
James Smart92d7f7b2007-06-17 19:56:38 -05009283 unsigned long iflags;
James Smart2a76a282012-08-03 12:35:54 -04009284 int rc, idx;
James Smart92d7f7b2007-06-17 19:56:38 -05009285
James Smart7e56aa22012-08-03 12:35:34 -04009286 if (phba->sli_rev == LPFC_SLI_REV4) {
James Smart895427b2017-02-12 13:52:30 -08009287 pring = lpfc_sli4_calc_ring(phba, piocb);
9288 if (unlikely(pring == NULL))
James Smart9bd2bff52014-09-03 12:57:30 -04009289 return IOCB_ERROR;
James Smartba20c852012-08-03 12:36:52 -04009290
James Smart9bd2bff52014-09-03 12:57:30 -04009291 spin_lock_irqsave(&pring->ring_lock, iflags);
9292 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9293 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smartba20c852012-08-03 12:36:52 -04009294
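		/* With fcp_look_ahead enabled, if no other context currently
		 * owns this EQ, poll its completions here and re-arm it
		 * rather than waiting for the interrupt handler.
		 */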
James Smart9bd2bff52014-09-03 12:57:30 -04009295 if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
James Smart895427b2017-02-12 13:52:30 -08009296 idx = piocb->hba_wqidx;
9297 hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx];
James Smartba20c852012-08-03 12:36:52 -04009298
James Smart895427b2017-02-12 13:52:30 -08009299 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) {
James Smartba20c852012-08-03 12:36:52 -04009300
James Smart9bd2bff52014-09-03 12:57:30 -04009301 /* Get associated EQ with this index */
9302 fpeq = phba->sli4_hba.hba_eq[idx];
James Smartba20c852012-08-03 12:36:52 -04009303
James Smart9bd2bff52014-09-03 12:57:30 -04009304 /* Turn off interrupts from this EQ */
9305 lpfc_sli4_eq_clr_intr(fpeq);
James Smartba20c852012-08-03 12:36:52 -04009306
James Smart9bd2bff52014-09-03 12:57:30 -04009307 /*
9308 * Process all the events on FCP EQ
9309 */
9310 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9311 lpfc_sli4_hba_handle_eqe(phba,
9312 eqe, idx);
9313 fpeq->EQ_processed++;
James Smartba20c852012-08-03 12:36:52 -04009314 }
James Smartba20c852012-08-03 12:36:52 -04009315
James Smart9bd2bff52014-09-03 12:57:30 -04009316 /* Always clear and re-arm the EQ */
9317 lpfc_sli4_eq_release(fpeq,
9318 LPFC_QUEUE_REARM);
9319 }
James Smart895427b2017-02-12 13:52:30 -08009320 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
James Smart2a76a282012-08-03 12:35:54 -04009321 }
James Smart7e56aa22012-08-03 12:35:34 -04009322 } else {
9323 /* For now, SLI2/3 will still use hbalock */
9324 spin_lock_irqsave(&phba->hbalock, iflags);
9325 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9326 spin_unlock_irqrestore(&phba->hbalock, iflags);
9327 }
James Smart92d7f7b2007-06-17 19:56:38 -05009328 return rc;
9329}
9330
James Smarte59058c2008-08-24 21:49:00 -04009331/**
James Smart3621a712009-04-06 18:47:14 -04009332 * lpfc_extra_ring_setup - Extra ring setup function
James Smarte59058c2008-08-24 21:49:00 -04009333 * @phba: Pointer to HBA context object.
9334 *
9335 * This function is called while driver attaches with the
9336 * HBA to setup the extra ring. The extra ring is used
9337 * only when driver needs to support target mode functionality
9338 * or IP over FC functionalities.
9339 *
James Smart895427b2017-02-12 13:52:30 -08009340 * This function is called with no lock held. SLI3 only.
James Smarte59058c2008-08-24 21:49:00 -04009341 **/
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -05009342static int
9343lpfc_extra_ring_setup( struct lpfc_hba *phba)
9344{
9345 struct lpfc_sli *psli;
9346 struct lpfc_sli_ring *pring;
9347
9348 psli = &phba->sli;
9349
9350 /* Adjust cmd/rsp ring iocb entries more evenly */
James Smarta4bc3372006-12-02 13:34:16 -05009351
9352 /* Take some away from the FCP ring */
James Smart895427b2017-02-12 13:52:30 -08009353 pring = &psli->sli3_ring[LPFC_FCP_RING];
James Smart7e56aa22012-08-03 12:35:34 -04009354 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9355 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9356 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9357 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -05009358
James Smarta4bc3372006-12-02 13:34:16 -05009359 /* and give them to the extra ring */
James Smart895427b2017-02-12 13:52:30 -08009360 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
James Smarta4bc3372006-12-02 13:34:16 -05009361
James Smart7e56aa22012-08-03 12:35:34 -04009362 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9363 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9364 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9365 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -05009366
9367 /* Setup default profile for this ring */
9368 pring->iotag_max = 4096;
9369 pring->num_mask = 1;
9370 pring->prt[0].profile = 0; /* Mask 0 */
James Smarta4bc3372006-12-02 13:34:16 -05009371 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
9372 pring->prt[0].type = phba->cfg_multi_ring_type;
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -05009373 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
9374 return 0;
9375}
9376
James Smartcb69f7d2011-12-13 13:21:57 -05009377/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
9378 * @phba: Pointer to HBA context object.
9379 * @iocbq: Pointer to iocb object.
9380 *
9381 * The async_event handler calls this routine when it receives
9382 * an ASYNC_STATUS_CN event from the port. The port generates
9383 * this event when an Abort Sequence request to an rport fails
9384 * twice in succession. The abort could be originated by the
9385 * driver or by the port. The ABTS could have been for an ELS
9386 * or FCP IO. The port only generates this event when an ABTS
9387 * fails to complete after one retry.
9388 */
9389static void
9390lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
9391 struct lpfc_iocbq *iocbq)
9392{
9393 struct lpfc_nodelist *ndlp = NULL;
9394 uint16_t rpi = 0, vpi = 0;
9395 struct lpfc_vport *vport = NULL;
9396
9397 /* The rpi in the ulpContext is vport-sensitive. */
9398 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
9399 rpi = iocbq->iocb.ulpContext;
9400
9401 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9402 "3092 Port generated ABTS async event "
9403 "on vpi %d rpi %d status 0x%x\n",
9404 vpi, rpi, iocbq->iocb.ulpStatus);
9405
9406 vport = lpfc_find_vport_by_vpid(phba, vpi);
9407 if (!vport)
9408 goto err_exit;
9409 ndlp = lpfc_findnode_rpi(vport, rpi);
9410 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
9411 goto err_exit;
9412
9413 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
9414 lpfc_sli_abts_recover_port(vport, ndlp);
9415 return;
9416
9417 err_exit:
9418 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9419 "3095 Event Context not found, no "
9420 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
9421 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
9422 vpi, rpi);
9423}
9424
9425/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
9426 * @phba: pointer to HBA context object.
9427 * @ndlp: nodelist pointer for the impacted rport.
9428 * @axri: pointer to the wcqe containing the failed exchange.
9429 *
9430 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
9431 * port. The port generates this event when an abort exchange request to an
9432 * rport fails twice in succession with no reply. The abort could be originated
9433 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
9434 */
9435void
9436lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
9437 struct lpfc_nodelist *ndlp,
9438 struct sli4_wcqe_xri_aborted *axri)
9439{
9440 struct lpfc_vport *vport;
James Smart5c1db2a2012-03-01 22:34:36 -05009441 uint32_t ext_status = 0;
James Smartcb69f7d2011-12-13 13:21:57 -05009442
James Smart6b5151f2012-01-18 16:24:06 -05009443 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
James Smartcb69f7d2011-12-13 13:21:57 -05009444 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9445 "3115 Node Context not found, driver "
9446 "ignoring abts err event\n");
James Smart6b5151f2012-01-18 16:24:06 -05009447 return;
9448 }
9449
James Smartcb69f7d2011-12-13 13:21:57 -05009450 vport = ndlp->vport;
9451 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9452 "3116 Port generated FCP XRI ABORT event on "
James Smart5c1db2a2012-03-01 22:34:36 -05009453 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
James Smart8e668af2013-05-31 17:04:28 -04009454 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
James Smartcb69f7d2011-12-13 13:21:57 -05009455 bf_get(lpfc_wcqe_xa_xri, axri),
James Smart5c1db2a2012-03-01 22:34:36 -05009456 bf_get(lpfc_wcqe_xa_status, axri),
9457 axri->parameter);
James Smartcb69f7d2011-12-13 13:21:57 -05009458
James Smart5c1db2a2012-03-01 22:34:36 -05009459 /*
9460 * Catch the ABTS protocol failure case. Older OCe FW releases returned
9461 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
9462 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
9463 */
James Smarte3d2b802012-08-14 14:25:43 -04009464 ext_status = axri->parameter & IOERR_PARAM_MASK;
James Smart5c1db2a2012-03-01 22:34:36 -05009465 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
9466 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
James Smartcb69f7d2011-12-13 13:21:57 -05009467 lpfc_sli_abts_recover_port(vport, ndlp);
9468}
9469
James Smarte59058c2008-08-24 21:49:00 -04009470/**
James Smart3621a712009-04-06 18:47:14 -04009471 * lpfc_sli_async_event_handler - ASYNC iocb handler function
James Smarte59058c2008-08-24 21:49:00 -04009472 * @phba: Pointer to HBA context object.
9473 * @pring: Pointer to driver SLI ring object.
9474 * @iocbq: Pointer to iocb object.
9475 *
9476 * This function is called by the slow ring event handler
9477 * function when there is an ASYNC event iocb in the ring.
9478 * This function is called with no lock held.
9479 * This function handles temperature related ASYNC events (decoding the
9480 * sensor event message and posting events for management applications)
9481 * and forwards ASYNC_STATUS_CN failed-ABTS events to lpfc_sli_abts_err_handler.
9482 **/
James Smart98c9ea52007-10-27 13:37:33 -04009483static void
James Smart57127f12007-10-27 13:37:05 -04009484lpfc_sli_async_event_handler(struct lpfc_hba * phba,
9485 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
9486{
9487 IOCB_t *icmd;
9488 uint16_t evt_code;
James Smart57127f12007-10-27 13:37:05 -04009489 struct temp_event temp_event_data;
9490 struct Scsi_Host *shost;
James Smarta257bf92009-04-06 18:48:10 -04009491 uint32_t *iocb_w;
James Smart57127f12007-10-27 13:37:05 -04009492
9493 icmd = &iocbq->iocb;
9494 evt_code = icmd->un.asyncstat.evt_code;
James Smart57127f12007-10-27 13:37:05 -04009495
James Smartcb69f7d2011-12-13 13:21:57 -05009496 switch (evt_code) {
9497 case ASYNC_TEMP_WARN:
9498 case ASYNC_TEMP_SAFE:
9499 temp_event_data.data = (uint32_t) icmd->ulpContext;
9500 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
9501 if (evt_code == ASYNC_TEMP_WARN) {
9502 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
9503 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
9504 "0347 Adapter is very hot, please take "
9505 "corrective action. temperature : %d Celsius\n",
9506 (uint32_t) icmd->ulpContext);
9507 } else {
9508 temp_event_data.event_code = LPFC_NORMAL_TEMP;
9509 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
9510 "0340 Adapter temperature is OK now. "
9511 "temperature : %d Celsius\n",
9512 (uint32_t) icmd->ulpContext);
9513 }
9514
9515 /* Send temperature change event to applications */
9516 shost = lpfc_shost_from_vport(phba->pport);
9517 fc_host_post_vendor_event(shost, fc_get_event_number(),
9518 sizeof(temp_event_data), (char *) &temp_event_data,
9519 LPFC_NL_VENDOR_ID);
9520 break;
9521 case ASYNC_STATUS_CN:
9522 lpfc_sli_abts_err_handler(phba, iocbq);
9523 break;
9524 default:
James Smarta257bf92009-04-06 18:48:10 -04009525 iocb_w = (uint32_t *) icmd;
James Smartcb69f7d2011-12-13 13:21:57 -05009526 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart76bb24e2007-10-27 13:38:00 -04009527 "0346 Ring %d handler: unexpected ASYNC_STATUS"
James Smarte4e74272009-07-19 10:01:38 -04009528 " evt_code 0x%x\n"
James Smarta257bf92009-04-06 18:48:10 -04009529 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
9530 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
9531 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
9532 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
James Smartcb69f7d2011-12-13 13:21:57 -05009533 pring->ringno, icmd->un.asyncstat.evt_code,
James Smarta257bf92009-04-06 18:48:10 -04009534 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
9535 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
9536 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
9537 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
9538
James Smartcb69f7d2011-12-13 13:21:57 -05009539 break;
James Smart57127f12007-10-27 13:37:05 -04009540 }
James Smart57127f12007-10-27 13:37:05 -04009541}
9542
9543
James Smarte59058c2008-08-24 21:49:00 -04009544/**
James Smart895427b2017-02-12 13:52:30 -08009545 * lpfc_sli4_setup - SLI ring setup function
James Smarte59058c2008-08-24 21:49:00 -04009546 * @phba: Pointer to HBA context object.
9547 *
9548 * lpfc_sli4_setup sets up the unsolicited receive masks (ELS and CT
9549 * events) for the SLI4 ELS ring. This function is
9550 * called while the driver attaches to the HBA and before the
9551 * interrupts are enabled. So there is no need for locking.
9552 *
9553 * This function always returns 0.
9554 **/
dea31012005-04-17 16:05:31 -05009555int
James Smart895427b2017-02-12 13:52:30 -08009556lpfc_sli4_setup(struct lpfc_hba *phba)
9557{
9558 struct lpfc_sli_ring *pring;
9559
9560 pring = phba->sli4_hba.els_wq->pring;
9561 pring->num_mask = LPFC_MAX_RING_MASK;
9562 pring->prt[0].profile = 0; /* Mask 0 */
9563 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
9564 pring->prt[0].type = FC_TYPE_ELS;
9565 pring->prt[0].lpfc_sli_rcv_unsol_event =
9566 lpfc_els_unsol_event;
9567 pring->prt[1].profile = 0; /* Mask 1 */
9568 pring->prt[1].rctl = FC_RCTL_ELS_REP;
9569 pring->prt[1].type = FC_TYPE_ELS;
9570 pring->prt[1].lpfc_sli_rcv_unsol_event =
9571 lpfc_els_unsol_event;
9572 pring->prt[2].profile = 0; /* Mask 2 */
9573 /* NameServer Inquiry */
9574 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
9575 /* NameServer */
9576 pring->prt[2].type = FC_TYPE_CT;
9577 pring->prt[2].lpfc_sli_rcv_unsol_event =
9578 lpfc_ct_unsol_event;
9579 pring->prt[3].profile = 0; /* Mask 3 */
9580 /* NameServer response */
9581 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
9582 /* NameServer */
9583 pring->prt[3].type = FC_TYPE_CT;
9584 pring->prt[3].lpfc_sli_rcv_unsol_event =
9585 lpfc_ct_unsol_event;
9586 return 0;
9587}
9588
9589/**
9590 * lpfc_sli_setup - SLI ring setup function
9591 * @phba: Pointer to HBA context object.
9592 *
9593 * lpfc_sli_setup sets up the rings of the SLI interface with the
9594 * number of iocbs per ring and the iotag ranges. This function is
9595 * called while the driver attaches to the HBA and before the
9596 * interrupts are enabled. So there is no need for locking.
9597 *
9598 * This function always returns 0. SLI3 only.
9599 **/
9600int
dea31012005-04-17 16:05:31 -05009601lpfc_sli_setup(struct lpfc_hba *phba)
9602{
James Smarted957682007-06-17 19:56:37 -05009603 int i, totiocbsize = 0;
dea31012005-04-17 16:05:31 -05009604 struct lpfc_sli *psli = &phba->sli;
9605 struct lpfc_sli_ring *pring;
9606
James Smart2a76a282012-08-03 12:35:54 -04009607 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
dea31012005-04-17 16:05:31 -05009608 psli->sli_flag = 0;
dea31012005-04-17 16:05:31 -05009609
James Bottomley604a3e32005-10-29 10:28:33 -05009610 psli->iocbq_lookup = NULL;
9611 psli->iocbq_lookup_len = 0;
9612 psli->last_iotag = 0;
9613
dea31012005-04-17 16:05:31 -05009614 for (i = 0; i < psli->num_rings; i++) {
James Smart895427b2017-02-12 13:52:30 -08009615 pring = &psli->sli3_ring[i];
dea31012005-04-17 16:05:31 -05009616 switch (i) {
9617 case LPFC_FCP_RING: /* ring 0 - FCP */
9618 /* numCiocb and numRiocb are used in config_port */
James Smart7e56aa22012-08-03 12:35:34 -04009619 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
9620 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
9621 pring->sli.sli3.numCiocb +=
9622 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9623 pring->sli.sli3.numRiocb +=
9624 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9625 pring->sli.sli3.numCiocb +=
9626 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9627 pring->sli.sli3.numRiocb +=
9628 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9629 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -05009630 SLI3_IOCB_CMD_SIZE :
9631 SLI2_IOCB_CMD_SIZE;
James Smart7e56aa22012-08-03 12:35:34 -04009632 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -05009633 SLI3_IOCB_RSP_SIZE :
9634 SLI2_IOCB_RSP_SIZE;
dea31012005-04-17 16:05:31 -05009635 pring->iotag_ctr = 0;
9636 pring->iotag_max =
James Smart92d7f7b2007-06-17 19:56:38 -05009637 (phba->cfg_hba_queue_depth * 2);
dea31012005-04-17 16:05:31 -05009638 pring->fast_iotag = pring->iotag_max;
9639 pring->num_mask = 0;
9640 break;
James Smarta4bc3372006-12-02 13:34:16 -05009641 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
dea31012005-04-17 16:05:31 -05009642 /* numCiocb and numRiocb are used in config_port */
James Smart7e56aa22012-08-03 12:35:34 -04009643 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
9644 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
9645 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -05009646 SLI3_IOCB_CMD_SIZE :
9647 SLI2_IOCB_CMD_SIZE;
James Smart7e56aa22012-08-03 12:35:34 -04009648 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -05009649 SLI3_IOCB_RSP_SIZE :
9650 SLI2_IOCB_RSP_SIZE;
James Smart2e0fef82007-06-17 19:56:36 -05009651 pring->iotag_max = phba->cfg_hba_queue_depth;
dea31012005-04-17 16:05:31 -05009652 pring->num_mask = 0;
9653 break;
9654 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
9655 /* numCiocb and numRiocb are used in config_port */
James Smart7e56aa22012-08-03 12:35:34 -04009656 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
9657 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
9658 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -05009659 SLI3_IOCB_CMD_SIZE :
9660 SLI2_IOCB_CMD_SIZE;
James Smart7e56aa22012-08-03 12:35:34 -04009661 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -05009662 SLI3_IOCB_RSP_SIZE :
9663 SLI2_IOCB_RSP_SIZE;
dea31012005-04-17 16:05:31 -05009664 pring->fast_iotag = 0;
9665 pring->iotag_ctr = 0;
9666 pring->iotag_max = 4096;
James Smart57127f12007-10-27 13:37:05 -04009667 pring->lpfc_sli_rcv_async_status =
9668 lpfc_sli_async_event_handler;
James Smart6669f9b2009-10-02 15:16:45 -04009669 pring->num_mask = LPFC_MAX_RING_MASK;
dea31012005-04-17 16:05:31 -05009670 pring->prt[0].profile = 0; /* Mask 0 */
James Smart6a9c52c2009-10-02 15:16:51 -04009671 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
9672 pring->prt[0].type = FC_TYPE_ELS;
dea31012005-04-17 16:05:31 -05009673 pring->prt[0].lpfc_sli_rcv_unsol_event =
James Smart92d7f7b2007-06-17 19:56:38 -05009674 lpfc_els_unsol_event;
dea31012005-04-17 16:05:31 -05009675 pring->prt[1].profile = 0; /* Mask 1 */
James Smart6a9c52c2009-10-02 15:16:51 -04009676 pring->prt[1].rctl = FC_RCTL_ELS_REP;
9677 pring->prt[1].type = FC_TYPE_ELS;
dea31012005-04-17 16:05:31 -05009678 pring->prt[1].lpfc_sli_rcv_unsol_event =
James Smart92d7f7b2007-06-17 19:56:38 -05009679 lpfc_els_unsol_event;
dea31012005-04-17 16:05:31 -05009680 pring->prt[2].profile = 0; /* Mask 2 */
9681 /* NameServer Inquiry */
James Smart6a9c52c2009-10-02 15:16:51 -04009682 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
dea31012005-04-17 16:05:31 -05009683 /* NameServer */
James Smart6a9c52c2009-10-02 15:16:51 -04009684 pring->prt[2].type = FC_TYPE_CT;
dea31012005-04-17 16:05:31 -05009685 pring->prt[2].lpfc_sli_rcv_unsol_event =
James Smart92d7f7b2007-06-17 19:56:38 -05009686 lpfc_ct_unsol_event;
dea31012005-04-17 16:05:31 -05009687 pring->prt[3].profile = 0; /* Mask 3 */
9688 /* NameServer response */
James Smart6a9c52c2009-10-02 15:16:51 -04009689 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
dea31012005-04-17 16:05:31 -05009690 /* NameServer */
James Smart6a9c52c2009-10-02 15:16:51 -04009691 pring->prt[3].type = FC_TYPE_CT;
dea31012005-04-17 16:05:31 -05009692 pring->prt[3].lpfc_sli_rcv_unsol_event =
James Smart92d7f7b2007-06-17 19:56:38 -05009693 lpfc_ct_unsol_event;
dea31012005-04-17 16:05:31 -05009694 break;
9695 }
James Smart7e56aa22012-08-03 12:35:34 -04009696 totiocbsize += (pring->sli.sli3.numCiocb *
9697 pring->sli.sli3.sizeCiocb) +
9698 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
dea31012005-04-17 16:05:31 -05009699 }
James Smarted957682007-06-17 19:56:37 -05009700 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
dea31012005-04-17 16:05:31 -05009701 /* Too many cmd / rsp ring entries in SLI2 SLIM */
James Smarte8b62012007-08-02 11:10:09 -04009702 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
9703 "SLI2 SLIM Data: x%x x%lx\n",
9704 phba->brd_no, totiocbsize,
9705 (unsigned long) MAX_SLIM_IOCB_SIZE);
dea31012005-04-17 16:05:31 -05009706 }
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -05009707 if (phba->cfg_multi_ring_support == 2)
9708 lpfc_extra_ring_setup(phba);
dea31012005-04-17 16:05:31 -05009709
9710 return 0;
9711}
9712
James Smarte59058c2008-08-24 21:49:00 -04009713/**
James Smart895427b2017-02-12 13:52:30 -08009714 * lpfc_sli4_queue_init - Queue initialization function
James Smarte59058c2008-08-24 21:49:00 -04009715 * @phba: Pointer to HBA context object.
9716 *
James Smart895427b2017-02-12 13:52:30 -08009717 * lpfc_sli4_queue_init initializes the mailbox command lists and the
James Smarte59058c2008-08-24 21:49:00 -04009718 * iocb lists (txq/txcmplq) of the ring associated with each SLI4 WQ.
9719 * This function is called during the initialization of the SLI
9720 * interface of an HBA.
9721 * This function is called with no lock held.
9723 **/
James Smart895427b2017-02-12 13:52:30 -08009724void
9725lpfc_sli4_queue_init(struct lpfc_hba *phba)
9726{
9727 struct lpfc_sli *psli;
9728 struct lpfc_sli_ring *pring;
9729 int i;
9730
9731 psli = &phba->sli;
9732 spin_lock_irq(&phba->hbalock);
9733 INIT_LIST_HEAD(&psli->mboxq);
9734 INIT_LIST_HEAD(&psli->mboxq_cmpl);
9735 /* Initialize list headers for txq and txcmplq as double linked lists */
9736 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
9737 pring = phba->sli4_hba.fcp_wq[i]->pring;
9738 pring->flag = 0;
9739 pring->ringno = LPFC_FCP_RING;
9740 INIT_LIST_HEAD(&pring->txq);
9741 INIT_LIST_HEAD(&pring->txcmplq);
9742 INIT_LIST_HEAD(&pring->iocb_continueq);
9743 spin_lock_init(&pring->ring_lock);
9744 }
9745 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
9746 pring = phba->sli4_hba.nvme_wq[i]->pring;
9747 pring->flag = 0;
9748 pring->ringno = LPFC_FCP_RING;
9749 INIT_LIST_HEAD(&pring->txq);
9750 INIT_LIST_HEAD(&pring->txcmplq);
9751 INIT_LIST_HEAD(&pring->iocb_continueq);
9752 spin_lock_init(&pring->ring_lock);
9753 }
9754 pring = phba->sli4_hba.els_wq->pring;
9755 pring->flag = 0;
9756 pring->ringno = LPFC_ELS_RING;
9757 INIT_LIST_HEAD(&pring->txq);
9758 INIT_LIST_HEAD(&pring->txcmplq);
9759 INIT_LIST_HEAD(&pring->iocb_continueq);
9760 spin_lock_init(&pring->ring_lock);
9761
9762 if (phba->cfg_nvme_io_channel) {
9763 pring = phba->sli4_hba.nvmels_wq->pring;
9764 pring->flag = 0;
9765 pring->ringno = LPFC_ELS_RING;
9766 INIT_LIST_HEAD(&pring->txq);
9767 INIT_LIST_HEAD(&pring->txcmplq);
9768 INIT_LIST_HEAD(&pring->iocb_continueq);
9769 spin_lock_init(&pring->ring_lock);
9770 }
9771
9772 if (phba->cfg_fof) {
9773 pring = phba->sli4_hba.oas_wq->pring;
9774 pring->flag = 0;
9775 pring->ringno = LPFC_FCP_RING;
9776 INIT_LIST_HEAD(&pring->txq);
9777 INIT_LIST_HEAD(&pring->txcmplq);
9778 INIT_LIST_HEAD(&pring->iocb_continueq);
9779 spin_lock_init(&pring->ring_lock);
9780 }
9781
9782 spin_unlock_irq(&phba->hbalock);
9783}
9784
9785/**
9786 * lpfc_sli_queue_init - Queue initialization function
9787 * @phba: Pointer to HBA context object.
9788 *
9789 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
9790 * ring. This function also initializes ring indices of each ring.
9791 * This function is called during the initialization of the SLI
9792 * interface of an HBA.
9793 * This function is called with no lock held.
9795 **/
9796void
9797lpfc_sli_queue_init(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05009798{
9799 struct lpfc_sli *psli;
9800 struct lpfc_sli_ring *pring;
James Bottomley604a3e32005-10-29 10:28:33 -05009801 int i;
dea31012005-04-17 16:05:31 -05009802
9803 psli = &phba->sli;
James Smart2e0fef82007-06-17 19:56:36 -05009804 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05009805 INIT_LIST_HEAD(&psli->mboxq);
James Smart92d7f7b2007-06-17 19:56:38 -05009806 INIT_LIST_HEAD(&psli->mboxq_cmpl);
dea31012005-04-17 16:05:31 -05009807 /* Initialize list headers for txq and txcmplq as double linked lists */
9808 for (i = 0; i < psli->num_rings; i++) {
James Smart895427b2017-02-12 13:52:30 -08009809 pring = &psli->sli3_ring[i];
dea31012005-04-17 16:05:31 -05009810 pring->ringno = i;
James Smart7e56aa22012-08-03 12:35:34 -04009811 pring->sli.sli3.next_cmdidx = 0;
9812 pring->sli.sli3.local_getidx = 0;
9813 pring->sli.sli3.cmdidx = 0;
dea31012005-04-17 16:05:31 -05009814 INIT_LIST_HEAD(&pring->iocb_continueq);
James Smart9c2face2008-01-11 01:53:18 -05009815 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
dea31012005-04-17 16:05:31 -05009816 INIT_LIST_HEAD(&pring->postbufq);
James Smart895427b2017-02-12 13:52:30 -08009817 pring->flag = 0;
9818 INIT_LIST_HEAD(&pring->txq);
9819 INIT_LIST_HEAD(&pring->txcmplq);
James Smart7e56aa22012-08-03 12:35:34 -04009820 spin_lock_init(&pring->ring_lock);
dea31012005-04-17 16:05:31 -05009821 }
James Smart2e0fef82007-06-17 19:56:36 -05009822 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05009823}
9824
James Smarte59058c2008-08-24 21:49:00 -04009825/**
James Smart04c68492009-05-22 14:52:52 -04009826 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
9827 * @phba: Pointer to HBA context object.
9828 *
9829 * This routine flushes the mailbox command subsystem. It will unconditionally
9830 * flush all the mailbox commands in the three possible stages in the mailbox
9831 * command sub-system: pending mailbox command queue; the outstanding mailbox
9832 * command; and completed mailbox command queue. It is caller's responsibility
9833 * to make sure that the driver is in the proper state to flush the mailbox
9834 * command sub-system. Namely, the posting of mailbox commands into the
9835 * pending mailbox command queue from the various clients must be stopped;
9836 * either the HBA is in a state that it will never work on the outstanding
9837 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
9838 * mailbox command has been completed.
9839 **/
9840static void
9841lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
9842{
9843 LIST_HEAD(completions);
9844 struct lpfc_sli *psli = &phba->sli;
9845 LPFC_MBOXQ_t *pmb;
9846 unsigned long iflag;
9847
9848 /* Flush all the mailbox commands in the mbox system */
9849 spin_lock_irqsave(&phba->hbalock, iflag);
9850 /* The pending mailbox command queue */
9851 list_splice_init(&phba->sli.mboxq, &completions);
9852 /* The outstanding active mailbox command */
9853 if (psli->mbox_active) {
9854 list_add_tail(&psli->mbox_active->list, &completions);
9855 psli->mbox_active = NULL;
9856 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9857 }
9858 /* The completed mailbox command queue */
9859 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
9860 spin_unlock_irqrestore(&phba->hbalock, iflag);
9861
9862 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
9863 while (!list_empty(&completions)) {
9864 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
9865 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
9866 if (pmb->mbox_cmpl)
9867 pmb->mbox_cmpl(phba, pmb);
9868 }
9869}
9870
9871/**
James Smart3621a712009-04-06 18:47:14 -04009872 * lpfc_sli_host_down - Vport cleanup function
James Smarte59058c2008-08-24 21:49:00 -04009873 * @vport: Pointer to virtual port object.
9874 *
9875 * lpfc_sli_host_down is called to clean up the resources
9876 * associated with a vport before destroying virtual
9877 * port data structures.
9878 * This function does following operations:
9879 * - Free discovery resources associated with this virtual
9880 * port.
9881 * - Free iocbs associated with this virtual port in
9882 * the txq.
9883 * - Send abort for all iocb commands associated with this
9884 * vport in txcmplq.
9885 *
9886 * This function is called with no lock held and always returns 1.
9887 **/
dea31012005-04-17 16:05:31 -05009888int
James Smart92d7f7b2007-06-17 19:56:38 -05009889lpfc_sli_host_down(struct lpfc_vport *vport)
9890{
James Smart858c9f62007-06-17 19:56:39 -05009891 LIST_HEAD(completions);
James Smart92d7f7b2007-06-17 19:56:38 -05009892 struct lpfc_hba *phba = vport->phba;
9893 struct lpfc_sli *psli = &phba->sli;
James Smart895427b2017-02-12 13:52:30 -08009894 struct lpfc_queue *qp = NULL;
James Smart92d7f7b2007-06-17 19:56:38 -05009895 struct lpfc_sli_ring *pring;
9896 struct lpfc_iocbq *iocb, *next_iocb;
James Smart92d7f7b2007-06-17 19:56:38 -05009897 int i;
9898 unsigned long flags = 0;
9899 uint16_t prev_pring_flag;
9900
9901 lpfc_cleanup_discovery_resources(vport);
9902
9903 spin_lock_irqsave(&phba->hbalock, flags);
James Smart92d7f7b2007-06-17 19:56:38 -05009904
James Smart895427b2017-02-12 13:52:30 -08009905 /*
9906 * Error everything on the txq since these iocbs
9907 * have not been given to the FW yet.
9908 * Also issue ABTS for everything on the txcmplq
9909 */
9910 if (phba->sli_rev != LPFC_SLI_REV4) {
9911 for (i = 0; i < psli->num_rings; i++) {
9912 pring = &psli->sli3_ring[i];
9913 prev_pring_flag = pring->flag;
9914 /* Only slow rings */
9915 if (pring->ringno == LPFC_ELS_RING) {
9916 pring->flag |= LPFC_DEFERRED_RING_EVENT;
9917 /* Set the lpfc data pending flag */
9918 set_bit(LPFC_DATA_READY, &phba->data_flags);
9919 }
9920 list_for_each_entry_safe(iocb, next_iocb,
9921 &pring->txq, list) {
9922 if (iocb->vport != vport)
9923 continue;
9924 list_move_tail(&iocb->list, &completions);
9925 }
9926 list_for_each_entry_safe(iocb, next_iocb,
9927 &pring->txcmplq, list) {
9928 if (iocb->vport != vport)
9929 continue;
9930 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
9931 }
9932 pring->flag = prev_pring_flag;
James Smart92d7f7b2007-06-17 19:56:38 -05009933 }
James Smart895427b2017-02-12 13:52:30 -08009934 } else {
9935 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
9936 pring = qp->pring;
9937 if (!pring)
9938 continue;
9939 if (pring == phba->sli4_hba.els_wq->pring) {
9940 pring->flag |= LPFC_DEFERRED_RING_EVENT;
9941 /* Set the lpfc data pending flag */
9942 set_bit(LPFC_DATA_READY, &phba->data_flags);
9943 }
9944 prev_pring_flag = pring->flag;
9945 spin_lock_irq(&pring->ring_lock);
9946 list_for_each_entry_safe(iocb, next_iocb,
9947 &pring->txq, list) {
9948 if (iocb->vport != vport)
9949 continue;
9950 list_move_tail(&iocb->list, &completions);
9951 }
9952 spin_unlock_irq(&pring->ring_lock);
9953 list_for_each_entry_safe(iocb, next_iocb,
9954 &pring->txcmplq, list) {
9955 if (iocb->vport != vport)
9956 continue;
9957 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
9958 }
9959 pring->flag = prev_pring_flag;
9960 }
James Smart92d7f7b2007-06-17 19:56:38 -05009961 }
James Smart92d7f7b2007-06-17 19:56:38 -05009962 spin_unlock_irqrestore(&phba->hbalock, flags);
9963
James Smarta257bf92009-04-06 18:48:10 -04009964 /* Cancel all the IOCBs from the completions list */
9965 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9966 IOERR_SLI_DOWN);
James Smart92d7f7b2007-06-17 19:56:38 -05009967 return 1;
9968}
9969
James Smarte59058c2008-08-24 21:49:00 -04009970/**
James Smart3621a712009-04-06 18:47:14 -04009971 * lpfc_sli_hba_down - Resource cleanup function for the HBA
James Smarte59058c2008-08-24 21:49:00 -04009972 * @phba: Pointer to HBA context object.
9973 *
9974 * This function cleans up all iocb, buffers, mailbox commands
9975 * while shutting down the HBA. This function is called with no
9976 * lock held and always returns 1.
9977 * This function does the following to cleanup driver resources:
9978 * - Free discovery resources for each virtual port
9979 * - Cleanup any pending fabric iocbs
9980 * - Iterate through the iocb txq and free each entry
9981 * in the list.
9982 * - Free up any buffer posted to the HBA
9983 * - Free mailbox commands in the mailbox queue.
9984 **/
James Smart92d7f7b2007-06-17 19:56:38 -05009985int
James Smart2e0fef82007-06-17 19:56:36 -05009986lpfc_sli_hba_down(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05009987{
James Smart2534ba72007-04-25 09:52:20 -04009988 LIST_HEAD(completions);
James Smart2e0fef82007-06-17 19:56:36 -05009989 struct lpfc_sli *psli = &phba->sli;
James Smart895427b2017-02-12 13:52:30 -08009990 struct lpfc_queue *qp = NULL;
dea31012005-04-17 16:05:31 -05009991 struct lpfc_sli_ring *pring;
James Smart0ff10d42008-01-11 01:52:36 -05009992 struct lpfc_dmabuf *buf_ptr;
dea31012005-04-17 16:05:31 -05009993 unsigned long flags = 0;
James Smart04c68492009-05-22 14:52:52 -04009994 int i;
9995
9996 /* Shutdown the mailbox command sub-system */
James Smart618a5232012-06-12 13:54:36 -04009997 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
dea31012005-04-17 16:05:31 -05009998
dea31012005-04-17 16:05:31 -05009999 lpfc_hba_down_prep(phba);
10000
James Smart92d7f7b2007-06-17 19:56:38 -050010001 lpfc_fabric_abort_hba(phba);
10002
James Smart2e0fef82007-06-17 19:56:36 -050010003 spin_lock_irqsave(&phba->hbalock, flags);
dea31012005-04-17 16:05:31 -050010004
James Smart895427b2017-02-12 13:52:30 -080010005 /*
10006 * Error everything on the txq since these iocbs
10007 * have not been given to the FW yet.
10008 */
10009 if (phba->sli_rev != LPFC_SLI_REV4) {
10010 for (i = 0; i < psli->num_rings; i++) {
10011 pring = &psli->sli3_ring[i];
10012 /* Only slow rings */
10013 if (pring->ringno == LPFC_ELS_RING) {
10014 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10015 /* Set the lpfc data pending flag */
10016 set_bit(LPFC_DATA_READY, &phba->data_flags);
10017 }
10018 list_splice_init(&pring->txq, &completions);
10019 }
10020 } else {
10021 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10022 pring = qp->pring;
10023 if (!pring)
10024 continue;
10025 spin_lock_irq(&pring->ring_lock);
10026 list_splice_init(&pring->txq, &completions);
10027 spin_unlock_irq(&pring->ring_lock);
10028 if (pring == phba->sli4_hba.els_wq->pring) {
10029 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10030 /* Set the lpfc data pending flag */
10031 set_bit(LPFC_DATA_READY, &phba->data_flags);
10032 }
10033 }
dea31012005-04-17 16:05:31 -050010034 }
James Smart2e0fef82007-06-17 19:56:36 -050010035 spin_unlock_irqrestore(&phba->hbalock, flags);
dea31012005-04-17 16:05:31 -050010036
James Smarta257bf92009-04-06 18:48:10 -040010037 /* Cancel all the IOCBs from the completions list */
10038 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10039 IOERR_SLI_DOWN);
James Smart2534ba72007-04-25 09:52:20 -040010040
James Smart0ff10d42008-01-11 01:52:36 -050010041 spin_lock_irqsave(&phba->hbalock, flags);
10042 list_splice_init(&phba->elsbuf, &completions);
10043 phba->elsbuf_cnt = 0;
10044 phba->elsbuf_prev_cnt = 0;
10045 spin_unlock_irqrestore(&phba->hbalock, flags);
10046
10047 while (!list_empty(&completions)) {
10048 list_remove_head(&completions, buf_ptr,
10049 struct lpfc_dmabuf, list);
10050 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
10051 kfree(buf_ptr);
10052 }
10053
dea31012005-04-17 16:05:31 -050010054 /* Return any active mbox cmds */
10055 del_timer_sync(&psli->mbox_tmo);
James Smart92d7f7b2007-06-17 19:56:38 -050010056
James Smartda0436e2009-05-22 14:51:39 -040010057 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
James Smart92d7f7b2007-06-17 19:56:38 -050010058 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
James Smartda0436e2009-05-22 14:51:39 -040010059 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
James Smart92d7f7b2007-06-17 19:56:38 -050010060
James Smartda0436e2009-05-22 14:51:39 -040010061 return 1;
10062}
James Smart92d7f7b2007-06-17 19:56:38 -050010063
James Smartda0436e2009-05-22 14:51:39 -040010064/**
James Smart3621a712009-04-06 18:47:14 -040010065 * lpfc_sli_pcimem_bcopy - SLI memory copy function
James Smarte59058c2008-08-24 21:49:00 -040010066 * @srcp: Source memory pointer.
10067 * @destp: Destination memory pointer.
10068 * @cnt: Number of words required to be copied.
10069 *
10070 * This function is used for copying data between driver memory
10071 * and the SLI memory. This function also changes the endianness
10072 * of each word if native endianness is different from SLI
10073 * endianness. This function can be called with or without
10074 * lock.
10075 **/
dea31012005-04-17 16:05:31 -050010076void
10077lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
10078{
10079 uint32_t *src = srcp;
10080 uint32_t *dest = destp;
10081 uint32_t ldata;
10082 int i;
10083
10084 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
10085 ldata = *src;
10086 ldata = le32_to_cpu(ldata);
10087 *dest = ldata;
10088 src++;
10089 dest++;
10090 }
10091}
10092
James Smarte59058c2008-08-24 21:49:00 -040010093
10094/**
James Smarta0c87cb2009-07-19 10:01:10 -040010095 * lpfc_sli_bemem_bcopy - SLI memory copy function
10096 * @srcp: Source memory pointer.
10097 * @destp: Destination memory pointer.
10098 * @cnt: Number of words required to be copied.
10099 *
10100 * This function is used for copying data between a data structure
10101 * with big endian representation to local endianness.
10102 * This function can be called with or without lock.
10103 **/
10104void
10105lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10106{
10107 uint32_t *src = srcp;
10108 uint32_t *dest = destp;
10109 uint32_t ldata;
10110 int i;
10111
10112 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10113 ldata = *src;
10114 ldata = be32_to_cpu(ldata);
10115 *dest = ldata;
10116 src++;
10117 dest++;
10118 }
10119}
10120
10121/**
James Smart3621a712009-04-06 18:47:14 -040010122 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
James Smarte59058c2008-08-24 21:49:00 -040010123 * @phba: Pointer to HBA context object.
10124 * @pring: Pointer to driver SLI ring object.
10125 * @mp: Pointer to driver buffer object.
10126 *
10127 * This function is called with no lock held.
10128 * It always return zero after adding the buffer to the postbufq
10129 * buffer list.
10130 **/
dea31012005-04-17 16:05:31 -050010131int
James Smart2e0fef82007-06-17 19:56:36 -050010132lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10133 struct lpfc_dmabuf *mp)
dea31012005-04-17 16:05:31 -050010134{
10135 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look
10136 * it up later */
James Smart2e0fef82007-06-17 19:56:36 -050010137 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050010138 list_add_tail(&mp->list, &pring->postbufq);
dea31012005-04-17 16:05:31 -050010139 pring->postbufq_cnt++;
James Smart2e0fef82007-06-17 19:56:36 -050010140 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050010141 return 0;
10142}
10143
James Smarte59058c2008-08-24 21:49:00 -040010144/**
James Smart3621a712009-04-06 18:47:14 -040010145 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
James Smarte59058c2008-08-24 21:49:00 -040010146 * @phba: Pointer to HBA context object.
10147 *
10148 * When HBQ is enabled, buffers are searched based on tags. This function
10149 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
10150 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
10151 * does not conflict with tags of buffer posted for unsolicited events.
10152 * The function returns the allocated tag. The function is called with
10153 * no locks held.
10154 **/
James Smart76bb24e2007-10-27 13:38:00 -040010155uint32_t
10156lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10157{
10158 spin_lock_irq(&phba->hbalock);
10159 phba->buffer_tag_count++;
10160 /*
10161 * Always set the QUE_BUFTAG_BIT to distinguish this tag
10162 * from a tag assigned by HBQ.
10163 */
10164 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10165 spin_unlock_irq(&phba->hbalock);
10166 return phba->buffer_tag_count;
10167}
10168
James Smarte59058c2008-08-24 21:49:00 -040010169/**
James Smart3621a712009-04-06 18:47:14 -040010170 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
James Smarte59058c2008-08-24 21:49:00 -040010171 * @phba: Pointer to HBA context object.
10172 * @pring: Pointer to driver SLI ring object.
10173 * @tag: Buffer tag.
10174 *
10175 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
10176 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
10177 * iocb is posted to the response ring with the tag of the buffer.
10178 * This function searches the pring->postbufq list using the tag
10179 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
10180 * iocb. If the buffer is found then lpfc_dmabuf object of the
10181 * buffer is returned to the caller else NULL is returned.
10182 * This function is called with no lock held.
10183 **/
James Smart76bb24e2007-10-27 13:38:00 -040010184struct lpfc_dmabuf *
10185lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10186 uint32_t tag)
10187{
10188 struct lpfc_dmabuf *mp, *next_mp;
10189 struct list_head *slp = &pring->postbufq;
10190
Lucas De Marchi25985ed2011-03-30 22:57:33 -030010191 /* Search postbufq, from the beginning, looking for a match on tag */
James Smart76bb24e2007-10-27 13:38:00 -040010192 spin_lock_irq(&phba->hbalock);
10193 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10194 if (mp->buffer_tag == tag) {
10195 list_del_init(&mp->list);
10196 pring->postbufq_cnt--;
10197 spin_unlock_irq(&phba->hbalock);
10198 return mp;
10199 }
10200 }
10201
10202 spin_unlock_irq(&phba->hbalock);
10203 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smartd7c255b2008-08-24 21:50:00 -040010204 "0402 Cannot find virtual addr for buffer tag on "
James Smart76bb24e2007-10-27 13:38:00 -040010205 "ring %d Data x%lx x%p x%p x%x\n",
10206 pring->ringno, (unsigned long) tag,
10207 slp->next, slp->prev, pring->postbufq_cnt);
10208
10209 return NULL;
10210}
dea31012005-04-17 16:05:31 -050010211
James Smarte59058c2008-08-24 21:49:00 -040010212/**
James Smart3621a712009-04-06 18:47:14 -040010213 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
James Smarte59058c2008-08-24 21:49:00 -040010214 * @phba: Pointer to HBA context object.
10215 * @pring: Pointer to driver SLI ring object.
10216 * @phys: DMA address of the buffer.
10217 *
10218 * This function searches the buffer list using the dma_address
10219 * of an unsolicited event to find the driver's lpfc_dmabuf object
10220 * corresponding to the dma_address. The function returns the
10221 * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
10222 * This function is called by the ct and els unsolicited event
10223 * handlers to get the buffer associated with the unsolicited
10224 * event.
10225 *
10226 * This function is called with no lock held.
10227 **/
dea31012005-04-17 16:05:31 -050010228struct lpfc_dmabuf *
10229lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10230 dma_addr_t phys)
10231{
10232 struct lpfc_dmabuf *mp, *next_mp;
10233 struct list_head *slp = &pring->postbufq;
10234
Lucas De Marchi25985ed2011-03-30 22:57:33 -030010235 /* Search postbufq, from the beginning, looking for a match on phys */
James Smart2e0fef82007-06-17 19:56:36 -050010236 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050010237 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10238 if (mp->phys == phys) {
10239 list_del_init(&mp->list);
10240 pring->postbufq_cnt--;
James Smart2e0fef82007-06-17 19:56:36 -050010241 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050010242 return mp;
10243 }
10244 }
10245
James Smart2e0fef82007-06-17 19:56:36 -050010246 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -050010247 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -040010248 "0410 Cannot find virtual addr for mapped buf on "
dea31012005-04-17 16:05:31 -050010249 "ring %d Data x%llx x%p x%p x%x\n",
James Smarte8b62012007-08-02 11:10:09 -040010250 pring->ringno, (unsigned long long)phys,
dea31012005-04-17 16:05:31 -050010251 slp->next, slp->prev, pring->postbufq_cnt);
10252 return NULL;
10253}
10254
James Smarte59058c2008-08-24 21:49:00 -040010255/**
James Smart3621a712009-04-06 18:47:14 -040010256 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
James Smarte59058c2008-08-24 21:49:00 -040010257 * @phba: Pointer to HBA context object.
10258 * @cmdiocb: Pointer to driver command iocb object.
10259 * @rspiocb: Pointer to driver response iocb object.
10260 *
10261 * This function is the completion handler for the abort iocbs for
10262 * ELS commands. This function is called from the ELS ring event
10263 * handler with no lock held. This function frees memory resources
10264 * associated with the abort iocb.
10265 **/
dea31012005-04-17 16:05:31 -050010266static void
James Smart2e0fef82007-06-17 19:56:36 -050010267lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10268 struct lpfc_iocbq *rspiocb)
dea31012005-04-17 16:05:31 -050010269{
James Smart2e0fef82007-06-17 19:56:36 -050010270 IOCB_t *irsp = &rspiocb->iocb;
James Smart2680eea2007-04-25 09:52:55 -040010271 uint16_t abort_iotag, abort_context;
James Smartff78d8f2011-12-13 13:21:35 -050010272 struct lpfc_iocbq *abort_iocb = NULL;
James Smart2680eea2007-04-25 09:52:55 -040010273
10274 if (irsp->ulpStatus) {
James Smartff78d8f2011-12-13 13:21:35 -050010275
10276 /*
10277 * Assume that the port already completed and returned, or
10278		 * will return the iocb. Just log the message.
10279 */
James Smart2680eea2007-04-25 09:52:55 -040010280 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
10281 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
10282
James Smart2e0fef82007-06-17 19:56:36 -050010283 spin_lock_irq(&phba->hbalock);
James Smart45ed1192009-10-02 15:17:02 -040010284 if (phba->sli_rev < LPFC_SLI_REV4) {
10285 if (abort_iotag != 0 &&
10286 abort_iotag <= phba->sli.last_iotag)
10287 abort_iocb =
10288 phba->sli.iocbq_lookup[abort_iotag];
10289 } else
10290 /* For sli4 the abort_tag is the XRI,
10291 * so the abort routine puts the iotag of the iocb
10292 * being aborted in the context field of the abort
10293 * IOCB.
10294 */
10295 abort_iocb = phba->sli.iocbq_lookup[abort_context];
James Smart2680eea2007-04-25 09:52:55 -040010296
James Smart2a9bf3d2010-06-07 15:24:45 -040010297 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
10298 "0327 Cannot abort els iocb %p "
10299 "with tag %x context %x, abort status %x, "
10300 "abort code %x\n",
10301 abort_iocb, abort_iotag, abort_context,
10302 irsp->ulpStatus, irsp->un.ulpWord[4]);
James Smart2680eea2007-04-25 09:52:55 -040010303
James Smartff78d8f2011-12-13 13:21:35 -050010304 spin_unlock_irq(&phba->hbalock);
James Smart2680eea2007-04-25 09:52:55 -040010305 }
James Bottomley604a3e32005-10-29 10:28:33 -050010306 lpfc_sli_release_iocbq(phba, cmdiocb);
dea31012005-04-17 16:05:31 -050010307 return;
10308}
10309
James Smarte59058c2008-08-24 21:49:00 -040010310/**
James Smart3621a712009-04-06 18:47:14 -040010311 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
James Smarte59058c2008-08-24 21:49:00 -040010312 * @phba: Pointer to HBA context object.
10313 * @cmdiocb: Pointer to driver command iocb object.
10314 * @rspiocb: Pointer to driver response iocb object.
10315 *
10316 * The function is called from SLI ring event handler with no
10317 * lock held. This function is the completion handler for ELS commands
10318 * which are aborted. The function frees memory resources used for
10319 * the aborted ELS commands.
10320 **/
James Smart92d7f7b2007-06-17 19:56:38 -050010321static void
10322lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10323 struct lpfc_iocbq *rspiocb)
10324{
10325 IOCB_t *irsp = &rspiocb->iocb;
10326
10327 /* ELS cmd tag <ulpIoTag> completes */
10328 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
James Smartd7c255b2008-08-24 21:50:00 -040010329 "0139 Ignoring ELS cmd tag x%x completion Data: "
James Smart92d7f7b2007-06-17 19:56:38 -050010330 "x%x x%x x%x\n",
James Smarte8b62012007-08-02 11:10:09 -040010331 irsp->ulpIoTag, irsp->ulpStatus,
James Smart92d7f7b2007-06-17 19:56:38 -050010332 irsp->un.ulpWord[4], irsp->ulpTimeout);
James Smart858c9f62007-06-17 19:56:39 -050010333 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
10334 lpfc_ct_free_iocb(phba, cmdiocb);
10335 else
10336 lpfc_els_free_iocb(phba, cmdiocb);
James Smart92d7f7b2007-06-17 19:56:38 -050010337 return;
10338}
10339
James Smarte59058c2008-08-24 21:49:00 -040010340/**
James Smart5af5eee2010-10-22 11:06:38 -040010341 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
James Smarte59058c2008-08-24 21:49:00 -040010342 * @phba: Pointer to HBA context object.
10343 * @pring: Pointer to driver SLI ring object.
10344 * @cmdiocb: Pointer to driver command iocb object.
10345 *
James Smart5af5eee2010-10-22 11:06:38 -040010346 * This function issues an abort iocb for the provided command iocb down to
10347 * the port. Unless the outstanding command iocb is itself an abort
10348 * request, this function issues the abort unconditionally. This function is
10349 * called with hbalock held. The function returns 0 when it fails due to
10350 * memory allocation failure or when the command iocb is an abort request.
James Smarte59058c2008-08-24 21:49:00 -040010351 **/
James Smart5af5eee2010-10-22 11:06:38 -040010352static int
10353lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
James Smart2e0fef82007-06-17 19:56:36 -050010354 struct lpfc_iocbq *cmdiocb)
dea31012005-04-17 16:05:31 -050010355{
James Smart2e0fef82007-06-17 19:56:36 -050010356 struct lpfc_vport *vport = cmdiocb->vport;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010357 struct lpfc_iocbq *abtsiocbp;
dea31012005-04-17 16:05:31 -050010358 IOCB_t *icmd = NULL;
10359 IOCB_t *iabt = NULL;
James Smart5af5eee2010-10-22 11:06:38 -040010360 int retval;
James Smart7e56aa22012-08-03 12:35:34 -040010361 unsigned long iflags;
James Smart07951072007-04-25 09:51:38 -040010362
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +010010363 lockdep_assert_held(&phba->hbalock);
10364
James Smart92d7f7b2007-06-17 19:56:38 -050010365 /*
10366 * There are certain command types we don't want to abort. And we
10367 * don't want to abort commands that are already in the process of
10368 * being aborted.
James Smart07951072007-04-25 09:51:38 -040010369 */
10370 icmd = &cmdiocb->iocb;
James Smart2e0fef82007-06-17 19:56:36 -050010371 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
James Smart92d7f7b2007-06-17 19:56:38 -050010372 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10373 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
James Smart07951072007-04-25 09:51:38 -040010374 return 0;
10375
dea31012005-04-17 16:05:31 -050010376 /* issue ABTS for this IOCB based on iotag */
James Smart92d7f7b2007-06-17 19:56:38 -050010377 abtsiocbp = __lpfc_sli_get_iocbq(phba);
dea31012005-04-17 16:05:31 -050010378 if (abtsiocbp == NULL)
10379 return 0;
dea31012005-04-17 16:05:31 -050010380
James Smart07951072007-04-25 09:51:38 -040010381 /* This signals the response to set the correct status
James Smart341af102010-01-26 23:07:37 -050010382 * before calling the completion handler
James Smart07951072007-04-25 09:51:38 -040010383 */
10384 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
10385
dea31012005-04-17 16:05:31 -050010386 iabt = &abtsiocbp->iocb;
James Smart07951072007-04-25 09:51:38 -040010387 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
10388 iabt->un.acxri.abortContextTag = icmd->ulpContext;
James Smart45ed1192009-10-02 15:17:02 -040010389 if (phba->sli_rev == LPFC_SLI_REV4) {
James Smartda0436e2009-05-22 14:51:39 -040010390 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
James Smart45ed1192009-10-02 15:17:02 -040010391 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
10392 }
James Smartda0436e2009-05-22 14:51:39 -040010393 else
10394 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
dea31012005-04-17 16:05:31 -050010395 iabt->ulpLe = 1;
James Smart07951072007-04-25 09:51:38 -040010396 iabt->ulpClass = icmd->ulpClass;
dea31012005-04-17 16:05:31 -050010397
James Smart5ffc2662009-11-18 15:39:44 -050010398 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
James Smart895427b2017-02-12 13:52:30 -080010399 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
James Smart341af102010-01-26 23:07:37 -050010400 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
10401 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
James Smart9bd2bff52014-09-03 12:57:30 -040010402 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
10403 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
James Smart5ffc2662009-11-18 15:39:44 -050010404
James Smart2e0fef82007-06-17 19:56:36 -050010405 if (phba->link_state >= LPFC_LINK_UP)
James Smart07951072007-04-25 09:51:38 -040010406 iabt->ulpCommand = CMD_ABORT_XRI_CN;
10407 else
10408 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
10409
10410 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
James Smarte6c6acc2016-12-19 15:07:23 -080010411 abtsiocbp->vport = vport;
James Smart5b8bd0c2007-04-25 09:52:49 -040010412
James Smarte8b62012007-08-02 11:10:09 -040010413 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
10414 "0339 Abort xri x%x, original iotag x%x, "
10415 "abort cmd iotag x%x\n",
James Smart2a9bf3d2010-06-07 15:24:45 -040010416 iabt->un.acxri.abortIoTag,
James Smarte8b62012007-08-02 11:10:09 -040010417 iabt->un.acxri.abortContextTag,
James Smart2a9bf3d2010-06-07 15:24:45 -040010418 abtsiocbp->iotag);
James Smart7e56aa22012-08-03 12:35:34 -040010419
10420 if (phba->sli_rev == LPFC_SLI_REV4) {
James Smart895427b2017-02-12 13:52:30 -080010421 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
10422 if (unlikely(pring == NULL))
James Smart9bd2bff52014-09-03 12:57:30 -040010423 return 0;
James Smart7e56aa22012-08-03 12:35:34 -040010424 /* Note: both hbalock and ring_lock need to be set here */
10425 spin_lock_irqsave(&pring->ring_lock, iflags);
10426 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10427 abtsiocbp, 0);
10428 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10429 } else {
10430 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10431 abtsiocbp, 0);
10432 }
James Smart07951072007-04-25 09:51:38 -040010433
James Smartd7c255b2008-08-24 21:50:00 -040010434 if (retval)
10435 __lpfc_sli_release_iocbq(phba, abtsiocbp);
James Smart5af5eee2010-10-22 11:06:38 -040010436
10437 /*
10438 * Caller to this routine should check for IOCB_ERROR
10439 * and handle it properly. This routine no longer removes
10440 * iocb off txcmplq and call compl in case of IOCB_ERROR.
10441 */
10442 return retval;
10443}
10444
10445/**
10446 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
10447 * @phba: Pointer to HBA context object.
10448 * @pring: Pointer to driver SLI ring object.
10449 * @cmdiocb: Pointer to driver command iocb object.
10450 *
10451 * This function issues an abort iocb for the provided command iocb. In case
10452 * of unloading, the abort iocb will not be issued to commands on the ELS
10453 * ring. Instead, the completion callback of those commands is changed
10454 * so that nothing happens when they finish. This function is called with
10455 * hbalock held. The function returns 0 when the command iocb is an abort
10456 * request.
10457 **/
10458int
10459lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10460 struct lpfc_iocbq *cmdiocb)
10461{
10462 struct lpfc_vport *vport = cmdiocb->vport;
10463 int retval = IOCB_ERROR;
10464 IOCB_t *icmd = NULL;
10465
Johannes Thumshirn1c2ba472016-01-20 16:22:22 +010010466 lockdep_assert_held(&phba->hbalock);
10467
James Smart5af5eee2010-10-22 11:06:38 -040010468 /*
10469 * There are certain command types we don't want to abort. And we
10470 * don't want to abort commands that are already in the process of
10471 * being aborted.
10472 */
10473 icmd = &cmdiocb->iocb;
10474 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
10475 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10476 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10477 return 0;
10478
10479 /*
10480 * If we're unloading, don't abort iocb on the ELS ring, but change
10481 * the callback so that nothing happens when it finishes.
10482 */
10483 if ((vport->load_flag & FC_UNLOADING) &&
10484 (pring->ringno == LPFC_ELS_RING)) {
10485 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
10486 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
10487 else
10488 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
10489 goto abort_iotag_exit;
10490 }
10491
10492 /* Now, we try to issue the abort to the cmdiocb out */
10493 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
10494
James Smart07951072007-04-25 09:51:38 -040010495abort_iotag_exit:
James Smart2e0fef82007-06-17 19:56:36 -050010496 /*
10497 * Caller to this routine should check for IOCB_ERROR
10498 * and handle it properly. This routine no longer removes
10499 * iocb off txcmplq and call compl in case of IOCB_ERROR.
James Smart07951072007-04-25 09:51:38 -040010500 */
James Smart2e0fef82007-06-17 19:56:36 -050010501 return retval;
dea31012005-04-17 16:05:31 -050010502}
10503
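/*
 * Illustrative sketch only, not part of the driver: how a caller might
 * request an abort for an outstanding command iocb. The routine must be
 * called with hbalock held; the caller checks for IOCB_ERROR itself, since
 * this routine no longer removes the iocb from the txcmplq on failure.
 *
 *	spin_lock_irq(&phba->hbalock);
 *	retval = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
 *	spin_unlock_irq(&phba->hbalock);
 *	if (retval == IOCB_ERROR)
 *		;	// handle the failed abort request here
 */
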
James Smarte59058c2008-08-24 21:49:00 -040010504/**
James Smart895427b2017-02-12 13:52:30 -080010505 * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
10506 * @phba: Pointer to HBA context object.
10507 * @pring: Pointer to driver SLI ring object.
10508 * @cmdiocb: Pointer to driver command iocb object.
10509 *
10510 * This function issues an abort iocb for the provided command iocb down to
10511 * the port. Other than the case the outstanding command iocb is an abort
10512 * request, this function issues abort out unconditionally. This function is
10513 * called with hbalock held. The function returns 0 when it fails due to
10514 * memory allocation failure or when the command iocb is an abort request.
10515 **/
10516static int
10517lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10518 struct lpfc_iocbq *cmdiocb)
10519{
10520 struct lpfc_vport *vport = cmdiocb->vport;
10521 struct lpfc_iocbq *abtsiocbp;
10522 union lpfc_wqe *abts_wqe;
10523 int retval;
10524
10525 /*
10526 * There are certain command types we don't want to abort. And we
10527 * don't want to abort commands that are already in the process of
10528 * being aborted.
10529 */
10530 if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
10531 cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
10532 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10533 return 0;
10534
10535 /* issue ABTS for this io based on iotag */
10536 abtsiocbp = __lpfc_sli_get_iocbq(phba);
10537 if (abtsiocbp == NULL)
10538 return 0;
10539
10540 /* This signals the response to set the correct status
10541 * before calling the completion handler
10542 */
10543 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
10544
10545 /* Complete prepping the abort wqe and issue to the FW. */
10546 abts_wqe = &abtsiocbp->wqe;
10547 bf_set(abort_cmd_ia, &abts_wqe->abort_cmd, 0);
10548 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
10549
10550 /* Explicitly set reserved fields to zero.*/
10551 abts_wqe->abort_cmd.rsrvd4 = 0;
10552 abts_wqe->abort_cmd.rsrvd5 = 0;
10553
10554 /* WQE Common - word 6. Context is XRI tag. Set 0. */
10555 bf_set(wqe_xri_tag, &abts_wqe->abort_cmd.wqe_com, 0);
10556 bf_set(wqe_ctxt_tag, &abts_wqe->abort_cmd.wqe_com, 0);
10557
10558 /* word 7 */
10559 bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
10560 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
10561 bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
10562 cmdiocb->iocb.ulpClass);
10563
10564 /* word 8 - tell the FW to abort the IO associated with this
10565 * outstanding exchange ID.
10566 */
10567 abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;
10568
10569 /* word 9 - this is the iotag for the abts_wqe completion. */
10570 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
10571 abtsiocbp->iotag);
10572
10573 /* word 10 */
10574 bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, cmdiocb->hba_wqidx);
10575 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
10576 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
10577
10578 /* word 11 */
10579 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
10580 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
10581 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10582
10583 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
10584 abtsiocbp->iocb_flag |= LPFC_IO_NVME;
10585 abtsiocbp->vport = vport;
James Smart01649562017-02-12 13:52:32 -080010586 abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
James Smart895427b2017-02-12 13:52:30 -080010587 retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
10588 if (retval == IOCB_ERROR) {
10589 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
10590 "6147 Failed abts issue_wqe with status x%x "
10591 "for oxid x%x\n",
10592 retval, cmdiocb->sli4_xritag);
10593 lpfc_sli_release_iocbq(phba, abtsiocbp);
10594 return retval;
10595 }
10596
10597 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
10598 "6148 Drv Abort NVME Request Issued for "
10599 "ox_id x%x on reqtag x%x\n",
10600 cmdiocb->sli4_xritag,
10601 abtsiocbp->iotag);
10602
10603 return retval;
10604}
10605
10606/**
James Smart5af5eee2010-10-22 11:06:38 -040010607 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
10608 * @phba: pointer to lpfc HBA data structure.
10609 *
10610 * This routine will abort all pending and outstanding iocbs to an HBA.
10611 **/
10612void
10613lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
10614{
10615 struct lpfc_sli *psli = &phba->sli;
10616 struct lpfc_sli_ring *pring;
James Smart895427b2017-02-12 13:52:30 -080010617 struct lpfc_queue *qp = NULL;
James Smart5af5eee2010-10-22 11:06:38 -040010618 int i;
10619
James Smart895427b2017-02-12 13:52:30 -080010620 if (phba->sli_rev != LPFC_SLI_REV4) {
10621 for (i = 0; i < psli->num_rings; i++) {
10622 pring = &psli->sli3_ring[i];
10623 lpfc_sli_abort_iocb_ring(phba, pring);
10624 }
10625 return;
10626 }
10627 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10628 pring = qp->pring;
10629 if (!pring)
10630 continue;
James Smartdb55fba2014-04-04 13:52:02 -040010631 lpfc_sli_abort_iocb_ring(phba, pring);
James Smart5af5eee2010-10-22 11:06:38 -040010632 }
10633}
10634
10635/**
James Smart3621a712009-04-06 18:47:14 -040010636 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
James Smarte59058c2008-08-24 21:49:00 -040010637 * @iocbq: Pointer to driver iocb object.
10638 * @vport: Pointer to driver virtual port object.
10639 * @tgt_id: SCSI ID of the target.
10640 * @lun_id: LUN ID of the scsi device.
10641 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
10642 *
James Smart3621a712009-04-06 18:47:14 -040010643 * This function acts as an iocb filter for functions which abort or count
James Smarte59058c2008-08-24 21:49:00 -040010644 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
10645 * 0 if the filtering criteria are met for the given iocb and will return
10646 * 1 if the filtering criteria are not met.
10647 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
10648 * given iocb is for the SCSI device specified by vport, tgt_id and
10649 * lun_id parameter.
10650 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
10651 * given iocb is for the SCSI target specified by vport and tgt_id
10652 * parameters.
10653 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
10654 * given iocb is for the SCSI host associated with the given vport.
10655 * This function is called with no locks held.
10656 **/
dea31012005-04-17 16:05:31 -050010657static int
James Smart51ef4c22007-08-02 11:10:31 -040010658lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
10659 uint16_t tgt_id, uint64_t lun_id,
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010660 lpfc_ctx_cmd ctx_cmd)
dea31012005-04-17 16:05:31 -050010661{
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010662 struct lpfc_scsi_buf *lpfc_cmd;
dea31012005-04-17 16:05:31 -050010663 int rc = 1;
10664
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010665 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
10666 return rc;
10667
James Smart51ef4c22007-08-02 11:10:31 -040010668 if (iocbq->vport != vport)
10669 return rc;
10670
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010671 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010672
James Smart495a7142008-06-14 22:52:59 -040010673 if (lpfc_cmd->pCmd == NULL)
dea31012005-04-17 16:05:31 -050010674 return rc;
10675
10676 switch (ctx_cmd) {
10677 case LPFC_CTX_LUN:
James Smart495a7142008-06-14 22:52:59 -040010678 if ((lpfc_cmd->rdata->pnode) &&
10679 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
10680 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
dea31012005-04-17 16:05:31 -050010681 rc = 0;
10682 break;
10683 case LPFC_CTX_TGT:
James Smart495a7142008-06-14 22:52:59 -040010684 if ((lpfc_cmd->rdata->pnode) &&
10685 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
dea31012005-04-17 16:05:31 -050010686 rc = 0;
10687 break;
dea31012005-04-17 16:05:31 -050010688 case LPFC_CTX_HOST:
10689 rc = 0;
10690 break;
10691 default:
10692 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
Harvey Harrisoncadbd4a2008-07-03 23:47:27 -070010693 __func__, ctx_cmd);
dea31012005-04-17 16:05:31 -050010694 break;
10695 }
10696
10697 return rc;
10698}
10699
James Smarte59058c2008-08-24 21:49:00 -040010700/**
James Smart3621a712009-04-06 18:47:14 -040010701 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
James Smarte59058c2008-08-24 21:49:00 -040010702 * @vport: Pointer to virtual port.
10703 * @tgt_id: SCSI ID of the target.
10704 * @lun_id: LUN ID of the scsi device.
10705 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
10706 *
10707 * This function returns the number of FCP commands pending for the vport.
10708 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
10709 * commands pending on the vport associated with SCSI device specified
10710 * by tgt_id and lun_id parameters.
10711 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
10712 * commands pending on the vport associated with SCSI target specified
10713 * by tgt_id parameter.
10714 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
10715 * commands pending on the vport.
10716 * This function returns the number of iocbs which satisfy the filter.
10717 * This function is called without any lock held.
10718 **/
dea31012005-04-17 16:05:31 -050010719int
James Smart51ef4c22007-08-02 11:10:31 -040010720lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
10721 lpfc_ctx_cmd ctx_cmd)
dea31012005-04-17 16:05:31 -050010722{
James Smart51ef4c22007-08-02 11:10:31 -040010723 struct lpfc_hba *phba = vport->phba;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010724 struct lpfc_iocbq *iocbq;
10725 int sum, i;
dea31012005-04-17 16:05:31 -050010726
Johannes Thumshirn31979002016-07-18 16:06:03 +020010727 spin_lock_irq(&phba->hbalock);
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010728 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
10729 iocbq = phba->sli.iocbq_lookup[i];
dea31012005-04-17 16:05:31 -050010730
James Smart51ef4c22007-08-02 11:10:31 -040010731 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
10732 ctx_cmd) == 0)
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010733 sum++;
dea31012005-04-17 16:05:31 -050010734 }
Johannes Thumshirn31979002016-07-18 16:06:03 +020010735 spin_unlock_irq(&phba->hbalock);
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010736
dea31012005-04-17 16:05:31 -050010737 return sum;
10738}
10739
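/*
 * Illustrative sketch only, not part of the driver: counting the FCP
 * commands still pending against a single LUN, for example while waiting
 * for outstanding I/O to drain. The tgt_id and lun_id values are assumed
 * to come from the caller's context.
 *
 *	int pending;
 *
 *	pending = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
 *	if (pending)
 *		;	// I/O is still outstanding on this LUN
 */
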
James Smarte59058c2008-08-24 21:49:00 -040010740/**
James Smart3621a712009-04-06 18:47:14 -040010741 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
James Smarte59058c2008-08-24 21:49:00 -040010742 * @phba: Pointer to HBA context object
10743 * @cmdiocb: Pointer to command iocb object.
10744 * @rspiocb: Pointer to response iocb object.
10745 *
10746 * This function is called when an aborted FCP iocb completes. This
10747 * function is called by the ring event handler with no lock held.
10748 * This function frees the iocb.
10749 **/
James.Smart@Emulex.Com5eb95af2005-06-25 10:34:30 -040010750void
James Smart2e0fef82007-06-17 19:56:36 -050010751lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10752 struct lpfc_iocbq *rspiocb)
James.Smart@Emulex.Com5eb95af2005-06-25 10:34:30 -040010753{
James Smartcb69f7d2011-12-13 13:21:57 -050010754 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smart8e668af2013-05-31 17:04:28 -040010755 "3096 ABORT_XRI_CN completing on rpi x%x "
James Smartcb69f7d2011-12-13 13:21:57 -050010756 "original iotag x%x, abort cmd iotag x%x "
10757 "status 0x%x, reason 0x%x\n",
10758 cmdiocb->iocb.un.acxri.abortContextTag,
10759 cmdiocb->iocb.un.acxri.abortIoTag,
10760 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
10761 rspiocb->iocb.un.ulpWord[4]);
James Bottomley604a3e32005-10-29 10:28:33 -050010762 lpfc_sli_release_iocbq(phba, cmdiocb);
James.Smart@Emulex.Com5eb95af2005-06-25 10:34:30 -040010763 return;
10764}
10765
James Smarte59058c2008-08-24 21:49:00 -040010766/**
James Smart3621a712009-04-06 18:47:14 -040010767 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
James Smarte59058c2008-08-24 21:49:00 -040010768 * @vport: Pointer to virtual port.
10769 * @pring: Pointer to driver SLI ring object.
10770 * @tgt_id: SCSI ID of the target.
10771 * @lun_id: LUN ID of the scsi device.
10772 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
10773 *
10774 * This function sends an abort command for every SCSI command
10775 * associated with the given virtual port pending on the ring
10776 * filtered by lpfc_sli_validate_fcp_iocb function.
10777 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
10778 * FCP iocbs associated with lun specified by tgt_id and lun_id
10779 * parameters
10780 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
10781 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
10782 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
10783 * FCP iocbs associated with virtual port.
10784 * This function returns the number of iocbs it failed to abort.
10785 * This function is called with no locks held.
10786 **/
dea31012005-04-17 16:05:31 -050010787int
James Smart51ef4c22007-08-02 11:10:31 -040010788lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
10789 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
dea31012005-04-17 16:05:31 -050010790{
James Smart51ef4c22007-08-02 11:10:31 -040010791 struct lpfc_hba *phba = vport->phba;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010792 struct lpfc_iocbq *iocbq;
10793 struct lpfc_iocbq *abtsiocb;
dea31012005-04-17 16:05:31 -050010794 IOCB_t *cmd = NULL;
dea31012005-04-17 16:05:31 -050010795 int errcnt = 0, ret_val = 0;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010796 int i;
dea31012005-04-17 16:05:31 -050010797
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010798 for (i = 1; i <= phba->sli.last_iotag; i++) {
10799 iocbq = phba->sli.iocbq_lookup[i];
dea31012005-04-17 16:05:31 -050010800
James Smart51ef4c22007-08-02 11:10:31 -040010801 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
James Smart2e0fef82007-06-17 19:56:36 -050010802 abort_cmd) != 0)
dea31012005-04-17 16:05:31 -050010803 continue;
10804
James Smartafbd8d82013-09-06 12:22:13 -040010805 /*
10806 * If the iocbq is already being aborted, don't take a second
10807 * action, but do count it.
10808 */
10809 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
10810 continue;
10811
dea31012005-04-17 16:05:31 -050010812 /* issue ABTS for this IOCB based on iotag */
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010813 abtsiocb = lpfc_sli_get_iocbq(phba);
dea31012005-04-17 16:05:31 -050010814 if (abtsiocb == NULL) {
10815 errcnt++;
10816 continue;
10817 }
dea31012005-04-17 16:05:31 -050010818
James Smartafbd8d82013-09-06 12:22:13 -040010819 /* indicate the IO is being aborted by the driver. */
10820 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
10821
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -040010822 cmd = &iocbq->iocb;
dea31012005-04-17 16:05:31 -050010823 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
10824 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
James Smartda0436e2009-05-22 14:51:39 -040010825 if (phba->sli_rev == LPFC_SLI_REV4)
10826 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
10827 else
10828 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
dea31012005-04-17 16:05:31 -050010829 abtsiocb->iocb.ulpLe = 1;
10830 abtsiocb->iocb.ulpClass = cmd->ulpClass;
James Smartafbd8d82013-09-06 12:22:13 -040010831 abtsiocb->vport = vport;
dea31012005-04-17 16:05:31 -050010832
James Smart5ffc2662009-11-18 15:39:44 -050010833 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
James Smart895427b2017-02-12 13:52:30 -080010834 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
James Smart341af102010-01-26 23:07:37 -050010835 if (iocbq->iocb_flag & LPFC_IO_FCP)
10836 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
James Smart9bd2bff52014-09-03 12:57:30 -040010837 if (iocbq->iocb_flag & LPFC_IO_FOF)
10838 abtsiocb->iocb_flag |= LPFC_IO_FOF;
James Smart5ffc2662009-11-18 15:39:44 -050010839
James Smart2e0fef82007-06-17 19:56:36 -050010840 if (lpfc_is_link_up(phba))
dea31012005-04-17 16:05:31 -050010841 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
10842 else
10843 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
10844
James.Smart@Emulex.Com5eb95af2005-06-25 10:34:30 -040010845 /* Setup callback routine and issue the command. */
10846 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
James Smartda0436e2009-05-22 14:51:39 -040010847 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
10848 abtsiocb, 0);
dea31012005-04-17 16:05:31 -050010849 if (ret_val == IOCB_ERROR) {
James Bottomley604a3e32005-10-29 10:28:33 -050010850 lpfc_sli_release_iocbq(phba, abtsiocb);
dea31012005-04-17 16:05:31 -050010851 errcnt++;
10852 continue;
10853 }
10854 }
10855
10856 return errcnt;
10857}
10858
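/*
 * Illustrative sketch only, not part of the driver: aborting every FCP
 * command pending against one SCSI target, as an error handler might.
 * With LPFC_CTX_TGT the lun_id argument is not used for filtering, and
 * the return value is the number of iocbs that could not be aborted.
 *
 *	int errcnt;
 *
 *	errcnt = lpfc_sli_abort_iocb(vport, pring, tgt_id, 0, LPFC_CTX_TGT);
 *	if (errcnt)
 *		;	// some aborts could not be issued
 */
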
James Smarte59058c2008-08-24 21:49:00 -040010859/**
James Smart98912dda2014-04-04 13:52:31 -040010860 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
10861 * @vport: Pointer to virtual port.
10862 * @pring: Pointer to driver SLI ring object.
10863 * @tgt_id: SCSI ID of the target.
10864 * @lun_id: LUN ID of the scsi device.
10865 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
10866 *
10867 * This function sends an abort command for every SCSI command
10868 * associated with the given virtual port pending on the ring
10869 * filtered by the lpfc_sli_validate_fcp_iocb function.
10870 * When cmd == LPFC_CTX_LUN, the function sends abort only to the
10871 * FCP iocbs associated with the lun specified by the tgt_id and lun_id
10872 * parameters.
10873 * When cmd == LPFC_CTX_TGT, the function sends abort only to the
10874 * FCP iocbs associated with the SCSI target specified by the tgt_id parameter.
10875 * When cmd == LPFC_CTX_HOST, the function sends abort to all
10876 * FCP iocbs associated with the virtual port.
10877 * This function returns the number of iocbs it aborted.
10878 * This function is called with no locks held right after a taskmgmt
10879 * command is sent.
10880 **/
10881int
10882lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
10883 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
10884{
10885 struct lpfc_hba *phba = vport->phba;
James Smart8c50d252014-09-03 12:58:16 -040010886 struct lpfc_scsi_buf *lpfc_cmd;
James Smart98912dda2014-04-04 13:52:31 -040010887 struct lpfc_iocbq *abtsiocbq;
James Smart8c50d252014-09-03 12:58:16 -040010888 struct lpfc_nodelist *ndlp;
James Smart98912dda2014-04-04 13:52:31 -040010889 struct lpfc_iocbq *iocbq;
10890 IOCB_t *icmd;
10891 int sum, i, ret_val;
10892 unsigned long iflags;
10893 struct lpfc_sli_ring *pring_s4;
James Smart98912dda2014-04-04 13:52:31 -040010894
10895 spin_lock_irq(&phba->hbalock);
10896
10897 /* all I/Os are in process of being flushed */
10898 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
10899 spin_unlock_irq(&phba->hbalock);
10900 return 0;
10901 }
10902 sum = 0;
10903
10904 for (i = 1; i <= phba->sli.last_iotag; i++) {
10905 iocbq = phba->sli.iocbq_lookup[i];
10906
10907 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
10908 cmd) != 0)
10909 continue;
10910
10911 /*
10912 * If the iocbq is already being aborted, don't take a second
10913 * action, but do count it.
10914 */
10915 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
10916 continue;
10917
10918 /* issue ABTS for this IOCB based on iotag */
10919 abtsiocbq = __lpfc_sli_get_iocbq(phba);
10920 if (abtsiocbq == NULL)
10921 continue;
10922
10923 icmd = &iocbq->iocb;
10924 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
10925 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
10926 if (phba->sli_rev == LPFC_SLI_REV4)
10927 abtsiocbq->iocb.un.acxri.abortIoTag =
10928 iocbq->sli4_xritag;
10929 else
10930 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
10931 abtsiocbq->iocb.ulpLe = 1;
10932 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
10933 abtsiocbq->vport = vport;
10934
10935 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
James Smart895427b2017-02-12 13:52:30 -080010936 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
James Smart98912dda2014-04-04 13:52:31 -040010937 if (iocbq->iocb_flag & LPFC_IO_FCP)
10938 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
James Smart9bd2bff52014-09-03 12:57:30 -040010939 if (iocbq->iocb_flag & LPFC_IO_FOF)
10940 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
James Smart98912dda2014-04-04 13:52:31 -040010941
James Smart8c50d252014-09-03 12:58:16 -040010942 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
10943 ndlp = lpfc_cmd->rdata->pnode;
10944
10945 if (lpfc_is_link_up(phba) &&
10946 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
James Smart98912dda2014-04-04 13:52:31 -040010947 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
10948 else
10949 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
10950
10951 /* Setup callback routine and issue the command. */
10952 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
10953
10954 /*
10955 * Indicate the IO is being aborted by the driver and set
10956 * the caller's flag into the aborted IO.
10957 */
10958 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
10959
10960 if (phba->sli_rev == LPFC_SLI_REV4) {
James Smart895427b2017-02-12 13:52:30 -080010961 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
10962 if (pring_s4 == NULL)
10963 continue;
James Smart98912dda2014-04-04 13:52:31 -040010964 /* Note: both hbalock and ring_lock must be set here */
10965 spin_lock_irqsave(&pring_s4->ring_lock, iflags);
10966 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
10967 abtsiocbq, 0);
10968 spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
10969 } else {
10970 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
10971 abtsiocbq, 0);
10972 }
10973
10974
10975 if (ret_val == IOCB_ERROR)
10976 __lpfc_sli_release_iocbq(phba, abtsiocbq);
10977 else
10978 sum++;
10979 }
10980 spin_unlock_irq(&phba->hbalock);
10981 return sum;
10982}
10983
10984/**
James Smart3621a712009-04-06 18:47:14 -040010985 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
James Smarte59058c2008-08-24 21:49:00 -040010986 * @phba: Pointer to HBA context object.
10987 * @cmdiocbq: Pointer to command iocb.
10988 * @rspiocbq: Pointer to response iocb.
10989 *
10990 * This function is the completion handler for iocbs issued using
10991 * lpfc_sli_issue_iocb_wait function. This function is called by the
10992 * ring event handler function without any lock held. This function
10993 * can be called from both worker thread context and interrupt
10994 * context. This function also can be called from other thread which
10995 * cleans up the SLI layer objects.
10996 * This function copies the contents of the response iocb to the
10997 * response iocb memory object provided by the caller of
10998 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
10999 * sleeps for the iocb completion.
11000 **/
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011001static void
11002lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11003 struct lpfc_iocbq *cmdiocbq,
11004 struct lpfc_iocbq *rspiocbq)
dea31012005-04-17 16:05:31 -050011005{
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011006 wait_queue_head_t *pdone_q;
11007 unsigned long iflags;
James Smart0f65ff62010-02-26 14:14:23 -050011008 struct lpfc_scsi_buf *lpfc_cmd;
dea31012005-04-17 16:05:31 -050011009
James Smart2e0fef82007-06-17 19:56:36 -050011010 spin_lock_irqsave(&phba->hbalock, iflags);
James Smart5a0916b2013-07-15 18:31:42 -040011011 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11012
11013 /*
11014 * A time out has occurred for the iocb. If a time out
11015 * completion handler has been supplied, call it. Otherwise,
11016 * just free the iocbq.
11017 */
11018
11019 spin_unlock_irqrestore(&phba->hbalock, iflags);
11020 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11021 cmdiocbq->wait_iocb_cmpl = NULL;
11022 if (cmdiocbq->iocb_cmpl)
11023 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11024 else
11025 lpfc_sli_release_iocbq(phba, cmdiocbq);
11026 return;
11027 }
11028
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011029 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11030 if (cmdiocbq->context2 && rspiocbq)
11031 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11032 &rspiocbq->iocb, sizeof(IOCB_t));
11033
James Smart0f65ff62010-02-26 14:14:23 -050011034 /* Set the exchange busy flag for task management commands */
11035 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11036 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
11037 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
11038 cur_iocbq);
11039 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
11040 }
11041
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011042 pdone_q = cmdiocbq->context_un.wait_queue;
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011043 if (pdone_q)
11044 wake_up(pdone_q);
James Smart858c9f62007-06-17 19:56:39 -050011045 spin_unlock_irqrestore(&phba->hbalock, iflags);
dea31012005-04-17 16:05:31 -050011046 return;
11047}
11048
James Smarte59058c2008-08-24 21:49:00 -040011049/**
James Smartd11e31d2009-06-10 17:23:06 -040011050 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
11051 * @phba: Pointer to HBA context object..
11052 * @piocbq: Pointer to command iocb.
11053 * @flag: Flag to test.
11054 *
11055 * This routine grabs the hbalock and then tests the iocb_flag to
11056 * see if the passed in flag is set.
11057 * Returns:
11058 * 1 if flag is set.
11059 * 0 if flag is not set.
11060 **/
11061static int
11062lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11063 struct lpfc_iocbq *piocbq, uint32_t flag)
11064{
11065 unsigned long iflags;
11066 int ret;
11067
11068 spin_lock_irqsave(&phba->hbalock, iflags);
11069 ret = piocbq->iocb_flag & flag;
11070 spin_unlock_irqrestore(&phba->hbalock, iflags);
11071 return ret;
11072
11073}
11074
11075/**
James Smart3621a712009-04-06 18:47:14 -040011076 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
James Smarte59058c2008-08-24 21:49:00 -040011077 * @phba: Pointer to HBA context object..
11078 * @pring: Pointer to sli ring.
11079 * @piocb: Pointer to command iocb.
11080 * @prspiocbq: Pointer to response iocb.
11081 * @timeout: Timeout in number of seconds.
11082 *
11083 * This function issues the iocb to firmware and waits for the
James Smart5a0916b2013-07-15 18:31:42 -040011084 * iocb to complete. The iocb_cmpl field of the shall be used
11085 * to handle iocbs which time out. If the field is NULL, the
11086 * function shall free the iocbq structure. If more clean up is
11087 * needed, the caller is expected to provide a completion function
11088 * that will provide the needed clean up. If the iocb command is
11089 * not completed within timeout seconds, the function will either
11090 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
11091 * completion function set in the iocb_cmpl field and then return
11092 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
11093 * resources if this function returns IOCB_TIMEDOUT.
James Smarte59058c2008-08-24 21:49:00 -040011094 * The function waits for the iocb completion using an
11095 * non-interruptible wait.
11096 * This function will sleep while waiting for iocb completion.
11097 * So, this function should not be called from any context which
11098 * does not allow sleeping. Due to the same reason, this function
11099 * cannot be called with interrupt disabled.
11100 * This function assumes that the iocb completions occur while
11101 * this function sleeps. So, this function cannot be called from
11102 * the thread which processes iocb completion for this ring.
11103 * This function clears the iocb_flag of the iocb object before
11104 * issuing the iocb and the iocb completion handler sets this
11105 * flag and wakes this thread when the iocb completes.
11106 * The contents of the response iocb will be copied to prspiocbq
11107 * by the completion handler when the command completes.
11108 * This function returns IOCB_SUCCESS when success.
11109 * This function is called with no lock held.
11110 **/
dea31012005-04-17 16:05:31 -050011111int
James Smart2e0fef82007-06-17 19:56:36 -050011112lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
James Smartda0436e2009-05-22 14:51:39 -040011113 uint32_t ring_number,
James Smart2e0fef82007-06-17 19:56:36 -050011114 struct lpfc_iocbq *piocb,
11115 struct lpfc_iocbq *prspiocbq,
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011116 uint32_t timeout)
dea31012005-04-17 16:05:31 -050011117{
Peter Zijlstra7259f0d2006-10-29 22:46:36 -080011118 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011119 long timeleft, timeout_req = 0;
11120 int retval = IOCB_SUCCESS;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -050011121 uint32_t creg_val;
James Smart0e9bb8d2013-03-01 16:35:12 -050011122 struct lpfc_iocbq *iocb;
11123 int txq_cnt = 0;
11124 int txcmplq_cnt = 0;
James Smart895427b2017-02-12 13:52:30 -080011125 struct lpfc_sli_ring *pring;
James Smart5a0916b2013-07-15 18:31:42 -040011126 unsigned long iflags;
11127 bool iocb_completed = true;
11128
James Smart895427b2017-02-12 13:52:30 -080011129 if (phba->sli_rev >= LPFC_SLI_REV4)
11130 pring = lpfc_sli4_calc_ring(phba, piocb);
11131 else
11132 pring = &phba->sli.sli3_ring[ring_number];
dea31012005-04-17 16:05:31 -050011133 /*
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011134 * If the caller has provided a response iocbq buffer, then context2
11135	 * must be NULL; otherwise it is an error.
dea31012005-04-17 16:05:31 -050011136 */
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011137 if (prspiocbq) {
11138 if (piocb->context2)
11139 return IOCB_ERROR;
11140 piocb->context2 = prspiocbq;
dea31012005-04-17 16:05:31 -050011141 }
11142
James Smart5a0916b2013-07-15 18:31:42 -040011143 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011144 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
11145 piocb->context_un.wait_queue = &done_q;
James Smart5a0916b2013-07-15 18:31:42 -040011146 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
dea31012005-04-17 16:05:31 -050011147
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -050011148 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
James Smart9940b972011-03-11 16:06:12 -050011149 if (lpfc_readl(phba->HCregaddr, &creg_val))
11150 return IOCB_ERROR;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -050011151 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
11152 writel(creg_val, phba->HCregaddr);
11153 readl(phba->HCregaddr); /* flush */
11154 }
11155
James Smart2a9bf3d2010-06-07 15:24:45 -040011156 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
11157 SLI_IOCB_RET_IOCB);
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011158 if (retval == IOCB_SUCCESS) {
James Smart256ec0d2013-04-17 20:14:58 -040011159 timeout_req = msecs_to_jiffies(timeout * 1000);
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011160 timeleft = wait_event_timeout(done_q,
James Smartd11e31d2009-06-10 17:23:06 -040011161 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011162 timeout_req);
James Smart5a0916b2013-07-15 18:31:42 -040011163 spin_lock_irqsave(&phba->hbalock, iflags);
11164 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
dea31012005-04-17 16:05:31 -050011165
James Smart5a0916b2013-07-15 18:31:42 -040011166 /*
11167 * IOCB timed out. Inform the wake iocb wait
11168 * completion function and set local status
11169 */
11170
11171 iocb_completed = false;
11172 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
11173 }
11174 spin_unlock_irqrestore(&phba->hbalock, iflags);
11175 if (iocb_completed) {
James Smart7054a602007-04-25 09:52:34 -040011176 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -040011177 "0331 IOCB wake signaled\n");
James Smart53151bb2013-10-10 12:24:07 -040011178 /* Note: we are not indicating if the IOCB has a success
11179 * status or not - that's for the caller to check.
11180 * IOCB_SUCCESS means just that the command was sent and
11181 * completed. Not that it completed successfully.
11182 * */
James Smart7054a602007-04-25 09:52:34 -040011183 } else if (timeleft == 0) {
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011184 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -040011185 "0338 IOCB wait timeout error - no "
11186 "wake response Data x%x\n", timeout);
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011187 retval = IOCB_TIMEDOUT;
James Smart7054a602007-04-25 09:52:34 -040011188 } else {
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011189 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -040011190 "0330 IOCB wake NOT set, "
11191 "Data x%x x%lx\n",
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011192 timeout, (timeleft / jiffies));
11193 retval = IOCB_TIMEDOUT;
dea31012005-04-17 16:05:31 -050011194 }
James Smart2a9bf3d2010-06-07 15:24:45 -040011195 } else if (retval == IOCB_BUSY) {
James Smart0e9bb8d2013-03-01 16:35:12 -050011196 if (phba->cfg_log_verbose & LOG_SLI) {
11197 list_for_each_entry(iocb, &pring->txq, list) {
11198 txq_cnt++;
11199 }
11200 list_for_each_entry(iocb, &pring->txcmplq, list) {
11201 txcmplq_cnt++;
11202 }
11203 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11204 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
11205 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
11206 }
James Smart2a9bf3d2010-06-07 15:24:45 -040011207 return retval;
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011208 } else {
11209 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smartd7c255b2008-08-24 21:50:00 -040011210 "0332 IOCB wait issue failed, Data x%x\n",
James Smarte8b62012007-08-02 11:10:09 -040011211 retval);
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011212 retval = IOCB_ERROR;
dea31012005-04-17 16:05:31 -050011213 }
11214
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -050011215 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
James Smart9940b972011-03-11 16:06:12 -050011216 if (lpfc_readl(phba->HCregaddr, &creg_val))
11217 return IOCB_ERROR;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -050011218 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
11219 writel(creg_val, phba->HCregaddr);
11220 readl(phba->HCregaddr); /* flush */
11221 }
11222
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011223 if (prspiocbq)
11224 piocb->context2 = NULL;
11225
11226 piocb->context_un.wait_queue = NULL;
11227 piocb->iocb_cmpl = NULL;
dea31012005-04-17 16:05:31 -050011228 return retval;
11229}
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -040011230
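/*
 * Illustrative sketch only, not part of the driver: issuing an iocb
 * synchronously and reading the response. The caller provides a separate
 * response iocbq and must leave piocb->context2 NULL; on IOCB_TIMEDOUT the
 * iocb resources must not be freed by the caller. Allocation and release
 * of the iocbqs is omitted here.
 *
 *	rspiocbq = lpfc_sli_get_iocbq(phba);
 *	retval = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb,
 *					  rspiocbq, timeout);
 *	if (retval == IOCB_SUCCESS)
 *		status = rspiocbq->iocb.ulpStatus;	// sent and completed
 */
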
James Smarte59058c2008-08-24 21:49:00 -040011231/**
James Smart3621a712009-04-06 18:47:14 -040011232 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
James Smarte59058c2008-08-24 21:49:00 -040011233 * @phba: Pointer to HBA context object.
11234 * @pmboxq: Pointer to driver mailbox object.
11235 * @timeout: Timeout in number of seconds.
11236 *
11237 * This function issues the mailbox to firmware and waits for the
11238 * mailbox command to complete. If the mailbox command is not
11239 * completed within timeout seconds, it returns MBX_TIMEOUT.
11240 * The function waits for the mailbox completion using an
11241 * interruptible wait. If the thread is woken up due to a
11242 * signal, MBX_TIMEOUT error is returned to the caller. Caller
11243 * should not free the mailbox resources, if this function returns
11244 * MBX_TIMEOUT.
11245 * This function will sleep while waiting for mailbox completion.
11246 * So, this function should not be called from any context which
11247 * does not allow sleeping. Due to the same reason, this function
11248 * cannot be called with interrupt disabled.
11249 * This function assumes that the mailbox completion occurs while
11250 * this function sleeps. So, this function cannot be called from
11251 * the worker thread which processes mailbox completion.
11252 * This function is called in the context of HBA management
11253 * applications.
11254 * This function returns MBX_SUCCESS when successful.
11255 * This function is called with no lock held.
11256 **/
dea31012005-04-17 16:05:31 -050011257int
James Smart2e0fef82007-06-17 19:56:36 -050011258lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
dea31012005-04-17 16:05:31 -050011259 uint32_t timeout)
11260{
Peter Zijlstra7259f0d2006-10-29 22:46:36 -080011261 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
James Smartb230b8a2013-05-31 17:05:27 -040011262 MAILBOX_t *mb = NULL;
dea31012005-04-17 16:05:31 -050011263 int retval;
James Smart858c9f62007-06-17 19:56:39 -050011264 unsigned long flag;
dea31012005-04-17 16:05:31 -050011265
James Smartb230b8a2013-05-31 17:05:27 -040011266 /* The caller might set context1 for extended buffer */
James Smart98c9ea52007-10-27 13:37:33 -040011267 if (pmboxq->context1)
James Smartb230b8a2013-05-31 17:05:27 -040011268 mb = (MAILBOX_t *)pmboxq->context1;
dea31012005-04-17 16:05:31 -050011269
James Smart495a7142008-06-14 22:52:59 -040011270 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
dea31012005-04-17 16:05:31 -050011271 /* setup wake call as IOCB callback */
11272 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
11273 /* setup context field to pass wait_queue pointer to wake function */
11274 pmboxq->context1 = &done_q;
11275
dea31012005-04-17 16:05:31 -050011276 /* now issue the command */
11277 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
dea31012005-04-17 16:05:31 -050011278 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
James Smart7054a602007-04-25 09:52:34 -040011279 wait_event_interruptible_timeout(done_q,
11280 pmboxq->mbox_flag & LPFC_MBX_WAKE,
James Smart256ec0d2013-04-17 20:14:58 -040011281 msecs_to_jiffies(timeout * 1000));
James Smart7054a602007-04-25 09:52:34 -040011282
James Smart858c9f62007-06-17 19:56:39 -050011283 spin_lock_irqsave(&phba->hbalock, flag);
James Smartb230b8a2013-05-31 17:05:27 -040011284 /* restore the possible extended buffer for free resource */
11285 pmboxq->context1 = (uint8_t *)mb;
James Smart7054a602007-04-25 09:52:34 -040011286 /*
11287 * if LPFC_MBX_WAKE flag is set the mailbox is completed
11288 * else do not free the resources.
11289 */
James Smartd7c47992010-06-08 18:31:54 -040011290 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
dea31012005-04-17 16:05:31 -050011291 retval = MBX_SUCCESS;
James Smartd7c47992010-06-08 18:31:54 -040011292 } else {
James Smart7054a602007-04-25 09:52:34 -040011293 retval = MBX_TIMEOUT;
James Smart858c9f62007-06-17 19:56:39 -050011294 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
11295 }
11296 spin_unlock_irqrestore(&phba->hbalock, flag);
James Smartb230b8a2013-05-31 17:05:27 -040011297 } else {
11298 /* restore the possible extended buffer for free resource */
11299 pmboxq->context1 = (uint8_t *)mb;
dea31012005-04-17 16:05:31 -050011300 }
11301
dea31012005-04-17 16:05:31 -050011302 return retval;
11303}
11304
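/*
 * Illustrative sketch only, not part of the driver: issuing a mailbox
 * command synchronously. LPFC_MBOX_TMO is the timeout in seconds used
 * elsewhere in this file; on MBX_TIMEOUT the mailbox resources must not
 * be freed because the command may still complete later.
 *
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
 *	if (rc == MBX_TIMEOUT)
 *		return;			// do not free pmboxq
 *	// any other status: pmboxq can be reclaimed by the caller
 */
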
James Smarte59058c2008-08-24 21:49:00 -040011305/**
James Smart3772a992009-05-22 14:50:54 -040011306 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
James Smarte59058c2008-08-24 21:49:00 -040011307 * @phba: Pointer to HBA context.
11308 *
James Smart3772a992009-05-22 14:50:54 -040011309 * This function is called to shutdown the driver's mailbox sub-system.
11310 * It first marks the mailbox sub-system is in a block state to prevent
11311 * the asynchronous mailbox command from issued off the pending mailbox
11312 * command queue. If the mailbox command sub-system shutdown is due to
11313 * HBA error conditions such as EEH or ERATT, this routine shall invoke
11314 * the mailbox sub-system flush routine to forcefully bring down the
11315 * mailbox sub-system. Otherwise, if it is due to normal condition (such
11316 * as with offline or HBA function reset), this routine will wait for the
11317 * outstanding mailbox command to complete before invoking the mailbox
11318 * sub-system flush routine to gracefully bring down mailbox sub-system.
James Smarte59058c2008-08-24 21:49:00 -040011319 **/
James Smart3772a992009-05-22 14:50:54 -040011320void
James Smart618a5232012-06-12 13:54:36 -040011321lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
James Smartb4c02652006-07-06 15:50:43 -040011322{
James Smart3772a992009-05-22 14:50:54 -040011323 struct lpfc_sli *psli = &phba->sli;
James Smart3772a992009-05-22 14:50:54 -040011324 unsigned long timeout;
11325
James Smart618a5232012-06-12 13:54:36 -040011326 if (mbx_action == LPFC_MBX_NO_WAIT) {
11327 /* delay 100ms for port state */
11328 msleep(100);
11329 lpfc_sli_mbox_sys_flush(phba);
11330 return;
11331 }
James Smarta183a152011-10-10 21:32:43 -040011332 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
James Smartd7069f02012-03-01 22:36:29 -050011333
James Smart3772a992009-05-22 14:50:54 -040011334 spin_lock_irq(&phba->hbalock);
11335 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
James Smart3772a992009-05-22 14:50:54 -040011336
11337 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
James Smart3772a992009-05-22 14:50:54 -040011338 /* Determine how long we might wait for the active mailbox
11339 * command to be gracefully completed by firmware.
11340 */
James Smarta183a152011-10-10 21:32:43 -040011341 if (phba->sli.mbox_active)
11342 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
11343 phba->sli.mbox_active) *
11344 1000) + jiffies;
11345 spin_unlock_irq(&phba->hbalock);
11346
James Smart3772a992009-05-22 14:50:54 -040011347 while (phba->sli.mbox_active) {
11348 /* Check active mailbox complete status every 2ms */
11349 msleep(2);
11350 if (time_after(jiffies, timeout))
11351				/* Timeout; let the mailbox flush routine
11352				 * forcefully release the active mailbox command
11353 */
11354 break;
11355 }
James Smartd7069f02012-03-01 22:36:29 -050011356 } else
11357 spin_unlock_irq(&phba->hbalock);
11358
James Smart3772a992009-05-22 14:50:54 -040011359 lpfc_sli_mbox_sys_flush(phba);
11360}
11361
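/*
 * Shutdown-mode summary for the routine above: LPFC_MBX_NO_WAIT flushes the
 * mailbox sub-system right away (error paths such as EEH or ERATT), while
 * any other action value first waits for the active mailbox command to be
 * completed by firmware. A hedged caller sketch (the wait-mode constant name
 * is an assumption, not taken from this file):
 *
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);     // graceful offline
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_NO_WAIT);  // error recovery
 */
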
11362/**
11363 * lpfc_sli_eratt_read - read sli-3 error attention events
11364 * @phba: Pointer to HBA context.
11365 *
11366 * This function is called to read the SLI3 device error attention registers
11367 * for possible error attention events. The caller must hold the hbalock
11368 * with spin_lock_irq().
11369 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030011370 * This function returns 1 when there is Error Attention in the Host Attention
James Smart3772a992009-05-22 14:50:54 -040011371 * Register and returns 0 otherwise.
11372 **/
11373static int
11374lpfc_sli_eratt_read(struct lpfc_hba *phba)
11375{
James Smarted957682007-06-17 19:56:37 -050011376 uint32_t ha_copy;
James Smartb4c02652006-07-06 15:50:43 -040011377
James Smart3772a992009-05-22 14:50:54 -040011378 /* Read chip Host Attention (HA) register */
James Smart9940b972011-03-11 16:06:12 -050011379 if (lpfc_readl(phba->HAregaddr, &ha_copy))
11380 goto unplug_err;
11381
James Smart3772a992009-05-22 14:50:54 -040011382 if (ha_copy & HA_ERATT) {
11383 /* Read host status register to retrieve error event */
James Smart9940b972011-03-11 16:06:12 -050011384 if (lpfc_sli_read_hs(phba))
11385 goto unplug_err;
James Smartb4c02652006-07-06 15:50:43 -040011386
James Smart3772a992009-05-22 14:50:54 -040011387		/* Check if a deferred error condition is active */
11388 if ((HS_FFER1 & phba->work_hs) &&
11389 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
James Smartdcf2a4e2010-09-29 11:18:53 -040011390 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
James Smart3772a992009-05-22 14:50:54 -040011391 phba->hba_flag |= DEFER_ERATT;
James Smart3772a992009-05-22 14:50:54 -040011392 /* Clear all interrupt enable conditions */
11393 writel(0, phba->HCregaddr);
11394 readl(phba->HCregaddr);
11395 }
11396
11397 /* Set the driver HA work bitmap */
James Smart3772a992009-05-22 14:50:54 -040011398 phba->work_ha |= HA_ERATT;
11399 /* Indicate polling handles this ERATT */
11400 phba->hba_flag |= HBA_ERATT_HANDLED;
James Smart3772a992009-05-22 14:50:54 -040011401 return 1;
James Smartb4c02652006-07-06 15:50:43 -040011402 }
James Smart3772a992009-05-22 14:50:54 -040011403 return 0;
James Smart9940b972011-03-11 16:06:12 -050011404
11405unplug_err:
11406 /* Set the driver HS work bitmap */
11407 phba->work_hs |= UNPLUG_ERR;
11408 /* Set the driver HA work bitmap */
11409 phba->work_ha |= HA_ERATT;
11410 /* Indicate polling handles this ERATT */
11411 phba->hba_flag |= HBA_ERATT_HANDLED;
11412 return 1;
James Smartb4c02652006-07-06 15:50:43 -040011413}
11414
James Smarte59058c2008-08-24 21:49:00 -040011415/**
James Smartda0436e2009-05-22 14:51:39 -040011416 * lpfc_sli4_eratt_read - read sli-4 error attention events
11417 * @phba: Pointer to HBA context.
11418 *
11419 * This function is called to read the SLI4 device error attention registers
11420 * for possible error attention events. The caller must hold the hbalock
11421 * with spin_lock_irq().
11422 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030011423 * This function returns 1 when there is Error Attention in the Host Attention
James Smartda0436e2009-05-22 14:51:39 -040011424 * Register and returns 0 otherwise.
11425 **/
11426static int
11427lpfc_sli4_eratt_read(struct lpfc_hba *phba)
11428{
11429 uint32_t uerr_sta_hi, uerr_sta_lo;
James Smart2fcee4b2010-12-15 17:57:46 -050011430 uint32_t if_type, portsmphr;
11431 struct lpfc_register portstat_reg;
James Smartda0436e2009-05-22 14:51:39 -040011432
James Smart2fcee4b2010-12-15 17:57:46 -050011433 /*
11434 * For now, use the SLI4 device internal unrecoverable error
James Smartda0436e2009-05-22 14:51:39 -040011435 * registers for error attention. This can be changed later.
11436 */
James Smart2fcee4b2010-12-15 17:57:46 -050011437 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11438 switch (if_type) {
11439 case LPFC_SLI_INTF_IF_TYPE_0:
James Smart9940b972011-03-11 16:06:12 -050011440 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
11441 &uerr_sta_lo) ||
11442 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
11443 &uerr_sta_hi)) {
11444 phba->work_hs |= UNPLUG_ERR;
11445 phba->work_ha |= HA_ERATT;
11446 phba->hba_flag |= HBA_ERATT_HANDLED;
11447 return 1;
11448 }
James Smart2fcee4b2010-12-15 17:57:46 -050011449 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
11450 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
11451 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11452 "1423 HBA Unrecoverable error: "
11453 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
11454 "ue_mask_lo_reg=0x%x, "
11455 "ue_mask_hi_reg=0x%x\n",
11456 uerr_sta_lo, uerr_sta_hi,
11457 phba->sli4_hba.ue_mask_lo,
11458 phba->sli4_hba.ue_mask_hi);
11459 phba->work_status[0] = uerr_sta_lo;
11460 phba->work_status[1] = uerr_sta_hi;
11461 phba->work_ha |= HA_ERATT;
11462 phba->hba_flag |= HBA_ERATT_HANDLED;
11463 return 1;
11464 }
11465 break;
11466 case LPFC_SLI_INTF_IF_TYPE_2:
James Smart9940b972011-03-11 16:06:12 -050011467 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
11468 &portstat_reg.word0) ||
11469 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
11470 &portsmphr)){
11471 phba->work_hs |= UNPLUG_ERR;
11472 phba->work_ha |= HA_ERATT;
11473 phba->hba_flag |= HBA_ERATT_HANDLED;
11474 return 1;
11475 }
James Smart2fcee4b2010-12-15 17:57:46 -050011476 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
11477 phba->work_status[0] =
11478 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
11479 phba->work_status[1] =
11480 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
11481 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smart2e90f4b2011-12-13 13:22:37 -050011482 "2885 Port Status Event: "
James Smart2fcee4b2010-12-15 17:57:46 -050011483 "port status reg 0x%x, "
11484 "port smphr reg 0x%x, "
11485 "error 1=0x%x, error 2=0x%x\n",
11486 portstat_reg.word0,
11487 portsmphr,
11488 phba->work_status[0],
11489 phba->work_status[1]);
11490 phba->work_ha |= HA_ERATT;
11491 phba->hba_flag |= HBA_ERATT_HANDLED;
11492 return 1;
11493 }
11494 break;
11495 case LPFC_SLI_INTF_IF_TYPE_1:
11496 default:
James Smarta747c9c2009-11-18 15:41:10 -050011497 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smart2fcee4b2010-12-15 17:57:46 -050011498 "2886 HBA Error Attention on unsupported "
11499 "if type %d.", if_type);
James Smarta747c9c2009-11-18 15:41:10 -050011500 return 1;
James Smartda0436e2009-05-22 14:51:39 -040011501 }
James Smart2fcee4b2010-12-15 17:57:46 -050011502
James Smartda0436e2009-05-22 14:51:39 -040011503 return 0;
11504}
11505
11506/**
James Smart3621a712009-04-06 18:47:14 -040011507 * lpfc_sli_check_eratt - check error attention events
James Smart93996272008-08-24 21:50:30 -040011508 * @phba: Pointer to HBA context.
11509 *
James Smart3772a992009-05-22 14:50:54 -040011510 * This function is called from timer soft interrupt context to check HBA's
James Smart93996272008-08-24 21:50:30 -040011511 * error attention register bit for error attention events.
11512 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030011513 * This function returns 1 when there is Error Attention in the Host Attention
James Smart93996272008-08-24 21:50:30 -040011514 * Register and returns 0 otherwise.
11515 **/
11516int
11517lpfc_sli_check_eratt(struct lpfc_hba *phba)
11518{
11519 uint32_t ha_copy;
11520
11521 /* If somebody is waiting to handle an eratt, don't process it
11522 * here. The brdkill function will do this.
11523 */
11524 if (phba->link_flag & LS_IGNORE_ERATT)
11525 return 0;
11526
11527 /* Check if interrupt handler handles this ERATT */
11528 spin_lock_irq(&phba->hbalock);
11529 if (phba->hba_flag & HBA_ERATT_HANDLED) {
11530 /* Interrupt handler has handled ERATT */
11531 spin_unlock_irq(&phba->hbalock);
11532 return 0;
11533 }
11534
James Smarta257bf92009-04-06 18:48:10 -040011535 /*
11536 * If there is deferred error attention, do not check for error
11537 * attention
11538 */
11539 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11540 spin_unlock_irq(&phba->hbalock);
11541 return 0;
11542 }
11543
James Smart3772a992009-05-22 14:50:54 -040011544 /* If PCI channel is offline, don't process it */
11545 if (unlikely(pci_channel_offline(phba->pcidev))) {
James Smart93996272008-08-24 21:50:30 -040011546 spin_unlock_irq(&phba->hbalock);
James Smart3772a992009-05-22 14:50:54 -040011547 return 0;
11548 }
11549
11550 switch (phba->sli_rev) {
11551 case LPFC_SLI_REV2:
11552 case LPFC_SLI_REV3:
11553 /* Read chip Host Attention (HA) register */
11554 ha_copy = lpfc_sli_eratt_read(phba);
11555 break;
James Smartda0436e2009-05-22 14:51:39 -040011556 case LPFC_SLI_REV4:
James Smart2fcee4b2010-12-15 17:57:46 -050011557		/* Read device Unrecoverable Error (UERR) registers */
James Smartda0436e2009-05-22 14:51:39 -040011558 ha_copy = lpfc_sli4_eratt_read(phba);
11559 break;
James Smart3772a992009-05-22 14:50:54 -040011560 default:
11561 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11562 "0299 Invalid SLI revision (%d)\n",
11563 phba->sli_rev);
11564 ha_copy = 0;
11565 break;
James Smart93996272008-08-24 21:50:30 -040011566 }
11567 spin_unlock_irq(&phba->hbalock);
James Smart3772a992009-05-22 14:50:54 -040011568
11569 return ha_copy;
11570}
11571
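/*
 * Illustrative caller sketch for lpfc_sli_check_eratt() (an assumption; the
 * actual poller is outside this excerpt). The timer path can use the return
 * value to decide whether the worker thread needs to run:
 *
 *	if (lpfc_sli_check_eratt(phba))
 *		lpfc_worker_wake_up(phba);
 */
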
11572/**
11573 * lpfc_intr_state_check - Check device state for interrupt handling
11574 * @phba: Pointer to HBA context.
11575 *
11576 * This inline routine checks whether the device or its PCI slot is in a state
11577 * in which the interrupt should be handled.
11578 *
11579 * This function returns 0 if the device or the PCI slot is in a state in which
11580 * the interrupt should be handled, otherwise -EIO.
11581 */
11582static inline int
11583lpfc_intr_state_check(struct lpfc_hba *phba)
11584{
11585 /* If the pci channel is offline, ignore all the interrupts */
11586 if (unlikely(pci_channel_offline(phba->pcidev)))
11587 return -EIO;
11588
11589 /* Update device level interrupt statistics */
11590 phba->sli.slistat.sli_intr++;
11591
11592 /* Ignore all interrupts during initialization. */
11593 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
11594 return -EIO;
11595
James Smart93996272008-08-24 21:50:30 -040011596 return 0;
11597}
11598
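/*
 * The SLI-3 interrupt handlers below call lpfc_intr_state_check() before
 * touching any SLI data structures (directly when they run as individual
 * MSI-X vector handlers, or through the device-level handler otherwise) and
 * return IRQ_NONE while the PCI channel is offline or the HBA is still
 * initializing.
 */
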
11599/**
James Smart3772a992009-05-22 14:50:54 -040011600 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
James Smarte59058c2008-08-24 21:49:00 -040011601 * @irq: Interrupt number.
11602 * @dev_id: The device context pointer.
11603 *
James Smart93996272008-08-24 21:50:30 -040011604 * This function is directly called from the PCI layer as an interrupt
James Smart3772a992009-05-22 14:50:54 -040011605 * service routine when device with SLI-3 interface spec is enabled with
11606 * MSI-X multi-message interrupt mode and there are slow-path events in
11607 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
11608 * interrupt mode, this function is called as part of the device-level
11609 * interrupt handler. When the PCI slot is in error recovery or the HBA
11610 * is undergoing initialization, the interrupt handler will not process
11611 * the interrupt. The link attention and ELS ring attention events are
11612 * handled by the worker thread. The interrupt handler signals the worker
11613 * thread and returns for these events. This function is called without
11614 * any lock held. It gets the hbalock to access and update SLI data
James Smart93996272008-08-24 21:50:30 -040011615 * structures.
11616 *
11617 * This function returns IRQ_HANDLED when interrupt is handled else it
11618 * returns IRQ_NONE.
James Smarte59058c2008-08-24 21:49:00 -040011619 **/
dea31012005-04-17 16:05:31 -050011620irqreturn_t
James Smart3772a992009-05-22 14:50:54 -040011621lpfc_sli_sp_intr_handler(int irq, void *dev_id)
dea31012005-04-17 16:05:31 -050011622{
James Smart2e0fef82007-06-17 19:56:36 -050011623 struct lpfc_hba *phba;
James Smarta747c9c2009-11-18 15:41:10 -050011624 uint32_t ha_copy, hc_copy;
dea31012005-04-17 16:05:31 -050011625 uint32_t work_ha_copy;
11626 unsigned long status;
James Smart5b75da22008-12-04 22:39:35 -050011627 unsigned long iflag;
dea31012005-04-17 16:05:31 -050011628 uint32_t control;
11629
James Smart92d7f7b2007-06-17 19:56:38 -050011630 MAILBOX_t *mbox, *pmbox;
James Smart858c9f62007-06-17 19:56:39 -050011631 struct lpfc_vport *vport;
11632 struct lpfc_nodelist *ndlp;
11633 struct lpfc_dmabuf *mp;
James Smart92d7f7b2007-06-17 19:56:38 -050011634 LPFC_MBOXQ_t *pmb;
11635 int rc;
11636
dea31012005-04-17 16:05:31 -050011637 /*
11638 * Get the driver's phba structure from the dev_id and
11639 * assume the HBA is not interrupting.
11640 */
James Smart93996272008-08-24 21:50:30 -040011641 phba = (struct lpfc_hba *)dev_id;
dea31012005-04-17 16:05:31 -050011642
11643 if (unlikely(!phba))
11644 return IRQ_NONE;
11645
dea31012005-04-17 16:05:31 -050011646 /*
James Smart93996272008-08-24 21:50:30 -040011647	 * Stuff needs to be attended to when this function is invoked as an
11648 * individual interrupt handler in MSI-X multi-message interrupt mode
dea31012005-04-17 16:05:31 -050011649 */
James Smart93996272008-08-24 21:50:30 -040011650 if (phba->intr_type == MSIX) {
James Smart3772a992009-05-22 14:50:54 -040011651 /* Check device state for handling interrupt */
11652 if (lpfc_intr_state_check(phba))
James Smart93996272008-08-24 21:50:30 -040011653 return IRQ_NONE;
11654 /* Need to read HA REG for slow-path events */
James Smart5b75da22008-12-04 22:39:35 -050011655 spin_lock_irqsave(&phba->hbalock, iflag);
James Smart9940b972011-03-11 16:06:12 -050011656 if (lpfc_readl(phba->HAregaddr, &ha_copy))
11657 goto unplug_error;
James Smart93996272008-08-24 21:50:30 -040011658 /* If somebody is waiting to handle an eratt don't process it
11659 * here. The brdkill function will do this.
11660 */
11661 if (phba->link_flag & LS_IGNORE_ERATT)
11662 ha_copy &= ~HA_ERATT;
11663 /* Check the need for handling ERATT in interrupt handler */
11664 if (ha_copy & HA_ERATT) {
11665 if (phba->hba_flag & HBA_ERATT_HANDLED)
11666 /* ERATT polling has handled ERATT */
11667 ha_copy &= ~HA_ERATT;
11668 else
11669 /* Indicate interrupt handler handles ERATT */
11670 phba->hba_flag |= HBA_ERATT_HANDLED;
11671 }
James Smarta257bf92009-04-06 18:48:10 -040011672
11673 /*
11674 * If there is deferred error attention, do not check for any
11675 * interrupt.
11676 */
11677 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
James Smart3772a992009-05-22 14:50:54 -040011678 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smarta257bf92009-04-06 18:48:10 -040011679 return IRQ_NONE;
11680 }
11681
James Smart93996272008-08-24 21:50:30 -040011682 /* Clear up only attention source related to slow-path */
James Smart9940b972011-03-11 16:06:12 -050011683 if (lpfc_readl(phba->HCregaddr, &hc_copy))
11684 goto unplug_error;
11685
James Smarta747c9c2009-11-18 15:41:10 -050011686 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
11687 HC_LAINT_ENA | HC_ERINT_ENA),
11688 phba->HCregaddr);
James Smart93996272008-08-24 21:50:30 -040011689 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
11690 phba->HAregaddr);
James Smarta747c9c2009-11-18 15:41:10 -050011691 writel(hc_copy, phba->HCregaddr);
James Smart93996272008-08-24 21:50:30 -040011692 readl(phba->HAregaddr); /* flush */
James Smart5b75da22008-12-04 22:39:35 -050011693 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart93996272008-08-24 21:50:30 -040011694 } else
11695 ha_copy = phba->ha_copy;
dea31012005-04-17 16:05:31 -050011696
dea31012005-04-17 16:05:31 -050011697 work_ha_copy = ha_copy & phba->work_ha_mask;
11698
James Smart93996272008-08-24 21:50:30 -040011699 if (work_ha_copy) {
dea31012005-04-17 16:05:31 -050011700 if (work_ha_copy & HA_LATT) {
11701 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
11702 /*
11703 * Turn off Link Attention interrupts
11704 * until CLEAR_LA done
11705 */
James Smart5b75da22008-12-04 22:39:35 -050011706 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -050011707 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
James Smart9940b972011-03-11 16:06:12 -050011708 if (lpfc_readl(phba->HCregaddr, &control))
11709 goto unplug_error;
dea31012005-04-17 16:05:31 -050011710 control &= ~HC_LAINT_ENA;
11711 writel(control, phba->HCregaddr);
11712 readl(phba->HCregaddr); /* flush */
James Smart5b75da22008-12-04 22:39:35 -050011713 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -050011714 }
11715 else
11716 work_ha_copy &= ~HA_LATT;
11717 }
11718
James Smart93996272008-08-24 21:50:30 -040011719 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
James Smart858c9f62007-06-17 19:56:39 -050011720 /*
11721 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
11722 * the only slow ring.
11723 */
11724 status = (work_ha_copy &
11725 (HA_RXMASK << (4*LPFC_ELS_RING)));
11726 status >>= (4*LPFC_ELS_RING);
11727 if (status & HA_RXMASK) {
James Smart5b75da22008-12-04 22:39:35 -050011728 spin_lock_irqsave(&phba->hbalock, iflag);
James Smart9940b972011-03-11 16:06:12 -050011729 if (lpfc_readl(phba->HCregaddr, &control))
11730 goto unplug_error;
James Smarta58cbd52007-08-02 11:09:43 -040011731
11732 lpfc_debugfs_slow_ring_trc(phba,
11733 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
11734 control, status,
11735 (uint32_t)phba->sli.slistat.sli_intr);
11736
James Smart858c9f62007-06-17 19:56:39 -050011737 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
James Smarta58cbd52007-08-02 11:09:43 -040011738 lpfc_debugfs_slow_ring_trc(phba,
11739 "ISR Disable ring:"
11740 "pwork:x%x hawork:x%x wait:x%x",
11741 phba->work_ha, work_ha_copy,
11742 (uint32_t)((unsigned long)
James Smart5e9d9b82008-06-14 22:52:53 -040011743 &phba->work_waitq));
James Smarta58cbd52007-08-02 11:09:43 -040011744
James Smart858c9f62007-06-17 19:56:39 -050011745 control &=
11746 ~(HC_R0INT_ENA << LPFC_ELS_RING);
dea31012005-04-17 16:05:31 -050011747 writel(control, phba->HCregaddr);
11748 readl(phba->HCregaddr); /* flush */
dea31012005-04-17 16:05:31 -050011749 }
James Smarta58cbd52007-08-02 11:09:43 -040011750 else {
11751 lpfc_debugfs_slow_ring_trc(phba,
11752 "ISR slow ring: pwork:"
11753 "x%x hawork:x%x wait:x%x",
11754 phba->work_ha, work_ha_copy,
11755 (uint32_t)((unsigned long)
James Smart5e9d9b82008-06-14 22:52:53 -040011756 &phba->work_waitq));
James Smarta58cbd52007-08-02 11:09:43 -040011757 }
James Smart5b75da22008-12-04 22:39:35 -050011758 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -050011759 }
11760 }
James Smart5b75da22008-12-04 22:39:35 -050011761 spin_lock_irqsave(&phba->hbalock, iflag);
James Smarta257bf92009-04-06 18:48:10 -040011762 if (work_ha_copy & HA_ERATT) {
James Smart9940b972011-03-11 16:06:12 -050011763 if (lpfc_sli_read_hs(phba))
11764 goto unplug_error;
James Smarta257bf92009-04-06 18:48:10 -040011765 /*
11766			 * Check if a deferred error condition
11767			 * is active
11768 */
11769 if ((HS_FFER1 & phba->work_hs) &&
11770 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
James Smartdcf2a4e2010-09-29 11:18:53 -040011771 HS_FFER6 | HS_FFER7 | HS_FFER8) &
11772 phba->work_hs)) {
James Smarta257bf92009-04-06 18:48:10 -040011773 phba->hba_flag |= DEFER_ERATT;
11774 /* Clear all interrupt enable conditions */
11775 writel(0, phba->HCregaddr);
11776 readl(phba->HCregaddr);
11777 }
11778 }
11779
James Smart93996272008-08-24 21:50:30 -040011780 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
James Smart92d7f7b2007-06-17 19:56:38 -050011781 pmb = phba->sli.mbox_active;
James Smart04c68492009-05-22 14:52:52 -040011782 pmbox = &pmb->u.mb;
James Smart34b02dc2008-08-24 21:49:55 -040011783 mbox = phba->mbox;
James Smart858c9f62007-06-17 19:56:39 -050011784 vport = pmb->vport;
James Smart92d7f7b2007-06-17 19:56:38 -050011785
11786 /* First check out the status word */
11787 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
11788 if (pmbox->mbxOwner != OWN_HOST) {
James Smart5b75da22008-12-04 22:39:35 -050011789 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart92d7f7b2007-06-17 19:56:38 -050011790 /*
11791 * Stray Mailbox Interrupt, mbxCommand <cmd>
11792 * mbxStatus <status>
11793 */
James Smart09372822008-01-11 01:52:54 -050011794 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
James Smart92d7f7b2007-06-17 19:56:38 -050011795 LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -040011796 "(%d):0304 Stray Mailbox "
James Smart92d7f7b2007-06-17 19:56:38 -050011797 "Interrupt mbxCommand x%x "
11798 "mbxStatus x%x\n",
James Smarte8b62012007-08-02 11:10:09 -040011799 (vport ? vport->vpi : 0),
James Smart92d7f7b2007-06-17 19:56:38 -050011800 pmbox->mbxCommand,
11801 pmbox->mbxStatus);
James Smart09372822008-01-11 01:52:54 -050011802 /* clear mailbox attention bit */
11803 work_ha_copy &= ~HA_MBATT;
11804 } else {
James Smart97eab632008-04-07 10:16:05 -040011805 phba->sli.mbox_active = NULL;
James Smart5b75da22008-12-04 22:39:35 -050011806 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart09372822008-01-11 01:52:54 -050011807 phba->last_completion_time = jiffies;
11808 del_timer(&phba->sli.mbox_tmo);
James Smart09372822008-01-11 01:52:54 -050011809 if (pmb->mbox_cmpl) {
11810 lpfc_sli_pcimem_bcopy(mbox, pmbox,
11811 MAILBOX_CMD_SIZE);
James Smart7a470272010-03-15 11:25:20 -040011812 if (pmb->out_ext_byte_len &&
11813 pmb->context2)
11814 lpfc_sli_pcimem_bcopy(
11815 phba->mbox_ext,
11816 pmb->context2,
11817 pmb->out_ext_byte_len);
James Smart858c9f62007-06-17 19:56:39 -050011818 }
James Smart09372822008-01-11 01:52:54 -050011819 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
11820 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
11821
11822 lpfc_debugfs_disc_trc(vport,
11823 LPFC_DISC_TRC_MBOX_VPORT,
11824					 "MBOX dflt rpi: "
11825 "status:x%x rpi:x%x",
11826 (uint32_t)pmbox->mbxStatus,
11827 pmbox->un.varWords[0], 0);
11828
11829 if (!pmbox->mbxStatus) {
11830 mp = (struct lpfc_dmabuf *)
11831 (pmb->context1);
11832 ndlp = (struct lpfc_nodelist *)
11833 pmb->context2;
11834
11835 /* Reg_LOGIN of dflt RPI was
11836					 * successful. Now let's get
11837 * rid of the RPI using the
11838 * same mbox buffer.
11839 */
11840 lpfc_unreg_login(phba,
11841 vport->vpi,
11842 pmbox->un.varWords[0],
11843 pmb);
11844 pmb->mbox_cmpl =
11845 lpfc_mbx_cmpl_dflt_rpi;
11846 pmb->context1 = mp;
11847 pmb->context2 = ndlp;
11848 pmb->vport = vport;
James Smart58da1ff2008-04-07 10:15:56 -040011849 rc = lpfc_sli_issue_mbox(phba,
11850 pmb,
11851 MBX_NOWAIT);
11852 if (rc != MBX_BUSY)
11853 lpfc_printf_log(phba,
11854 KERN_ERR,
11855 LOG_MBOX | LOG_SLI,
James Smartd7c255b2008-08-24 21:50:00 -040011856						"0350 rc should have "
James Smart6a9c52c2009-10-02 15:16:51 -040011857 "been MBX_BUSY\n");
James Smart3772a992009-05-22 14:50:54 -040011858 if (rc != MBX_NOT_FINISHED)
11859 goto send_current_mbox;
James Smart09372822008-01-11 01:52:54 -050011860 }
11861 }
James Smart5b75da22008-12-04 22:39:35 -050011862 spin_lock_irqsave(
11863 &phba->pport->work_port_lock,
11864 iflag);
James Smart09372822008-01-11 01:52:54 -050011865 phba->pport->work_port_events &=
11866 ~WORKER_MBOX_TMO;
James Smart5b75da22008-12-04 22:39:35 -050011867 spin_unlock_irqrestore(
11868 &phba->pport->work_port_lock,
11869 iflag);
James Smart09372822008-01-11 01:52:54 -050011870 lpfc_mbox_cmpl_put(phba, pmb);
James Smart858c9f62007-06-17 19:56:39 -050011871 }
James Smart97eab632008-04-07 10:16:05 -040011872 } else
James Smart5b75da22008-12-04 22:39:35 -050011873 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart93996272008-08-24 21:50:30 -040011874
James Smart92d7f7b2007-06-17 19:56:38 -050011875 if ((work_ha_copy & HA_MBATT) &&
11876 (phba->sli.mbox_active == NULL)) {
James Smart858c9f62007-06-17 19:56:39 -050011877send_current_mbox:
James Smart92d7f7b2007-06-17 19:56:38 -050011878 /* Process next mailbox command if there is one */
James Smart58da1ff2008-04-07 10:15:56 -040011879 do {
11880 rc = lpfc_sli_issue_mbox(phba, NULL,
11881 MBX_NOWAIT);
11882 } while (rc == MBX_NOT_FINISHED);
11883 if (rc != MBX_SUCCESS)
11884 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11885 LOG_SLI, "0349 rc should be "
James Smart6a9c52c2009-10-02 15:16:51 -040011886 "MBX_SUCCESS\n");
James Smart92d7f7b2007-06-17 19:56:38 -050011887 }
11888
James Smart5b75da22008-12-04 22:39:35 -050011889 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -050011890 phba->work_ha |= work_ha_copy;
James Smart5b75da22008-12-04 22:39:35 -050011891 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart5e9d9b82008-06-14 22:52:53 -040011892 lpfc_worker_wake_up(phba);
dea31012005-04-17 16:05:31 -050011893 }
James Smart93996272008-08-24 21:50:30 -040011894 return IRQ_HANDLED;
James Smart9940b972011-03-11 16:06:12 -050011895unplug_error:
11896 spin_unlock_irqrestore(&phba->hbalock, iflag);
11897 return IRQ_HANDLED;
dea31012005-04-17 16:05:31 -050011898
James Smart3772a992009-05-22 14:50:54 -040011899} /* lpfc_sli_sp_intr_handler */
James Smart93996272008-08-24 21:50:30 -040011900
11901/**
James Smart3772a992009-05-22 14:50:54 -040011902 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
James Smart93996272008-08-24 21:50:30 -040011903 * @irq: Interrupt number.
11904 * @dev_id: The device context pointer.
11905 *
11906 * This function is directly called from the PCI layer as an interrupt
James Smart3772a992009-05-22 14:50:54 -040011907 * service routine when device with SLI-3 interface spec is enabled with
11908 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
11909 * ring event in the HBA. However, when the device is enabled with either
11910 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
11911 * device-level interrupt handler. When the PCI slot is in error recovery
11912 * or the HBA is undergoing initialization, the interrupt handler will not
11913 * process the interrupt. The SCSI FCP fast-path ring events are handled in
11914 * the interrupt context. This function is called without any lock held.
11915 * It gets the hbalock to access and update SLI data structures.
James Smart93996272008-08-24 21:50:30 -040011916 *
11917 * This function returns IRQ_HANDLED when interrupt is handled else it
11918 * returns IRQ_NONE.
11919 **/
11920irqreturn_t
James Smart3772a992009-05-22 14:50:54 -040011921lpfc_sli_fp_intr_handler(int irq, void *dev_id)
James Smart93996272008-08-24 21:50:30 -040011922{
11923 struct lpfc_hba *phba;
11924 uint32_t ha_copy;
11925 unsigned long status;
James Smart5b75da22008-12-04 22:39:35 -050011926 unsigned long iflag;
James Smart895427b2017-02-12 13:52:30 -080011927 struct lpfc_sli_ring *pring;
James Smart93996272008-08-24 21:50:30 -040011928
11929 /* Get the driver's phba structure from the dev_id and
11930 * assume the HBA is not interrupting.
11931 */
11932 phba = (struct lpfc_hba *) dev_id;
11933
11934 if (unlikely(!phba))
11935 return IRQ_NONE;
dea31012005-04-17 16:05:31 -050011936
11937 /*
James Smart93996272008-08-24 21:50:30 -040011938	 * Stuff needs to be attended to when this function is invoked as an
11939 * individual interrupt handler in MSI-X multi-message interrupt mode
dea31012005-04-17 16:05:31 -050011940 */
James Smart93996272008-08-24 21:50:30 -040011941 if (phba->intr_type == MSIX) {
James Smart3772a992009-05-22 14:50:54 -040011942 /* Check device state for handling interrupt */
11943 if (lpfc_intr_state_check(phba))
James Smart93996272008-08-24 21:50:30 -040011944 return IRQ_NONE;
11945 /* Need to read HA REG for FCP ring and other ring events */
James Smart9940b972011-03-11 16:06:12 -050011946 if (lpfc_readl(phba->HAregaddr, &ha_copy))
11947 return IRQ_HANDLED;
James Smart93996272008-08-24 21:50:30 -040011948 /* Clear up only attention source related to fast-path */
James Smart5b75da22008-12-04 22:39:35 -050011949 spin_lock_irqsave(&phba->hbalock, iflag);
James Smarta257bf92009-04-06 18:48:10 -040011950 /*
11951 * If there is deferred error attention, do not check for
11952 * any interrupt.
11953 */
11954 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
James Smart3772a992009-05-22 14:50:54 -040011955 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smarta257bf92009-04-06 18:48:10 -040011956 return IRQ_NONE;
11957 }
James Smart93996272008-08-24 21:50:30 -040011958 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
11959 phba->HAregaddr);
11960 readl(phba->HAregaddr); /* flush */
James Smart5b75da22008-12-04 22:39:35 -050011961 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart93996272008-08-24 21:50:30 -040011962 } else
11963 ha_copy = phba->ha_copy;
11964
11965 /*
11966 * Process all events on FCP ring. Take the optimized path for FCP IO.
11967 */
11968 ha_copy &= ~(phba->work_ha_mask);
11969
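	/*
	 * The Host Attention register carries a 4-bit attention field per
	 * ring: mask with HA_RXMASK shifted to this ring's nibble, then shift
	 * the result back down to test the ring's receive/response bits.
	 */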
11970 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
dea31012005-04-17 16:05:31 -050011971 status >>= (4*LPFC_FCP_RING);
James Smart895427b2017-02-12 13:52:30 -080011972 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
James Smart858c9f62007-06-17 19:56:39 -050011973 if (status & HA_RXMASK)
James Smart895427b2017-02-12 13:52:30 -080011974 lpfc_sli_handle_fast_ring_event(phba, pring, status);
James Smarta4bc3372006-12-02 13:34:16 -050011975
11976 if (phba->cfg_multi_ring_support == 2) {
11977 /*
James Smart93996272008-08-24 21:50:30 -040011978 * Process all events on extra ring. Take the optimized path
11979 * for extra ring IO.
James Smarta4bc3372006-12-02 13:34:16 -050011980 */
James Smart93996272008-08-24 21:50:30 -040011981 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
James Smarta4bc3372006-12-02 13:34:16 -050011982 status >>= (4*LPFC_EXTRA_RING);
James Smart858c9f62007-06-17 19:56:39 -050011983 if (status & HA_RXMASK) {
James Smarta4bc3372006-12-02 13:34:16 -050011984 lpfc_sli_handle_fast_ring_event(phba,
James Smart895427b2017-02-12 13:52:30 -080011985 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
James Smarta4bc3372006-12-02 13:34:16 -050011986 status);
11987 }
11988 }
dea31012005-04-17 16:05:31 -050011989 return IRQ_HANDLED;
James Smart3772a992009-05-22 14:50:54 -040011990} /* lpfc_sli_fp_intr_handler */
dea31012005-04-17 16:05:31 -050011991
James Smart93996272008-08-24 21:50:30 -040011992/**
James Smart3772a992009-05-22 14:50:54 -040011993 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
James Smart93996272008-08-24 21:50:30 -040011994 * @irq: Interrupt number.
11995 * @dev_id: The device context pointer.
11996 *
James Smart3772a992009-05-22 14:50:54 -040011997 * This function is the HBA device-level interrupt handler to device with
11998 * SLI-3 interface spec, called from the PCI layer when either MSI or
11999 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12000 * requires driver attention. This function invokes the slow-path interrupt
12001 * attention handling function and fast-path interrupt attention handling
12002 * function in turn to process the relevant HBA attention events. This
12003 * function is called without any lock held. It gets the hbalock to access
12004 * and update SLI data structures.
James Smart93996272008-08-24 21:50:30 -040012005 *
12006 * This function returns IRQ_HANDLED when interrupt is handled, else it
12007 * returns IRQ_NONE.
12008 **/
12009irqreturn_t
James Smart3772a992009-05-22 14:50:54 -040012010lpfc_sli_intr_handler(int irq, void *dev_id)
James Smart93996272008-08-24 21:50:30 -040012011{
12012 struct lpfc_hba *phba;
12013 irqreturn_t sp_irq_rc, fp_irq_rc;
12014 unsigned long status1, status2;
James Smarta747c9c2009-11-18 15:41:10 -050012015 uint32_t hc_copy;
James Smart93996272008-08-24 21:50:30 -040012016
12017 /*
12018 * Get the driver's phba structure from the dev_id and
12019 * assume the HBA is not interrupting.
12020 */
12021 phba = (struct lpfc_hba *) dev_id;
12022
12023 if (unlikely(!phba))
12024 return IRQ_NONE;
12025
James Smart3772a992009-05-22 14:50:54 -040012026 /* Check device state for handling interrupt */
12027 if (lpfc_intr_state_check(phba))
James Smart93996272008-08-24 21:50:30 -040012028 return IRQ_NONE;
12029
12030 spin_lock(&phba->hbalock);
James Smart9940b972011-03-11 16:06:12 -050012031 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12032 spin_unlock(&phba->hbalock);
12033 return IRQ_HANDLED;
12034 }
12035
James Smart93996272008-08-24 21:50:30 -040012036 if (unlikely(!phba->ha_copy)) {
12037 spin_unlock(&phba->hbalock);
12038 return IRQ_NONE;
12039 } else if (phba->ha_copy & HA_ERATT) {
12040 if (phba->hba_flag & HBA_ERATT_HANDLED)
12041 /* ERATT polling has handled ERATT */
12042 phba->ha_copy &= ~HA_ERATT;
12043 else
12044 /* Indicate interrupt handler handles ERATT */
12045 phba->hba_flag |= HBA_ERATT_HANDLED;
12046 }
12047
James Smarta257bf92009-04-06 18:48:10 -040012048 /*
12049 * If there is deferred error attention, do not check for any interrupt.
12050 */
12051 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
Dan Carpenterec21b3b2010-08-08 00:15:17 +020012052 spin_unlock(&phba->hbalock);
James Smarta257bf92009-04-06 18:48:10 -040012053 return IRQ_NONE;
12054 }
12055
James Smart93996272008-08-24 21:50:30 -040012056 /* Clear attention sources except link and error attentions */
James Smart9940b972011-03-11 16:06:12 -050012057 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
12058 spin_unlock(&phba->hbalock);
12059 return IRQ_HANDLED;
12060 }
James Smarta747c9c2009-11-18 15:41:10 -050012061 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
12062 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
12063 phba->HCregaddr);
James Smart93996272008-08-24 21:50:30 -040012064 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
James Smarta747c9c2009-11-18 15:41:10 -050012065 writel(hc_copy, phba->HCregaddr);
James Smart93996272008-08-24 21:50:30 -040012066 readl(phba->HAregaddr); /* flush */
12067 spin_unlock(&phba->hbalock);
12068
12069 /*
12070 * Invokes slow-path host attention interrupt handling as appropriate.
12071 */
12072
12073 /* status of events with mailbox and link attention */
12074 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
12075
12076 /* status of events with ELS ring */
12077 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
12078 status2 >>= (4*LPFC_ELS_RING);
12079
12080 if (status1 || (status2 & HA_RXMASK))
James Smart3772a992009-05-22 14:50:54 -040012081 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
James Smart93996272008-08-24 21:50:30 -040012082 else
12083 sp_irq_rc = IRQ_NONE;
12084
12085 /*
12086 * Invoke fast-path host attention interrupt handling as appropriate.
12087 */
12088
12089 /* status of events with FCP ring */
12090 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12091 status1 >>= (4*LPFC_FCP_RING);
12092
12093 /* status of events with extra ring */
12094 if (phba->cfg_multi_ring_support == 2) {
12095 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12096 status2 >>= (4*LPFC_EXTRA_RING);
12097 } else
12098 status2 = 0;
12099
12100 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
James Smart3772a992009-05-22 14:50:54 -040012101 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
James Smart93996272008-08-24 21:50:30 -040012102 else
12103 fp_irq_rc = IRQ_NONE;
12104
12105 /* Return device-level interrupt handling status */
12106 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
James Smart3772a992009-05-22 14:50:54 -040012107} /* lpfc_sli_intr_handler */
James Smart4f774512009-05-22 14:52:35 -040012108
12109/**
12110 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
12111 * @phba: pointer to lpfc hba data structure.
12112 *
12113 * This routine is invoked by the worker thread to process all the pending
12114 * SLI4 FCP abort XRI events.
12115 **/
12116void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
12117{
12118 struct lpfc_cq_event *cq_event;
12119
12120 /* First, declare the fcp xri abort event has been handled */
12121 spin_lock_irq(&phba->hbalock);
12122 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
12123 spin_unlock_irq(&phba->hbalock);
12124 /* Now, handle all the fcp xri abort events */
12125 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
12126 /* Get the first event from the head of the event queue */
12127 spin_lock_irq(&phba->hbalock);
12128 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
12129 cq_event, struct lpfc_cq_event, list);
12130 spin_unlock_irq(&phba->hbalock);
12131 /* Notify aborted XRI for FCP work queue */
12132 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12133 /* Free the event processed back to the free pool */
12134 lpfc_sli4_cq_event_release(phba, cq_event);
12135 }
12136}
12137
12138/**
12139 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
12140 * @phba: pointer to lpfc hba data structure.
12141 *
12142 * This routine is invoked by the worker thread to process all the pending
12143 * SLI4 els abort xri events.
12144 **/
12145void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
12146{
12147 struct lpfc_cq_event *cq_event;
12148
12149 /* First, declare the els xri abort event has been handled */
12150 spin_lock_irq(&phba->hbalock);
12151 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
12152 spin_unlock_irq(&phba->hbalock);
12153 /* Now, handle all the els xri abort events */
12154 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
12155 /* Get the first event from the head of the event queue */
12156 spin_lock_irq(&phba->hbalock);
12157 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
12158 cq_event, struct lpfc_cq_event, list);
12159 spin_unlock_irq(&phba->hbalock);
12160 /* Notify aborted XRI for ELS work queue */
12161 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12162 /* Free the event processed back to the free pool */
12163 lpfc_sli4_cq_event_release(phba, cq_event);
12164 }
12165}
12166
James Smart341af102010-01-26 23:07:37 -050012167/**
12168 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
12169 * @phba: pointer to lpfc hba data structure
12170 * @pIocbIn: pointer to the rspiocbq
12171 * @pIocbOut: pointer to the cmdiocbq
12172 * @wcqe: pointer to the complete wcqe
12173 *
12174 * This routine transfers the fields of a command iocbq to a response iocbq
12175 * by copying all the IOCB fields from the command iocbq and transferring the
12176 * completion status information from the complete wcqe.
12177 **/
James Smart4f774512009-05-22 14:52:35 -040012178static void
James Smart341af102010-01-26 23:07:37 -050012179lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
12180 struct lpfc_iocbq *pIocbIn,
James Smart4f774512009-05-22 14:52:35 -040012181 struct lpfc_iocbq *pIocbOut,
12182 struct lpfc_wcqe_complete *wcqe)
12183{
James Smartaf227412013-10-10 12:23:10 -040012184 int numBdes, i;
James Smart341af102010-01-26 23:07:37 -050012185 unsigned long iflags;
James Smartaf227412013-10-10 12:23:10 -040012186 uint32_t status, max_response;
12187 struct lpfc_dmabuf *dmabuf;
12188 struct ulp_bde64 *bpl, bde;
James Smart4f774512009-05-22 14:52:35 -040012189 size_t offset = offsetof(struct lpfc_iocbq, iocb);
12190
12191 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
12192 sizeof(struct lpfc_iocbq) - offset);
James Smart4f774512009-05-22 14:52:35 -040012193 /* Map WCQE parameters into irspiocb parameters */
James Smartacd68592012-01-18 16:25:09 -050012194 status = bf_get(lpfc_wcqe_c_status, wcqe);
12195 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
James Smart4f774512009-05-22 14:52:35 -040012196 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
12197 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
12198 pIocbIn->iocb.un.fcpi.fcpi_parm =
12199 pIocbOut->iocb.un.fcpi.fcpi_parm -
12200 wcqe->total_data_placed;
12201 else
12202 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
James Smart695a8142010-01-26 23:08:03 -050012203 else {
James Smart4f774512009-05-22 14:52:35 -040012204 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
James Smartaf227412013-10-10 12:23:10 -040012205 switch (pIocbOut->iocb.ulpCommand) {
12206 case CMD_ELS_REQUEST64_CR:
12207 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12208 bpl = (struct ulp_bde64 *)dmabuf->virt;
12209 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
12210 max_response = bde.tus.f.bdeSize;
12211 break;
12212 case CMD_GEN_REQUEST64_CR:
12213 max_response = 0;
12214 if (!pIocbOut->context3)
12215 break;
12216 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
12217 sizeof(struct ulp_bde64);
12218 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12219 bpl = (struct ulp_bde64 *)dmabuf->virt;
12220 for (i = 0; i < numBdes; i++) {
12221 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
12222 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
12223 max_response += bde.tus.f.bdeSize;
12224 }
12225 break;
12226 default:
12227 max_response = wcqe->total_data_placed;
12228 break;
12229 }
12230 if (max_response < wcqe->total_data_placed)
12231 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
12232 else
12233 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
12234 wcqe->total_data_placed;
James Smart695a8142010-01-26 23:08:03 -050012235 }
James Smart341af102010-01-26 23:07:37 -050012236
James Smartacd68592012-01-18 16:25:09 -050012237 /* Convert BG errors for completion status */
12238 if (status == CQE_STATUS_DI_ERROR) {
12239 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
12240
12241 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
12242 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
12243 else
12244 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
12245
12246 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
12247 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
12248 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12249 BGS_GUARD_ERR_MASK;
12250 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
12251 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12252 BGS_APPTAG_ERR_MASK;
12253 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
12254 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12255 BGS_REFTAG_ERR_MASK;
12256
12257 /* Check to see if there was any good data before the error */
12258 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
12259 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12260 BGS_HI_WATER_MARK_PRESENT_MASK;
12261 pIocbIn->iocb.unsli3.sli3_bg.bghm =
12262 wcqe->total_data_placed;
12263 }
12264
12265 /*
12266 * Set ALL the error bits to indicate we don't know what
12267 * type of error it is.
12268 */
12269 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
12270 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12271 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
12272 BGS_GUARD_ERR_MASK);
12273 }
12274
James Smart341af102010-01-26 23:07:37 -050012275 /* Pick up HBA exchange busy condition */
12276 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
12277 spin_lock_irqsave(&phba->hbalock, iflags);
12278 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
12279 spin_unlock_irqrestore(&phba->hbalock, iflags);
12280 }
James Smart4f774512009-05-22 14:52:35 -040012281}
12282
12283/**
James Smart45ed1192009-10-02 15:17:02 -040012284 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
12285 * @phba: Pointer to HBA context object.
12286 * @wcqe: Pointer to work-queue completion queue entry.
12287 *
12288 * This routine handles an ELS work-queue completion event and constructs
12289 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
12290 * discovery engine to handle.
12291 *
12292 * Return: Pointer to the receive IOCBQ, NULL otherwise.
12293 **/
12294static struct lpfc_iocbq *
12295lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
12296 struct lpfc_iocbq *irspiocbq)
12297{
James Smart895427b2017-02-12 13:52:30 -080012298 struct lpfc_sli_ring *pring;
James Smart45ed1192009-10-02 15:17:02 -040012299 struct lpfc_iocbq *cmdiocbq;
12300 struct lpfc_wcqe_complete *wcqe;
12301 unsigned long iflags;
12302
James Smart895427b2017-02-12 13:52:30 -080012303 pring = lpfc_phba_elsring(phba);
12304
James Smart45ed1192009-10-02 15:17:02 -040012305 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
James Smart7e56aa22012-08-03 12:35:34 -040012306 spin_lock_irqsave(&pring->ring_lock, iflags);
James Smart45ed1192009-10-02 15:17:02 -040012307 pring->stats.iocb_event++;
12308 /* Look up the ELS command IOCB and create pseudo response IOCB */
12309 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
12310 bf_get(lpfc_wcqe_c_request_tag, wcqe));
James Smart89533e92016-10-13 15:06:15 -070012311 /* Put the iocb back on the txcmplq */
12312 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
James Smart7e56aa22012-08-03 12:35:34 -040012313 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart45ed1192009-10-02 15:17:02 -040012314
12315 if (unlikely(!cmdiocbq)) {
12316 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12317 "0386 ELS complete with no corresponding "
12318 "cmdiocb: iotag (%d)\n",
12319 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12320 lpfc_sli_release_iocbq(phba, irspiocbq);
12321 return NULL;
12322 }
12323
12324 /* Fake the irspiocbq and copy necessary response information */
James Smart341af102010-01-26 23:07:37 -050012325 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
James Smart45ed1192009-10-02 15:17:02 -040012326
12327 return irspiocbq;
12328}
12329
12330/**
James Smart04c68492009-05-22 14:52:52 -040012331 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
12332 * @phba: Pointer to HBA context object.
12333 * @cqe: Pointer to mailbox completion queue entry.
12334 *
12335 * This routine processes a mailbox completion queue entry with an asynchronous
12336 * event.
12337 *
12338 * Return: true if work posted to worker thread, otherwise false.
12339 **/
12340static bool
12341lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12342{
12343 struct lpfc_cq_event *cq_event;
12344 unsigned long iflags;
12345
12346 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12347 "0392 Async Event: word0:x%x, word1:x%x, "
12348 "word2:x%x, word3:x%x\n", mcqe->word0,
12349 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
12350
12351 /* Allocate a new internal CQ_EVENT entry */
12352 cq_event = lpfc_sli4_cq_event_alloc(phba);
12353 if (!cq_event) {
12354 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12355 "0394 Failed to allocate CQ_EVENT entry\n");
12356 return false;
12357 }
12358
12359 /* Move the CQE into an asynchronous event entry */
12360 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
12361 spin_lock_irqsave(&phba->hbalock, iflags);
12362 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
12363 /* Set the async event flag */
12364 phba->hba_flag |= ASYNC_EVENT;
12365 spin_unlock_irqrestore(&phba->hbalock, iflags);
12366
12367 return true;
12368}
12369
12370/**
12371 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
12372 * @phba: Pointer to HBA context object.
12373 * @cqe: Pointer to mailbox completion queue entry.
12374 *
12375 * This routine processes a mailbox completion queue entry with a mailbox
12376 * completion event.
12377 *
12378 * Return: true if work posted to worker thread, otherwise false.
12379 **/
12380static bool
12381lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12382{
12383 uint32_t mcqe_status;
12384 MAILBOX_t *mbox, *pmbox;
12385 struct lpfc_mqe *mqe;
12386 struct lpfc_vport *vport;
12387 struct lpfc_nodelist *ndlp;
12388 struct lpfc_dmabuf *mp;
12389 unsigned long iflags;
12390 LPFC_MBOXQ_t *pmb;
12391 bool workposted = false;
12392 int rc;
12393
12394	/* If not a mailbox complete MCQE, bail out after checking mailbox consume */
12395 if (!bf_get(lpfc_trailer_completed, mcqe))
12396 goto out_no_mqe_complete;
12397
12398 /* Get the reference to the active mbox command */
12399 spin_lock_irqsave(&phba->hbalock, iflags);
12400 pmb = phba->sli.mbox_active;
12401 if (unlikely(!pmb)) {
12402 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
12403 "1832 No pending MBOX command to handle\n");
12404 spin_unlock_irqrestore(&phba->hbalock, iflags);
12405 goto out_no_mqe_complete;
12406 }
12407 spin_unlock_irqrestore(&phba->hbalock, iflags);
12408 mqe = &pmb->u.mqe;
12409 pmbox = (MAILBOX_t *)&pmb->u.mqe;
12410 mbox = phba->mbox;
12411 vport = pmb->vport;
12412
12413 /* Reset heartbeat timer */
12414 phba->last_completion_time = jiffies;
12415 del_timer(&phba->sli.mbox_tmo);
12416
12417 /* Move mbox data to caller's mailbox region, do endian swapping */
12418 if (pmb->mbox_cmpl && mbox)
12419 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
James Smart04c68492009-05-22 14:52:52 -040012420
James Smart73d91e52011-10-10 21:32:10 -040012421 /*
12422 * For mcqe errors, conditionally move a modified error code to
12423 * the mbox so that the error will not be missed.
12424 */
12425 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
12426 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
12427 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
12428 bf_set(lpfc_mqe_status, mqe,
12429 (LPFC_MBX_ERROR_RANGE | mcqe_status));
12430 }
James Smart04c68492009-05-22 14:52:52 -040012431 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12432 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12433 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
12434 "MBOX dflt rpi: status:x%x rpi:x%x",
12435 mcqe_status,
12436 pmbox->un.varWords[0], 0);
12437 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
12438 mp = (struct lpfc_dmabuf *)(pmb->context1);
12439 ndlp = (struct lpfc_nodelist *)pmb->context2;
12440			/* Reg_LOGIN of dflt RPI was successful. Now let's get
12441			 * rid of the RPI using the same mbox buffer.
12442 */
12443 lpfc_unreg_login(phba, vport->vpi,
12444 pmbox->un.varWords[0], pmb);
12445 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
12446 pmb->context1 = mp;
12447 pmb->context2 = ndlp;
12448 pmb->vport = vport;
12449 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
12450 if (rc != MBX_BUSY)
12451 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12452 LOG_SLI, "0385 rc should "
12453 "have been MBX_BUSY\n");
12454 if (rc != MBX_NOT_FINISHED)
12455 goto send_current_mbox;
12456 }
12457 }
12458 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
12459 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
12460 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
12461
12462 /* There is mailbox completion work to do */
12463 spin_lock_irqsave(&phba->hbalock, iflags);
12464 __lpfc_mbox_cmpl_put(phba, pmb);
12465 phba->work_ha |= HA_MBATT;
12466 spin_unlock_irqrestore(&phba->hbalock, iflags);
12467 workposted = true;
12468
12469send_current_mbox:
12470 spin_lock_irqsave(&phba->hbalock, iflags);
12471 /* Release the mailbox command posting token */
12472 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
12473	/* Setting the active mailbox pointer needs to stay in sync with clearing the flag */
12474 phba->sli.mbox_active = NULL;
12475 spin_unlock_irqrestore(&phba->hbalock, iflags);
12476 /* Wake up worker thread to post the next pending mailbox command */
12477 lpfc_worker_wake_up(phba);
12478out_no_mqe_complete:
12479 if (bf_get(lpfc_trailer_consumed, mcqe))
12480 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
12481 return workposted;
12482}
12483
12484/**
12485 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
12486 * @phba: Pointer to HBA context object.
12487 * @cqe: Pointer to mailbox completion queue entry.
12488 *
12489 * This routine processes a mailbox completion queue entry; it invokes the
12490 * proper mailbox completion handling or asynchronous event handling routine
12491 * according to the MCQE's async bit.
12492 *
12493 * Return: true if work posted to worker thread, otherwise false.
12494 **/
12495static bool
12496lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
12497{
12498 struct lpfc_mcqe mcqe;
12499 bool workposted;
12500
12501 /* Copy the mailbox MCQE and convert endian order as needed */
12502 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
12503
12504 /* Invoke the proper event handling routine */
12505 if (!bf_get(lpfc_trailer_async, &mcqe))
12506 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
12507 else
12508 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
12509 return workposted;
12510}
12511
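/*
 * The workposted value returned by the slow-path CQE handlers above is meant
 * to be accumulated by the CQ processing loop so the worker thread is only
 * woken when needed. Sketch of the assumed caller pattern (the real loop is
 * outside this excerpt):
 *
 *	workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
 *	...
 *	if (workposted)
 *		lpfc_worker_wake_up(phba);
 */
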
12512/**
James Smart4f774512009-05-22 14:52:35 -040012513 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
12514 * @phba: Pointer to HBA context object.
James Smart2a76a282012-08-03 12:35:54 -040012515 * @cq: Pointer to associated CQ
James Smart4f774512009-05-22 14:52:35 -040012516 * @wcqe: Pointer to work-queue completion queue entry.
12517 *
12518 * This routine handles an ELS work-queue completion event.
12519 *
12520 * Return: true if work posted to worker thread, otherwise false.
12521 **/
12522static bool
James Smart2a76a282012-08-03 12:35:54 -040012523lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
James Smart4f774512009-05-22 14:52:35 -040012524 struct lpfc_wcqe_complete *wcqe)
12525{
James Smart4f774512009-05-22 14:52:35 -040012526 struct lpfc_iocbq *irspiocbq;
12527 unsigned long iflags;
James Smart2a76a282012-08-03 12:35:54 -040012528 struct lpfc_sli_ring *pring = cq->pring;
James Smart0e9bb8d2013-03-01 16:35:12 -050012529 int txq_cnt = 0;
12530 int txcmplq_cnt = 0;
12531 int fcp_txcmplq_cnt = 0;
James Smart4f774512009-05-22 14:52:35 -040012532
James Smart45ed1192009-10-02 15:17:02 -040012533 /* Get an irspiocbq for later ELS response processing use */
James Smart4f774512009-05-22 14:52:35 -040012534 irspiocbq = lpfc_sli_get_iocbq(phba);
12535 if (!irspiocbq) {
James Smart0e9bb8d2013-03-01 16:35:12 -050012536 if (!list_empty(&pring->txq))
12537 txq_cnt++;
12538 if (!list_empty(&pring->txcmplq))
12539 txcmplq_cnt++;
James Smart4f774512009-05-22 14:52:35 -040012540 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart2a9bf3d2010-06-07 15:24:45 -040012541 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
12542 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
James Smart0e9bb8d2013-03-01 16:35:12 -050012543 txq_cnt, phba->iocb_cnt,
12544 fcp_txcmplq_cnt,
12545 txcmplq_cnt);
James Smart45ed1192009-10-02 15:17:02 -040012546 return false;
James Smart4f774512009-05-22 14:52:35 -040012547 }
James Smart4f774512009-05-22 14:52:35 -040012548
James Smart45ed1192009-10-02 15:17:02 -040012549 /* Save off the slow-path queue event for work thread to process */
12550 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
James Smart4f774512009-05-22 14:52:35 -040012551 spin_lock_irqsave(&phba->hbalock, iflags);
James Smart4d9ab992009-10-02 15:16:39 -040012552 list_add_tail(&irspiocbq->cq_event.list,
James Smart45ed1192009-10-02 15:17:02 -040012553 &phba->sli4_hba.sp_queue_event);
12554 phba->hba_flag |= HBA_SP_QUEUE_EVT;
James Smart4f774512009-05-22 14:52:35 -040012555 spin_unlock_irqrestore(&phba->hbalock, iflags);
James Smart4f774512009-05-22 14:52:35 -040012556
James Smart45ed1192009-10-02 15:17:02 -040012557 return true;
James Smart4f774512009-05-22 14:52:35 -040012558}
12559
12560/**
12561 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
12562 * @phba: Pointer to HBA context object.
12563 * @wcqe: Pointer to work-queue completion queue entry.
12564 *
12565 * This routine handles a slow-path WQ entry consumed event by invoking the
12566 * proper WQ release routine for the slow-path WQ.
12567 **/
12568static void
12569lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
12570 struct lpfc_wcqe_release *wcqe)
12571{
James Smart2e90f4b2011-12-13 13:22:37 -050012572 /* sanity check on queue memory */
12573 if (unlikely(!phba->sli4_hba.els_wq))
12574 return;
James Smart4f774512009-05-22 14:52:35 -040012575 /* Check for the slow-path ELS work queue */
12576 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
12577 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
12578 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
12579 else
12580 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12581 "2579 Slow-path wqe consume event carries "
12582 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
12583 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
12584 phba->sli4_hba.els_wq->queue_id);
12585}
12586
12587/**
12588 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an xri abort event
12589 * @phba: Pointer to HBA context object.
12590 * @cq: Pointer to a WQ completion queue.
12591 * @wcqe: Pointer to work-queue completion queue entry.
12592 *
12593 * This routine handles an XRI abort event.
12594 *
12595 * Return: true if work posted to worker thread, otherwise false.
12596 **/
12597static bool
12598lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
12599 struct lpfc_queue *cq,
12600 struct sli4_wcqe_xri_aborted *wcqe)
12601{
12602 bool workposted = false;
12603 struct lpfc_cq_event *cq_event;
12604 unsigned long iflags;
12605
12606 /* Allocate a new internal CQ_EVENT entry */
12607 cq_event = lpfc_sli4_cq_event_alloc(phba);
12608 if (!cq_event) {
12609 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12610 "0602 Failed to allocate CQ_EVENT entry\n");
12611 return false;
12612 }
12613
12614 /* Move the CQE into the proper xri abort event list */
12615 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
12616 switch (cq->subtype) {
12617 case LPFC_FCP:
12618 spin_lock_irqsave(&phba->hbalock, iflags);
12619 list_add_tail(&cq_event->list,
12620 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
12621 /* Set the fcp xri abort event flag */
12622 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
12623 spin_unlock_irqrestore(&phba->hbalock, iflags);
12624 workposted = true;
12625 break;
12626 case LPFC_ELS:
12627 spin_lock_irqsave(&phba->hbalock, iflags);
12628 list_add_tail(&cq_event->list,
12629 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
12630 /* Set the els xri abort event flag */
12631 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
12632 spin_unlock_irqrestore(&phba->hbalock, iflags);
12633 workposted = true;
12634 break;
12635 default:
12636 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12637 "0603 Invalid work queue CQE subtype (x%x)\n",
12638 cq->subtype);
12639 workposted = false;
12640 break;
12641 }
12642 return workposted;
12643}
12644
12645/**
James Smart4d9ab992009-10-02 15:16:39 -040012646 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
James Smart4f774512009-05-22 14:52:35 -040012647 * @phba: Pointer to HBA context object.
James Smart4d9ab992009-10-02 15:16:39 -040012648 * @rcqe: Pointer to receive-queue completion queue entry.
James Smart4f774512009-05-22 14:52:35 -040012649 *
James Smart4d9ab992009-10-02 15:16:39 -040012650 * This routine processes a receive-queue completion queue entry.
James Smart4f774512009-05-22 14:52:35 -040012651 *
12652 * Return: true if work posted to worker thread, otherwise false.
12653 **/
12654static bool
James Smart4d9ab992009-10-02 15:16:39 -040012655lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
12656{
12657 bool workposted = false;
James Smart895427b2017-02-12 13:52:30 -080012658 struct fc_frame_header *fc_hdr;
James Smart4d9ab992009-10-02 15:16:39 -040012659 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
12660 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
12661 struct hbq_dmabuf *dma_buf;
James Smart7851fe22011-07-22 18:36:52 -040012662 uint32_t status, rq_id;
James Smart4d9ab992009-10-02 15:16:39 -040012663 unsigned long iflags;
12664
James Smart2e90f4b2011-12-13 13:22:37 -050012665 /* sanity check on queue memory */
12666 if (unlikely(!hrq) || unlikely(!drq))
12667 return workposted;
12668
James Smart7851fe22011-07-22 18:36:52 -040012669 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
12670 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
12671 else
12672 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
12673 if (rq_id != hrq->queue_id)
James Smart4d9ab992009-10-02 15:16:39 -040012674 goto out;
12675
12676 status = bf_get(lpfc_rcqe_status, rcqe);
12677 switch (status) {
12678 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
12679 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12680 "2537 Receive Frame Truncated!!\n");
James Smartb84daac2012-08-03 12:35:13 -040012681 hrq->RQ_buf_trunc++;
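		/* fall through - the truncated frame is still consumed below */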
James Smart4d9ab992009-10-02 15:16:39 -040012682 case FC_STATUS_RQ_SUCCESS:
James Smart5ffc2662009-11-18 15:39:44 -050012683 lpfc_sli4_rq_release(hrq, drq);
James Smart4d9ab992009-10-02 15:16:39 -040012684 spin_lock_irqsave(&phba->hbalock, iflags);
12685 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
12686 if (!dma_buf) {
James Smartb84daac2012-08-03 12:35:13 -040012687 hrq->RQ_no_buf_found++;
James Smart4d9ab992009-10-02 15:16:39 -040012688 spin_unlock_irqrestore(&phba->hbalock, iflags);
12689 goto out;
12690 }
James Smartb84daac2012-08-03 12:35:13 -040012691 hrq->RQ_rcv_buf++;
James Smart4d9ab992009-10-02 15:16:39 -040012692 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
James Smart895427b2017-02-12 13:52:30 -080012693
12694	/* If an NVME LS event (type 0x28), treat it as Fast path */
12695 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
12696
James Smart4d9ab992009-10-02 15:16:39 -040012697	/* save off the frame for the worker thread to process */
12698 list_add_tail(&dma_buf->cq_event.list,
James Smart45ed1192009-10-02 15:17:02 -040012699 &phba->sli4_hba.sp_queue_event);
James Smart4d9ab992009-10-02 15:16:39 -040012700 /* Frame received */
James Smart45ed1192009-10-02 15:17:02 -040012701 phba->hba_flag |= HBA_SP_QUEUE_EVT;
James Smart4d9ab992009-10-02 15:16:39 -040012702 spin_unlock_irqrestore(&phba->hbalock, iflags);
12703 workposted = true;
12704 break;
12705 case FC_STATUS_INSUFF_BUF_NEED_BUF:
12706 case FC_STATUS_INSUFF_BUF_FRM_DISC:
James Smartb84daac2012-08-03 12:35:13 -040012707 hrq->RQ_no_posted_buf++;
James Smart4d9ab992009-10-02 15:16:39 -040012708 /* Post more buffers if possible */
12709 spin_lock_irqsave(&phba->hbalock, iflags);
12710 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
12711 spin_unlock_irqrestore(&phba->hbalock, iflags);
12712 workposted = true;
12713 break;
12714 }
12715out:
12716 return workposted;
James Smart4d9ab992009-10-02 15:16:39 -040012717}
12718
12719/**
12720 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
12721 * @phba: Pointer to HBA context object.
12722 * @cq: Pointer to the completion queue.
12723 * @wcqe: Pointer to a completion queue entry.
12724 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030012725 * This routine processes a slow-path work-queue or receive-queue completion queue
James Smart4d9ab992009-10-02 15:16:39 -040012726 * entry.
12727 *
12728 * Return: true if work posted to worker thread, otherwise false.
12729 **/
12730static bool
12731lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
James Smart4f774512009-05-22 14:52:35 -040012732 struct lpfc_cqe *cqe)
12733{
James Smart45ed1192009-10-02 15:17:02 -040012734 struct lpfc_cqe cqevt;
James Smart4f774512009-05-22 14:52:35 -040012735 bool workposted = false;
12736
12737 /* Copy the work queue CQE and convert endian order if needed */
James Smart45ed1192009-10-02 15:17:02 -040012738 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
James Smart4f774512009-05-22 14:52:35 -040012739
12740 /* Check and process for different type of WCQE and dispatch */
James Smart45ed1192009-10-02 15:17:02 -040012741 switch (bf_get(lpfc_cqe_code, &cqevt)) {
James Smart4f774512009-05-22 14:52:35 -040012742 case CQE_CODE_COMPL_WQE:
James Smart45ed1192009-10-02 15:17:02 -040012743 /* Process the WQ/RQ complete event */
James Smartbc739052010-08-04 16:11:18 -040012744 phba->last_completion_time = jiffies;
James Smart2a76a282012-08-03 12:35:54 -040012745 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
James Smart45ed1192009-10-02 15:17:02 -040012746 (struct lpfc_wcqe_complete *)&cqevt);
James Smart4f774512009-05-22 14:52:35 -040012747 break;
12748 case CQE_CODE_RELEASE_WQE:
12749 /* Process the WQ release event */
12750 lpfc_sli4_sp_handle_rel_wcqe(phba,
James Smart45ed1192009-10-02 15:17:02 -040012751 (struct lpfc_wcqe_release *)&cqevt);
James Smart4f774512009-05-22 14:52:35 -040012752 break;
12753 case CQE_CODE_XRI_ABORTED:
12754 /* Process the WQ XRI abort event */
James Smartbc739052010-08-04 16:11:18 -040012755 phba->last_completion_time = jiffies;
James Smart4f774512009-05-22 14:52:35 -040012756 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
James Smart45ed1192009-10-02 15:17:02 -040012757 (struct sli4_wcqe_xri_aborted *)&cqevt);
James Smart4f774512009-05-22 14:52:35 -040012758 break;
James Smart4d9ab992009-10-02 15:16:39 -040012759 case CQE_CODE_RECEIVE:
James Smart7851fe22011-07-22 18:36:52 -040012760 case CQE_CODE_RECEIVE_V1:
James Smart4d9ab992009-10-02 15:16:39 -040012761 /* Process the RQ event */
James Smartbc739052010-08-04 16:11:18 -040012762 phba->last_completion_time = jiffies;
James Smart4d9ab992009-10-02 15:16:39 -040012763 workposted = lpfc_sli4_sp_handle_rcqe(phba,
James Smart45ed1192009-10-02 15:17:02 -040012764 (struct lpfc_rcqe *)&cqevt);
James Smart4d9ab992009-10-02 15:16:39 -040012765 break;
James Smart4f774512009-05-22 14:52:35 -040012766 default:
12767 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12768 "0388 Not a valid WCQE code: x%x\n",
James Smart45ed1192009-10-02 15:17:02 -040012769 bf_get(lpfc_cqe_code, &cqevt));
James Smart4f774512009-05-22 14:52:35 -040012770 break;
12771 }
12772 return workposted;
12773}
12774
12775/**
James Smart4f774512009-05-22 14:52:35 -040012776 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
12777 * @phba: Pointer to HBA context object.
12778 * @eqe: Pointer to fast-path event queue entry.
12779 *
12780 * This routine processes an event queue entry from the slow-path event queue.
12781 * It checks the MajorCode and MinorCode to determine whether this is a
12782 * completion event on a completion queue; if not, an error is logged and the
12783 * routine returns. Otherwise, it finds the corresponding completion queue,
12784 * processes all the entries on that completion queue, rearms the
12785 * completion queue, and then returns.
12786 *
12787 **/
12788static void
James Smart67d12732012-08-03 12:36:13 -040012789lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
12790 struct lpfc_queue *speq)
James Smart4f774512009-05-22 14:52:35 -040012791{
James Smart67d12732012-08-03 12:36:13 -040012792 struct lpfc_queue *cq = NULL, *childq;
James Smart4f774512009-05-22 14:52:35 -040012793 struct lpfc_cqe *cqe;
12794 bool workposted = false;
12795 int ecount = 0;
12796 uint16_t cqid;
12797
James Smart4f774512009-05-22 14:52:35 -040012798 /* Get the reference to the corresponding CQ */
James Smartcb5172e2010-03-15 11:25:07 -040012799 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
James Smart4f774512009-05-22 14:52:35 -040012800
James Smart4f774512009-05-22 14:52:35 -040012801 list_for_each_entry(childq, &speq->child_list, list) {
12802 if (childq->queue_id == cqid) {
12803 cq = childq;
12804 break;
12805 }
12806 }
12807 if (unlikely(!cq)) {
James Smart75baf692010-06-08 18:31:21 -040012808 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
12809 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12810 "0365 Slow-path CQ identifier "
12811 "(%d) does not exist\n", cqid);
James Smart4f774512009-05-22 14:52:35 -040012812 return;
12813 }
12814
James Smart895427b2017-02-12 13:52:30 -080012815 /* Save EQ associated with this CQ */
12816 cq->assoc_qp = speq;
12817
James Smart4f774512009-05-22 14:52:35 -040012818 /* Process all the entries to the CQ */
12819 switch (cq->type) {
12820 case LPFC_MCQ:
12821 while ((cqe = lpfc_sli4_cq_get(cq))) {
12822 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
James Smart73d91e52011-10-10 21:32:10 -040012823 if (!(++ecount % cq->entry_repost))
James Smart4f774512009-05-22 14:52:35 -040012824 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
James Smartb84daac2012-08-03 12:35:13 -040012825 cq->CQ_mbox++;
James Smart4f774512009-05-22 14:52:35 -040012826 }
12827 break;
12828 case LPFC_WCQ:
12829 while ((cqe = lpfc_sli4_cq_get(cq))) {
James Smart895427b2017-02-12 13:52:30 -080012830 if ((cq->subtype == LPFC_FCP) ||
12831 (cq->subtype == LPFC_NVME))
12832 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
James Smart05580562011-05-24 11:40:48 -040012833 cqe);
12834 else
12835 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
12836 cqe);
James Smart73d91e52011-10-10 21:32:10 -040012837 if (!(++ecount % cq->entry_repost))
James Smart4f774512009-05-22 14:52:35 -040012838 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
12839 }
James Smartb84daac2012-08-03 12:35:13 -040012840
12841 /* Track the max number of CQEs processed in 1 EQ */
12842 if (ecount > cq->CQ_max_cqe)
12843 cq->CQ_max_cqe = ecount;
James Smart4f774512009-05-22 14:52:35 -040012844 break;
12845 default:
12846 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12847 "0370 Invalid completion queue type (%d)\n",
12848 cq->type);
12849 return;
12850 }
12851
12852 /* Catch the no cq entry condition, log an error */
12853 if (unlikely(ecount == 0))
12854 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12855 "0371 No entry from the CQ: identifier "
12856 "(x%x), type (%d)\n", cq->queue_id, cq->type);
12857
12858	/* In any case, flush and re-arm the CQ */
12859 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
12860
12861	/* wake up worker thread if there is work to be done */
12862 if (workposted)
12863 lpfc_worker_wake_up(phba);
12864}
12865
12866/**
12867 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
James Smart2a76a282012-08-03 12:35:54 -040012868 * @phba: Pointer to HBA context object.
12869 * @cq: Pointer to associated CQ
12870 * @wcqe: Pointer to work-queue completion queue entry.
James Smart4f774512009-05-22 14:52:35 -040012871 *
12872 * This routine processes a fast-path work-queue completion entry from the
12873 * fast-path event queue for FCP command response completion.
12874 **/
12875static void
James Smart2a76a282012-08-03 12:35:54 -040012876lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
James Smart4f774512009-05-22 14:52:35 -040012877 struct lpfc_wcqe_complete *wcqe)
12878{
James Smart2a76a282012-08-03 12:35:54 -040012879 struct lpfc_sli_ring *pring = cq->pring;
James Smart4f774512009-05-22 14:52:35 -040012880 struct lpfc_iocbq *cmdiocbq;
12881 struct lpfc_iocbq irspiocbq;
12882 unsigned long iflags;
12883
James Smart4f774512009-05-22 14:52:35 -040012884 /* Check for response status */
12885 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
12886 /* If resource errors reported from HBA, reduce queue
12887 * depth of the SCSI device.
12888 */
James Smarte3d2b802012-08-14 14:25:43 -040012889 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
12890 IOSTAT_LOCAL_REJECT)) &&
12891 ((wcqe->parameter & IOERR_PARAM_MASK) ==
12892 IOERR_NO_RESOURCES))
James Smart4f774512009-05-22 14:52:35 -040012893 phba->lpfc_rampdown_queue_depth(phba);
James Smarte3d2b802012-08-14 14:25:43 -040012894
James Smart4f774512009-05-22 14:52:35 -040012895 /* Log the error status */
12896 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12897 "0373 FCP complete error: status=x%x, "
12898 "hw_status=x%x, total_data_specified=%d, "
12899 "parameter=x%x, word3=x%x\n",
12900 bf_get(lpfc_wcqe_c_status, wcqe),
12901 bf_get(lpfc_wcqe_c_hw_status, wcqe),
12902 wcqe->total_data_placed, wcqe->parameter,
12903 wcqe->word3);
12904 }
12905
12906 /* Look up the FCP command IOCB and create pseudo response IOCB */
James Smart7e56aa22012-08-03 12:35:34 -040012907 spin_lock_irqsave(&pring->ring_lock, iflags);
12908 pring->stats.iocb_event++;
James Smart4f774512009-05-22 14:52:35 -040012909 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
12910 bf_get(lpfc_wcqe_c_request_tag, wcqe));
James Smart7e56aa22012-08-03 12:35:34 -040012911 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart4f774512009-05-22 14:52:35 -040012912 if (unlikely(!cmdiocbq)) {
12913 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12914 "0374 FCP complete with no corresponding "
12915 "cmdiocb: iotag (%d)\n",
12916 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12917 return;
12918 }
James Smart895427b2017-02-12 13:52:30 -080012919
12920 if (cq->assoc_qp)
12921 cmdiocbq->isr_timestamp =
12922 cq->assoc_qp->isr_timestamp;
12923
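	/* If no legacy iocb_cmpl is set, hand the raw WCQE to the wqe_cmpl
	 * handler (e.g. the NVME path); otherwise build a pseudo response
	 * IOCB and invoke iocb_cmpl below.
	 */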
12924 if (cmdiocbq->iocb_cmpl == NULL) {
12925 if (cmdiocbq->wqe_cmpl) {
12926 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
12927 spin_lock_irqsave(&phba->hbalock, iflags);
12928 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
12929 spin_unlock_irqrestore(&phba->hbalock, iflags);
12930 }
12931
12932 /* Pass the cmd_iocb and the wcqe to the upper layer */
12933 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
12934 return;
12935 }
James Smart4f774512009-05-22 14:52:35 -040012936 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12937 "0375 FCP cmdiocb not callback function "
12938 "iotag: (%d)\n",
12939 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12940 return;
12941 }
12942
12943 /* Fake the irspiocb and copy necessary response information */
James Smart341af102010-01-26 23:07:37 -050012944 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
James Smart4f774512009-05-22 14:52:35 -040012945
James Smart0f65ff62010-02-26 14:14:23 -050012946 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
12947 spin_lock_irqsave(&phba->hbalock, iflags);
12948 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
12949 spin_unlock_irqrestore(&phba->hbalock, iflags);
12950 }
12951
James Smart4f774512009-05-22 14:52:35 -040012952 /* Pass the cmd_iocb and the rsp state to the upper layer */
12953 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
12954}
12955
12956/**
12957 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
12958 * @phba: Pointer to HBA context object.
12959 * @cq: Pointer to completion queue.
12960 * @wcqe: Pointer to work-queue completion queue entry.
12961 *
12962 * This routine handles a fast-path WQ entry consumed event by invoking the
12963 * proper WQ release routine on the matching fast-path WQ.
12964 **/
12965static void
12966lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12967 struct lpfc_wcqe_release *wcqe)
12968{
12969 struct lpfc_queue *childwq;
12970 bool wqid_matched = false;
James Smart895427b2017-02-12 13:52:30 -080012971 uint16_t hba_wqid;
James Smart4f774512009-05-22 14:52:35 -040012972
12973 /* Check for fast-path FCP work queue release */
James Smart895427b2017-02-12 13:52:30 -080012974 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
James Smart4f774512009-05-22 14:52:35 -040012975 list_for_each_entry(childwq, &cq->child_list, list) {
James Smart895427b2017-02-12 13:52:30 -080012976 if (childwq->queue_id == hba_wqid) {
James Smart4f774512009-05-22 14:52:35 -040012977 lpfc_sli4_wq_release(childwq,
12978 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
12979 wqid_matched = true;
12980 break;
12981 }
12982 }
12983 /* Report warning log message if no match found */
12984 if (wqid_matched != true)
12985 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12986 "2580 Fast-path wqe consume event carries "
James Smart895427b2017-02-12 13:52:30 -080012987 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
James Smart4f774512009-05-22 14:52:35 -040012988}
12989
12990/**
James Smart895427b2017-02-12 13:52:30 -080012991 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
James Smart4f774512009-05-22 14:52:35 -040012992 * @cq: Pointer to the completion queue.
12993 * @cqe: Pointer to fast-path completion queue entry.
12994 *
12995 * This routine processes a fast-path work-queue completion entry from the
12996 * fast-path event queue for FCP command response completion.
12997 **/
12998static int
James Smart895427b2017-02-12 13:52:30 -080012999lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
James Smart4f774512009-05-22 14:52:35 -040013000 struct lpfc_cqe *cqe)
13001{
13002 struct lpfc_wcqe_release wcqe;
13003 bool workposted = false;
13004
13005 /* Copy the work queue CQE and convert endian order if needed */
13006 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
13007
13008 /* Check and process for different type of WCQE and dispatch */
13009 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
13010 case CQE_CODE_COMPL_WQE:
James Smart895427b2017-02-12 13:52:30 -080013011 case CQE_CODE_NVME_ERSP:
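		/* NVME ERSP completions are handled like normal WQ completions */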
James Smartb84daac2012-08-03 12:35:13 -040013012 cq->CQ_wq++;
James Smart4f774512009-05-22 14:52:35 -040013013 /* Process the WQ complete event */
James Smart98fc5dd2010-06-07 15:24:29 -040013014 phba->last_completion_time = jiffies;
James Smart895427b2017-02-12 13:52:30 -080013015 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
13016 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
13017 (struct lpfc_wcqe_complete *)&wcqe);
13018 if (cq->subtype == LPFC_NVME_LS)
13019 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
James Smart4f774512009-05-22 14:52:35 -040013020 (struct lpfc_wcqe_complete *)&wcqe);
13021 break;
13022 case CQE_CODE_RELEASE_WQE:
James Smartb84daac2012-08-03 12:35:13 -040013023 cq->CQ_release_wqe++;
James Smart4f774512009-05-22 14:52:35 -040013024 /* Process the WQ release event */
13025 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
13026 (struct lpfc_wcqe_release *)&wcqe);
13027 break;
13028 case CQE_CODE_XRI_ABORTED:
James Smartb84daac2012-08-03 12:35:13 -040013029 cq->CQ_xri_aborted++;
James Smart4f774512009-05-22 14:52:35 -040013030 /* Process the WQ XRI abort event */
James Smartbc739052010-08-04 16:11:18 -040013031 phba->last_completion_time = jiffies;
James Smart4f774512009-05-22 14:52:35 -040013032 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13033 (struct sli4_wcqe_xri_aborted *)&wcqe);
13034 break;
James Smart895427b2017-02-12 13:52:30 -080013035 case CQE_CODE_RECEIVE_V1:
13036 case CQE_CODE_RECEIVE:
13037 phba->last_completion_time = jiffies;
13038 break;
James Smart4f774512009-05-22 14:52:35 -040013039 default:
13040 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart895427b2017-02-12 13:52:30 -080013041 "0144 Not a valid CQE code: x%x\n",
James Smart4f774512009-05-22 14:52:35 -040013042 bf_get(lpfc_wcqe_c_code, &wcqe));
13043 break;
13044 }
13045 return workposted;
13046}
13047
13048/**
James Smart67d12732012-08-03 12:36:13 -040013049 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
James Smart4f774512009-05-22 14:52:35 -040013050 * @phba: Pointer to HBA context object.
13051 * @eqe: Pointer to fast-path event queue entry.
13052 *
13053 * This routine processes an event queue entry from the fast-path event queue.
13054 * It checks the MajorCode and MinorCode to determine whether this is a
13055 * completion event on a completion queue; if not, an error is logged and the
13056 * routine returns. Otherwise, it finds the corresponding completion queue,
13057 * processes all the entries on the completion queue, rearms the
13058 * completion queue, and then returns.
13059 **/
13060static void
James Smart67d12732012-08-03 12:36:13 -040013061lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13062 uint32_t qidx)
James Smart4f774512009-05-22 14:52:35 -040013063{
James Smart895427b2017-02-12 13:52:30 -080013064 struct lpfc_queue *cq = NULL;
James Smart4f774512009-05-22 14:52:35 -040013065 struct lpfc_cqe *cqe;
13066 bool workposted = false;
13067 uint16_t cqid;
13068 int ecount = 0;
13069
James Smartcb5172e2010-03-15 11:25:07 -040013070 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
James Smart4f774512009-05-22 14:52:35 -040013071 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart67d12732012-08-03 12:36:13 -040013072 "0366 Not a valid completion "
James Smart4f774512009-05-22 14:52:35 -040013073 "event: majorcode=x%x, minorcode=x%x\n",
James Smartcb5172e2010-03-15 11:25:07 -040013074 bf_get_le32(lpfc_eqe_major_code, eqe),
13075 bf_get_le32(lpfc_eqe_minor_code, eqe));
James Smart4f774512009-05-22 14:52:35 -040013076 return;
13077 }
13078
James Smart67d12732012-08-03 12:36:13 -040013079 /* Get the reference to the corresponding CQ */
13080 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13081
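	/* Dispatch to the matching fast-path CQ: first the NVME/NVMET CQ map,
	 * then the FCP CQ map, then the NVME LS CQ; anything else falls back
	 * to the slow-path handler.
	 */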
James Smart895427b2017-02-12 13:52:30 -080013082 if (phba->sli4_hba.nvme_cq_map &&
13083 (cqid == phba->sli4_hba.nvme_cq_map[qidx])) {
James Smartf358dd02017-02-12 13:52:34 -080013084 /* Process NVME / NVMET command completion */
James Smart895427b2017-02-12 13:52:30 -080013085 cq = phba->sli4_hba.nvme_cq[qidx];
13086 goto process_cq;
13087 }
13088
13089 if (phba->sli4_hba.fcp_cq_map &&
13090 (cqid == phba->sli4_hba.fcp_cq_map[qidx])) {
13091 /* Process FCP command completion */
13092 cq = phba->sli4_hba.fcp_cq[qidx];
13093 goto process_cq;
13094 }
13095
13096 if (phba->sli4_hba.nvmels_cq &&
13097 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
13098 /* Process NVME unsol rcv */
13099 cq = phba->sli4_hba.nvmels_cq;
13100 }
13101
13102 /* Otherwise this is a Slow path event */
13103 if (cq == NULL) {
13104 lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
James Smart67d12732012-08-03 12:36:13 -040013105 return;
13106 }
13107
James Smart895427b2017-02-12 13:52:30 -080013108process_cq:
James Smart4f774512009-05-22 14:52:35 -040013109 if (unlikely(cqid != cq->queue_id)) {
13110 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13111 "0368 Miss-matched fast-path completion "
13112 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
13113 cqid, cq->queue_id);
13114 return;
13115 }
13116
James Smart895427b2017-02-12 13:52:30 -080013117 /* Save EQ associated with this CQ */
13118 cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
13119
James Smart4f774512009-05-22 14:52:35 -040013120 /* Process all the entries to the CQ */
13121 while ((cqe = lpfc_sli4_cq_get(cq))) {
James Smart895427b2017-02-12 13:52:30 -080013122 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
James Smart73d91e52011-10-10 21:32:10 -040013123 if (!(++ecount % cq->entry_repost))
James Smart4f774512009-05-22 14:52:35 -040013124 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
13125 }
13126
James Smartb84daac2012-08-03 12:35:13 -040013127 /* Track the max number of CQEs processed in 1 EQ */
13128 if (ecount > cq->CQ_max_cqe)
13129 cq->CQ_max_cqe = ecount;
13130
James Smart4f774512009-05-22 14:52:35 -040013131 /* Catch the no cq entry condition */
13132 if (unlikely(ecount == 0))
13133 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13134 "0369 No entry from fast-path completion "
13135 "queue fcpcqid=%d\n", cq->queue_id);
13136
13137	/* In any case, flush and re-arm the CQ */
13138 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
13139
13140 /* wake up worker thread if there are works to be done */
13141	/* wake up worker thread if there is work to be done */
13142 lpfc_worker_wake_up(phba);
13143}
13144
13145static void
13146lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
13147{
13148 struct lpfc_eqe *eqe;
13149
13150 /* walk all the EQ entries and drop on the floor */
13151 while ((eqe = lpfc_sli4_eq_get(eq)))
13152 ;
13153
13154 /* Clear and re-arm the EQ */
13155 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
13156}
13157
James Smart1ba981f2014-02-20 09:56:45 -050013158
13159/**
13160 * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
13161 * entry
13162 * @phba: Pointer to HBA context object.
13163 * @eqe: Pointer to fast-path event queue entry.
13164 *
13165 * This routine processes an event queue entry from the Flash Optimized Fabric
13166 * event queue. It checks the MajorCode and MinorCode to determine whether this
13167 * is a completion event on a completion queue; if not, an error is logged and
13168 * the routine returns. Otherwise, it finds the corresponding
13169 * completion queue, processes all the entries on the completion queue, rearms
13170 * the completion queue, and then returns.
13171 **/
13172static void
13173lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
13174{
13175 struct lpfc_queue *cq;
13176 struct lpfc_cqe *cqe;
13177 bool workposted = false;
13178 uint16_t cqid;
13179 int ecount = 0;
13180
13181 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
13182 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13183 "9147 Not a valid completion "
13184 "event: majorcode=x%x, minorcode=x%x\n",
13185 bf_get_le32(lpfc_eqe_major_code, eqe),
13186 bf_get_le32(lpfc_eqe_minor_code, eqe));
13187 return;
13188 }
13189
13190 /* Get the reference to the corresponding CQ */
13191 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13192
13193 /* Next check for OAS */
13194 cq = phba->sli4_hba.oas_cq;
13195 if (unlikely(!cq)) {
13196 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13197 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13198 "9148 OAS completion queue "
13199 "does not exist\n");
13200 return;
13201 }
13202
13203 if (unlikely(cqid != cq->queue_id)) {
13204 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13205 "9149 Miss-matched fast-path compl "
13206 "queue id: eqcqid=%d, fcpcqid=%d\n",
13207 cqid, cq->queue_id);
13208 return;
13209 }
13210
13211 /* Process all the entries to the OAS CQ */
13212 while ((cqe = lpfc_sli4_cq_get(cq))) {
James Smart895427b2017-02-12 13:52:30 -080013213 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
James Smart1ba981f2014-02-20 09:56:45 -050013214 if (!(++ecount % cq->entry_repost))
13215 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
13216 }
13217
13218 /* Track the max number of CQEs processed in 1 EQ */
13219 if (ecount > cq->CQ_max_cqe)
13220 cq->CQ_max_cqe = ecount;
13221
13222 /* Catch the no cq entry condition */
13223 if (unlikely(ecount == 0))
13224 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13225 "9153 No entry from fast-path completion "
13226 "queue fcpcqid=%d\n", cq->queue_id);
13227
13228	/* In any case, flush and re-arm the CQ */
13229 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
13230
13231 /* wake up worker thread if there are works to be done */
13232	/* wake up worker thread if there is work to be done */
13233 lpfc_worker_wake_up(phba);
13234}
13235
13236/**
13237 * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
13238 * @irq: Interrupt number.
13239 * @dev_id: The device context pointer.
13240 *
13241 * This function is directly called from the PCI layer as an interrupt
13242 * service routine when device with SLI-4 interface spec is enabled with
13243 * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
13244 * IOCB ring event in the HBA. However, when the device is enabled with either
13245 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13246 * device-level interrupt handler. When the PCI slot is in error recovery
13247 * or the HBA is undergoing initialization, the interrupt handler will not
13248 * process the interrupt. The Flash Optimized Fabric ring events are handled in
13249 * the interrupt context. This function is called without any lock held.
13250 * It gets the hbalock to access and update SLI data structures. Note that
13251 * the EQ and CQ are mapped one-to-one, such that the EQ index is
13252 * equal to that of the CQ index.
13253 *
13254 * This function returns IRQ_HANDLED when interrupt is handled else it
13255 * returns IRQ_NONE.
13256 **/
13257irqreturn_t
13258lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
13259{
13260 struct lpfc_hba *phba;
James Smart895427b2017-02-12 13:52:30 -080013261 struct lpfc_hba_eq_hdl *hba_eq_hdl;
James Smart1ba981f2014-02-20 09:56:45 -050013262 struct lpfc_queue *eq;
13263 struct lpfc_eqe *eqe;
13264 unsigned long iflag;
13265 int ecount = 0;
James Smart1ba981f2014-02-20 09:56:45 -050013266
13267 /* Get the driver's phba structure from the dev_id */
James Smart895427b2017-02-12 13:52:30 -080013268 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
13269 phba = hba_eq_hdl->phba;
James Smart1ba981f2014-02-20 09:56:45 -050013270
13271 if (unlikely(!phba))
13272 return IRQ_NONE;
13273
13274 /* Get to the EQ struct associated with this vector */
13275 eq = phba->sli4_hba.fof_eq;
13276 if (unlikely(!eq))
13277 return IRQ_NONE;
13278
13279 /* Check device state for handling interrupt */
13280 if (unlikely(lpfc_intr_state_check(phba))) {
13281 eq->EQ_badstate++;
13282 /* Check again for link_state with lock held */
13283 spin_lock_irqsave(&phba->hbalock, iflag);
13284 if (phba->link_state < LPFC_LINK_DOWN)
13285 /* Flush, clear interrupt, and rearm the EQ */
13286 lpfc_sli4_eq_flush(phba, eq);
13287 spin_unlock_irqrestore(&phba->hbalock, iflag);
13288 return IRQ_NONE;
13289 }
13290
13291 /*
13292	 * Process all the events on the FCP fast-path EQ
13293 */
13294 while ((eqe = lpfc_sli4_eq_get(eq))) {
13295 lpfc_sli4_fof_handle_eqe(phba, eqe);
13296 if (!(++ecount % eq->entry_repost))
13297 lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
13298 eq->EQ_processed++;
13299 }
13300
13301 /* Track the max number of EQEs processed in 1 intr */
13302 if (ecount > eq->EQ_max_eqe)
13303 eq->EQ_max_eqe = ecount;
13304
13305
13306 if (unlikely(ecount == 0)) {
13307 eq->EQ_no_entry++;
13308
13309 if (phba->intr_type == MSIX)
13310 /* MSI-X treated interrupt served as no EQ share INT */
13311 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13312 "9145 MSI-X interrupt with no EQE\n");
13313 else {
13314 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13315 "9146 ISR interrupt with no EQE\n");
13316 /* Non MSI-X treated on interrupt as EQ share INT */
13317 return IRQ_NONE;
13318 }
13319 }
13320 /* Always clear and re-arm the fast-path EQ */
13321 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
13322 return IRQ_HANDLED;
13323}
13324
James Smart4f774512009-05-22 14:52:35 -040013325/**
James Smart67d12732012-08-03 12:36:13 -040013326 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
James Smart4f774512009-05-22 14:52:35 -040013327 * @irq: Interrupt number.
13328 * @dev_id: The device context pointer.
13329 *
13330 * This function is directly called from the PCI layer as an interrupt
13331 * service routine when device with SLI-4 interface spec is enabled with
13332 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13333 * ring event in the HBA. However, when the device is enabled with either
13334 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13335 * device-level interrupt handler. When the PCI slot is in error recovery
13336 * or the HBA is undergoing initialization, the interrupt handler will not
13337 * process the interrupt. The SCSI FCP fast-path ring events are handled in
13338 * the interrupt context. This function is called without any lock held.
13339 * It gets the hbalock to access and update SLI data structures. Note that
13340 * the FCP EQ and FCP CQ are mapped one-to-one, such that the FCP EQ index is
13341 * equal to that of the FCP CQ index.
13342 *
James Smart67d12732012-08-03 12:36:13 -040013343 * The link attention and ELS ring attention events are handled
13344 * by the worker thread. The interrupt handler signals the worker thread
13345 * and returns for these events. This function is called without any lock
13346 * held. It gets the hbalock to access and update SLI data structures.
13347 *
James Smart4f774512009-05-22 14:52:35 -040013348 * This function returns IRQ_HANDLED when interrupt is handled else it
13349 * returns IRQ_NONE.
13350 **/
13351irqreturn_t
James Smart67d12732012-08-03 12:36:13 -040013352lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
James Smart4f774512009-05-22 14:52:35 -040013353{
13354 struct lpfc_hba *phba;
James Smart895427b2017-02-12 13:52:30 -080013355 struct lpfc_hba_eq_hdl *hba_eq_hdl;
James Smart4f774512009-05-22 14:52:35 -040013356 struct lpfc_queue *fpeq;
13357 struct lpfc_eqe *eqe;
13358 unsigned long iflag;
13359 int ecount = 0;
James Smart895427b2017-02-12 13:52:30 -080013360 int hba_eqidx;
James Smart4f774512009-05-22 14:52:35 -040013361
13362 /* Get the driver's phba structure from the dev_id */
James Smart895427b2017-02-12 13:52:30 -080013363 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
13364 phba = hba_eq_hdl->phba;
13365 hba_eqidx = hba_eq_hdl->idx;
James Smart4f774512009-05-22 14:52:35 -040013366
13367 if (unlikely(!phba))
13368 return IRQ_NONE;
James Smart67d12732012-08-03 12:36:13 -040013369 if (unlikely(!phba->sli4_hba.hba_eq))
James Smart5350d872011-10-10 21:33:49 -040013370 return IRQ_NONE;
James Smart4f774512009-05-22 14:52:35 -040013371
13372 /* Get to the EQ struct associated with this vector */
James Smart895427b2017-02-12 13:52:30 -080013373 fpeq = phba->sli4_hba.hba_eq[hba_eqidx];
James Smart2e90f4b2011-12-13 13:22:37 -050013374 if (unlikely(!fpeq))
13375 return IRQ_NONE;
James Smart4f774512009-05-22 14:52:35 -040013376
James Smartbd2cdd52017-02-12 13:52:33 -080013377#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13378 if (phba->ktime_on)
13379 fpeq->isr_timestamp = ktime_get_ns();
13380#endif
13381
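	/*
	 * With fcp look-ahead enabled, hba_eq_in_use acts as a single-owner
	 * claim on this EQ: take the claim and clear the EQ interrupt, or
	 * back off if another context already owns it.
	 */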
James Smartba20c852012-08-03 12:36:52 -040013382 if (lpfc_fcp_look_ahead) {
James Smart895427b2017-02-12 13:52:30 -080013383 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
James Smartba20c852012-08-03 12:36:52 -040013384 lpfc_sli4_eq_clr_intr(fpeq);
13385 else {
James Smart895427b2017-02-12 13:52:30 -080013386 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
James Smartba20c852012-08-03 12:36:52 -040013387 return IRQ_NONE;
13388 }
13389 }
13390
James Smart4f774512009-05-22 14:52:35 -040013391 /* Check device state for handling interrupt */
13392 if (unlikely(lpfc_intr_state_check(phba))) {
James Smartb84daac2012-08-03 12:35:13 -040013393 fpeq->EQ_badstate++;
James Smart4f774512009-05-22 14:52:35 -040013394 /* Check again for link_state with lock held */
13395 spin_lock_irqsave(&phba->hbalock, iflag);
13396 if (phba->link_state < LPFC_LINK_DOWN)
13397 /* Flush, clear interrupt, and rearm the EQ */
13398 lpfc_sli4_eq_flush(phba, fpeq);
13399 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smartba20c852012-08-03 12:36:52 -040013400 if (lpfc_fcp_look_ahead)
James Smart895427b2017-02-12 13:52:30 -080013401 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
James Smart4f774512009-05-22 14:52:35 -040013402 return IRQ_NONE;
13403 }
13404
13405 /*
13406	 * Process all the events on the FCP fast-path EQ
13407 */
13408 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
James Smarteb016562014-09-03 12:58:06 -040013409 if (eqe == NULL)
13410 break;
13411
James Smart895427b2017-02-12 13:52:30 -080013412 lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
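		/* Periodically release consumed EQEs without re-arming so the
		 * EQ does not fill during a long completion burst.
		 */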
James Smart73d91e52011-10-10 21:32:10 -040013413 if (!(++ecount % fpeq->entry_repost))
James Smart4f774512009-05-22 14:52:35 -040013414 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
James Smartb84daac2012-08-03 12:35:13 -040013415 fpeq->EQ_processed++;
James Smart4f774512009-05-22 14:52:35 -040013416 }
13417
James Smartb84daac2012-08-03 12:35:13 -040013418 /* Track the max number of EQEs processed in 1 intr */
13419 if (ecount > fpeq->EQ_max_eqe)
13420 fpeq->EQ_max_eqe = ecount;
13421
James Smart4f774512009-05-22 14:52:35 -040013422 /* Always clear and re-arm the fast-path EQ */
13423 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
13424
13425 if (unlikely(ecount == 0)) {
James Smartb84daac2012-08-03 12:35:13 -040013426 fpeq->EQ_no_entry++;
James Smartba20c852012-08-03 12:36:52 -040013427
13428 if (lpfc_fcp_look_ahead) {
James Smart895427b2017-02-12 13:52:30 -080013429 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
James Smartba20c852012-08-03 12:36:52 -040013430 return IRQ_NONE;
13431 }
13432
James Smart4f774512009-05-22 14:52:35 -040013433 if (phba->intr_type == MSIX)
13434 /* MSI-X treated interrupt served as no EQ share INT */
13435 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13436 "0358 MSI-X interrupt with no EQE\n");
13437 else
13438 /* Non MSI-X treated on interrupt as EQ share INT */
13439 return IRQ_NONE;
13440 }
13441
James Smartba20c852012-08-03 12:36:52 -040013442 if (lpfc_fcp_look_ahead)
James Smart895427b2017-02-12 13:52:30 -080013443 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
13444
James Smart4f774512009-05-22 14:52:35 -040013445 return IRQ_HANDLED;
13446} /* lpfc_sli4_fp_intr_handler */
13447
13448/**
13449 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
13450 * @irq: Interrupt number.
13451 * @dev_id: The device context pointer.
13452 *
13453 * This function is the device-level interrupt handler to device with SLI-4
13454 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
13455 * interrupt mode is enabled and there is an event in the HBA which requires
13456 * driver attention. This function invokes the slow-path interrupt attention
13457 * handling function and fast-path interrupt attention handling function in
13458 * turn to process the relevant HBA attention events. This function is called
13459 * without any lock held. It gets the hbalock to access and update SLI data
13460 * structures.
13461 *
13462 * This function returns IRQ_HANDLED when interrupt is handled, else it
13463 * returns IRQ_NONE.
13464 **/
13465irqreturn_t
13466lpfc_sli4_intr_handler(int irq, void *dev_id)
13467{
13468 struct lpfc_hba *phba;
James Smart67d12732012-08-03 12:36:13 -040013469 irqreturn_t hba_irq_rc;
13470 bool hba_handled = false;
James Smart895427b2017-02-12 13:52:30 -080013471 int qidx;
James Smart4f774512009-05-22 14:52:35 -040013472
13473 /* Get the driver's phba structure from the dev_id */
13474 phba = (struct lpfc_hba *)dev_id;
13475
13476 if (unlikely(!phba))
13477 return IRQ_NONE;
13478
13479 /*
James Smart4f774512009-05-22 14:52:35 -040013480 * Invoke fast-path host attention interrupt handling as appropriate.
13481 */
James Smart895427b2017-02-12 13:52:30 -080013482 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
James Smart67d12732012-08-03 12:36:13 -040013483 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
James Smart895427b2017-02-12 13:52:30 -080013484 &phba->sli4_hba.hba_eq_hdl[qidx]);
James Smart67d12732012-08-03 12:36:13 -040013485 if (hba_irq_rc == IRQ_HANDLED)
13486 hba_handled |= true;
James Smart4f774512009-05-22 14:52:35 -040013487 }
13488
James Smart1ba981f2014-02-20 09:56:45 -050013489 if (phba->cfg_fof) {
13490 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
James Smart895427b2017-02-12 13:52:30 -080013491 &phba->sli4_hba.hba_eq_hdl[qidx]);
James Smart1ba981f2014-02-20 09:56:45 -050013492 if (hba_irq_rc == IRQ_HANDLED)
13493 hba_handled |= true;
13494 }
13495
James Smart67d12732012-08-03 12:36:13 -040013496 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
James Smart4f774512009-05-22 14:52:35 -040013497} /* lpfc_sli4_intr_handler */
13498
13499/**
13500 * lpfc_sli4_queue_free - free a queue structure and associated memory
13501 * @queue: The queue structure to free.
13502 *
Uwe Kleine-Königb5950762010-11-01 15:38:34 -040013503 * This function frees a queue structure and the DMAable memory used for
James Smart4f774512009-05-22 14:52:35 -040013504 * the host resident queue. This function must be called after destroying the
13505 * queue on the HBA.
13506 **/
13507void
13508lpfc_sli4_queue_free(struct lpfc_queue *queue)
13509{
13510 struct lpfc_dmabuf *dmabuf;
13511
13512 if (!queue)
13513 return;
13514
13515 while (!list_empty(&queue->page_list)) {
13516 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
13517 list);
James Smart49198b32010-04-06 15:04:33 -040013518 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
James Smart4f774512009-05-22 14:52:35 -040013519 dmabuf->virt, dmabuf->phys);
13520 kfree(dmabuf);
13521 }
James Smart895427b2017-02-12 13:52:30 -080013522 if (queue->rqbp) {
13523 lpfc_free_rq_buffer(queue->phba, queue);
13524 kfree(queue->rqbp);
13525 }
13526 kfree(queue->pring);
James Smart4f774512009-05-22 14:52:35 -040013527 kfree(queue);
13528 return;
13529}
13530
13531/**
13532 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
13533 * @phba: The HBA that this queue is being created on.
13534 * @entry_size: The size of each queue entry for this queue.
13535 * @entry_count: The number of entries that this queue will handle.
13536 *
13537 * This function allocates a queue structure and the DMAable memory used for
13538 * the host resident queue. This function must be called before creating the
13539 * queue on the HBA.
13540 **/
13541struct lpfc_queue *
13542lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
13543 uint32_t entry_count)
13544{
13545 struct lpfc_queue *queue;
13546 struct lpfc_dmabuf *dmabuf;
13547 int x, total_qe_count;
13548 void *dma_pointer;
James Smartcb5172e2010-03-15 11:25:07 -040013549 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
James Smart4f774512009-05-22 14:52:35 -040013550
James Smartcb5172e2010-03-15 11:25:07 -040013551 if (!phba->sli4_hba.pc_sli4_params.supported)
13552 hw_page_size = SLI4_PAGE_SIZE;
13553
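	/* A single allocation holds the queue struct plus its qe[] array */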
James Smart4f774512009-05-22 14:52:35 -040013554 queue = kzalloc(sizeof(struct lpfc_queue) +
13555 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
13556 if (!queue)
13557 return NULL;
James Smartcb5172e2010-03-15 11:25:07 -040013558 queue->page_count = (ALIGN(entry_size * entry_count,
13559 hw_page_size))/hw_page_size;
James Smart895427b2017-02-12 13:52:30 -080013560
13561	/* If needed, adjust page count to match the max the adapter supports */
13562 if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)
13563 queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
13564
James Smart4f774512009-05-22 14:52:35 -040013565 INIT_LIST_HEAD(&queue->list);
James Smart895427b2017-02-12 13:52:30 -080013566 INIT_LIST_HEAD(&queue->wq_list);
James Smart4f774512009-05-22 14:52:35 -040013567 INIT_LIST_HEAD(&queue->page_list);
13568 INIT_LIST_HEAD(&queue->child_list);
13569 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
13570 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
13571 if (!dmabuf)
13572 goto out_fail;
Joe Perches1aee3832014-09-03 12:56:12 -040013573 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
13574 hw_page_size, &dmabuf->phys,
13575 GFP_KERNEL);
James Smart4f774512009-05-22 14:52:35 -040013576 if (!dmabuf->virt) {
13577 kfree(dmabuf);
13578 goto out_fail;
13579 }
13580 dmabuf->buffer_tag = x;
13581 list_add_tail(&dmabuf->list, &queue->page_list);
13582 /* initialize queue's entry array */
13583 dma_pointer = dmabuf->virt;
13584 for (; total_qe_count < entry_count &&
James Smartcb5172e2010-03-15 11:25:07 -040013585 dma_pointer < (hw_page_size + dmabuf->virt);
James Smart4f774512009-05-22 14:52:35 -040013586 total_qe_count++, dma_pointer += entry_size) {
13587 queue->qe[total_qe_count].address = dma_pointer;
13588 }
13589 }
13590 queue->entry_size = entry_size;
13591 queue->entry_count = entry_count;
James Smart73d91e52011-10-10 21:32:10 -040013592
13593 /*
13594 * entry_repost is calculated based on the number of entries in the
13595 * queue. This works out except for RQs. If buffers are NOT initially
13596 * posted for every RQE, entry_repost should be adjusted accordingly.
13597 */
13598 queue->entry_repost = (entry_count >> 3);
13599 if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
13600 queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
James Smart4f774512009-05-22 14:52:35 -040013601 queue->phba = phba;
13602
13603 return queue;
13604out_fail:
13605 lpfc_sli4_queue_free(queue);
13606 return NULL;
13607}
13608
13609/**
James Smart962bc512013-01-03 15:44:00 -050013610 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
13611 * @phba: HBA structure that indicates port to create a queue on.
13612 * @pci_barset: PCI BAR set flag.
13613 *
13614 * This function returns the host memory address for the specified PCI BAR
13615 * set, as previously iomapped by the driver. The returned host
13616 * memory address can be NULL.
13617 */
13618static void __iomem *
13619lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
13620{
James Smart962bc512013-01-03 15:44:00 -050013621 if (!phba->pcidev)
13622 return NULL;
James Smart962bc512013-01-03 15:44:00 -050013623
13624 switch (pci_barset) {
13625 case WQ_PCI_BAR_0_AND_1:
James Smart962bc512013-01-03 15:44:00 -050013626 return phba->pci_bar0_memmap_p;
13627 case WQ_PCI_BAR_2_AND_3:
James Smart962bc512013-01-03 15:44:00 -050013628 return phba->pci_bar2_memmap_p;
13629 case WQ_PCI_BAR_4_AND_5:
James Smart962bc512013-01-03 15:44:00 -050013630 return phba->pci_bar4_memmap_p;
13631 default:
13632 break;
13633 }
13634 return NULL;
13635}
13636
13637/**
James Smart895427b2017-02-12 13:52:30 -080013638 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs
James Smart173edbb2012-06-12 13:54:50 -040013639 * @phba: HBA structure that indicates port to create a queue on.
13640 * @startq: The starting FCP EQ to modify
13641 *
13642 * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
13643 *
13644 * The @phba struct is used to send mailbox command to HBA. The @startq
13645 * is used to get the starting FCP EQ to change.
13646 * This function is synchronous and will wait for the mailbox
13647 * command to finish before continuing.
13648 *
13649 * On success this function will return a zero. If unable to allocate enough
13650 * memory this function will return -ENOMEM. If the mailbox command
13651 * fails this function will return -ENXIO.
13652 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040013653int
James Smart895427b2017-02-12 13:52:30 -080013654lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq)
James Smart173edbb2012-06-12 13:54:50 -040013655{
13656 struct lpfc_mbx_modify_eq_delay *eq_delay;
13657 LPFC_MBOXQ_t *mbox;
13658 struct lpfc_queue *eq;
13659 int cnt, rc, length, status = 0;
13660 uint32_t shdr_status, shdr_add_status;
James Smartee020062012-09-29 11:28:52 -040013661 uint32_t result;
James Smart895427b2017-02-12 13:52:30 -080013662 int qidx;
James Smart173edbb2012-06-12 13:54:50 -040013663 union lpfc_sli4_cfg_shdr *shdr;
13664 uint16_t dmult;
13665
James Smart895427b2017-02-12 13:52:30 -080013666 if (startq >= phba->io_channel_irqs)
James Smart173edbb2012-06-12 13:54:50 -040013667 return 0;
13668
13669 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13670 if (!mbox)
13671 return -ENOMEM;
13672 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
13673 sizeof(struct lpfc_sli4_cfg_mhdr));
13674 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13675 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
13676 length, LPFC_SLI4_MBX_EMBED);
13677 eq_delay = &mbox->u.mqe.un.eq_delay;
13678
13679	/* Calculate delay multiplier from maximum interrupts per second */
James Smart895427b2017-02-12 13:52:30 -080013680 result = phba->cfg_fcp_imax / phba->io_channel_irqs;
13681 if (result > LPFC_DMULT_CONST || result == 0)
James Smartee020062012-09-29 11:28:52 -040013682 dmult = 0;
13683 else
13684 dmult = LPFC_DMULT_CONST/result - 1;
James Smart173edbb2012-06-12 13:54:50 -040013685
13686 cnt = 0;
James Smart895427b2017-02-12 13:52:30 -080013687 for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
13688 eq = phba->sli4_hba.hba_eq[qidx];
James Smart173edbb2012-06-12 13:54:50 -040013689 if (!eq)
13690 continue;
13691 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
13692 eq_delay->u.request.eq[cnt].phase = 0;
13693 eq_delay->u.request.eq[cnt].delay_multi = dmult;
13694 cnt++;
13695 if (cnt >= LPFC_MAX_EQ_DELAY)
13696 break;
13697 }
13698 eq_delay->u.request.num_eq = cnt;
13699
13700 mbox->vport = phba->pport;
13701 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13702 mbox->context1 = NULL;
13703 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13704 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
13705 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13706 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13707 if (shdr_status || shdr_add_status || rc) {
13708 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13709 "2512 MODIFY_EQ_DELAY mailbox failed with "
13710 "status x%x add_status x%x, mbx status x%x\n",
13711 shdr_status, shdr_add_status, rc);
13712 status = -ENXIO;
13713 }
13714 mempool_free(mbox, phba->mbox_mem_pool);
13715 return status;
13716}
13717
13718/**
James Smart4f774512009-05-22 14:52:35 -040013719 * lpfc_eq_create - Create an Event Queue on the HBA
13720 * @phba: HBA structure that indicates port to create a queue on.
13721 * @eq: The queue structure to use to create the event queue.
13722 * @imax: The maximum interrupt per second limit.
13723 *
13724 * This function creates an event queue, as detailed in @eq, on a port,
13725 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
13726 *
13727 * The @phba struct is used to send mailbox command to HBA. The @eq struct
13728 * is used to get the entry count and entry size that are necessary to
13729 * determine the number of pages to allocate and use for this queue. This
13730 * function will send the EQ_CREATE mailbox command to the HBA to setup the
13731 * event queue. This function is synchronous and will wait for the mailbox
13732 * command to finish before continuing.
13733 *
13734 * On success this function will return a zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040013735 * memory this function will return -ENOMEM. If the queue create mailbox command
13736 * fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040013737 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040013738int
James Smartee020062012-09-29 11:28:52 -040013739lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
James Smart4f774512009-05-22 14:52:35 -040013740{
13741 struct lpfc_mbx_eq_create *eq_create;
13742 LPFC_MBOXQ_t *mbox;
13743 int rc, length, status = 0;
13744 struct lpfc_dmabuf *dmabuf;
13745 uint32_t shdr_status, shdr_add_status;
13746 union lpfc_sli4_cfg_shdr *shdr;
13747 uint16_t dmult;
James Smart49198b32010-04-06 15:04:33 -040013748 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13749
James Smart2e90f4b2011-12-13 13:22:37 -050013750 /* sanity check on queue memory */
13751 if (!eq)
13752 return -ENODEV;
James Smart49198b32010-04-06 15:04:33 -040013753 if (!phba->sli4_hba.pc_sli4_params.supported)
13754 hw_page_size = SLI4_PAGE_SIZE;
James Smart4f774512009-05-22 14:52:35 -040013755
13756 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13757 if (!mbox)
13758 return -ENOMEM;
13759 length = (sizeof(struct lpfc_mbx_eq_create) -
13760 sizeof(struct lpfc_sli4_cfg_mhdr));
13761 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13762 LPFC_MBOX_OPCODE_EQ_CREATE,
13763 length, LPFC_SLI4_MBX_EMBED);
13764 eq_create = &mbox->u.mqe.un.eq_create;
13765 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
13766 eq->page_count);
13767 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
13768 LPFC_EQE_SIZE);
13769 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
James Smart2c9c5a02015-04-07 15:07:15 -040013770 /* don't setup delay multiplier using EQ_CREATE */
13771 dmult = 0;
James Smart4f774512009-05-22 14:52:35 -040013772 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
13773 dmult);
13774 switch (eq->entry_count) {
13775 default:
13776 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13777 "0360 Unsupported EQ count. (%d)\n",
13778 eq->entry_count);
13779		if (eq->entry_count < 256) {
13780			mempool_free(mbox, phba->mbox_mem_pool);
			return -EINVAL;
		}
13781 /* otherwise default to smallest count (drop through) */
13782 case 256:
13783 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13784 LPFC_EQ_CNT_256);
13785 break;
13786 case 512:
13787 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13788 LPFC_EQ_CNT_512);
13789 break;
13790 case 1024:
13791 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13792 LPFC_EQ_CNT_1024);
13793 break;
13794 case 2048:
13795 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13796 LPFC_EQ_CNT_2048);
13797 break;
13798 case 4096:
13799 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13800 LPFC_EQ_CNT_4096);
13801 break;
13802 }
13803 list_for_each_entry(dmabuf, &eq->page_list, list) {
James Smart49198b32010-04-06 15:04:33 -040013804 memset(dmabuf->virt, 0, hw_page_size);
James Smart4f774512009-05-22 14:52:35 -040013805 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13806 putPaddrLow(dmabuf->phys);
13807 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13808 putPaddrHigh(dmabuf->phys);
13809 }
13810 mbox->vport = phba->pport;
13811 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13812 mbox->context1 = NULL;
13813 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13814 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
13815 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13816 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13817 if (shdr_status || shdr_add_status || rc) {
13818 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13819 "2500 EQ_CREATE mailbox failed with "
13820 "status x%x add_status x%x, mbx status x%x\n",
13821 shdr_status, shdr_add_status, rc);
13822 status = -ENXIO;
13823 }
13824 eq->type = LPFC_EQ;
13825 eq->subtype = LPFC_NONE;
13826 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
13827 if (eq->queue_id == 0xFFFF)
13828 status = -ENXIO;
13829 eq->host_index = 0;
13830 eq->hba_index = 0;
13831
James Smart8fa38512009-07-19 10:01:03 -040013832 mempool_free(mbox, phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040013833 return status;
13834}
13835
13836/**
13837 * lpfc_cq_create - Create a Completion Queue on the HBA
13838 * @phba: HBA structure that indicates port to create a queue on.
13839 * @cq: The queue structure to use to create the completion queue.
13840 * @eq: The event queue to bind this completion queue to.
13841 *
13842 * This function creates a completion queue, as detailed in @cq, on a port,
13843 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
13844 *
13845 * The @phba struct is used to send mailbox command to HBA. The @cq struct
13846 * is used to get the entry count and entry size that are necessary to
13847 * determine the number of pages to allocate and use for this queue. The @eq
13848 * is used to indicate which event queue to bind this completion queue to. This
13849 * function will send the CQ_CREATE mailbox command to the HBA to setup the
13850 * completion queue. This function is asynchronous and will wait for the mailbox
13851 * command to finish before continuing.
13852 *
13853 * On success this function will return a zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040013854 * memory this function will return -ENOMEM. If the queue create mailbox command
13855 * fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040013856 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040013857int
James Smart4f774512009-05-22 14:52:35 -040013858lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
13859 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
13860{
13861 struct lpfc_mbx_cq_create *cq_create;
13862 struct lpfc_dmabuf *dmabuf;
13863 LPFC_MBOXQ_t *mbox;
13864 int rc, length, status = 0;
13865 uint32_t shdr_status, shdr_add_status;
13866 union lpfc_sli4_cfg_shdr *shdr;
James Smart49198b32010-04-06 15:04:33 -040013867 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13868
James Smart2e90f4b2011-12-13 13:22:37 -050013869 /* sanity check on queue memory */
13870 if (!cq || !eq)
13871 return -ENODEV;
James Smart49198b32010-04-06 15:04:33 -040013872 if (!phba->sli4_hba.pc_sli4_params.supported)
13873 hw_page_size = SLI4_PAGE_SIZE;
13874
James Smart4f774512009-05-22 14:52:35 -040013875 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13876 if (!mbox)
13877 return -ENOMEM;
13878 length = (sizeof(struct lpfc_mbx_cq_create) -
13879 sizeof(struct lpfc_sli4_cfg_mhdr));
13880 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13881 LPFC_MBOX_OPCODE_CQ_CREATE,
13882 length, LPFC_SLI4_MBX_EMBED);
13883 cq_create = &mbox->u.mqe.un.cq_create;
James Smart5a6f1332011-03-11 16:05:35 -050013884 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
James Smart4f774512009-05-22 14:52:35 -040013885 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
13886 cq->page_count);
13887 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
13888 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
James Smart5a6f1332011-03-11 16:05:35 -050013889 bf_set(lpfc_mbox_hdr_version, &shdr->request,
13890 phba->sli4_hba.pc_sli4_params.cqv);
13891 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
James Smartc31098c2011-04-16 11:03:33 -040013892 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
13893 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
James Smart5a6f1332011-03-11 16:05:35 -050013894 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
13895 eq->queue_id);
13896 } else {
13897 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
13898 eq->queue_id);
13899 }
James Smart4f774512009-05-22 14:52:35 -040013900 switch (cq->entry_count) {
13901 default:
13902 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart2ea259e2017-02-12 13:52:27 -080013903 "0361 Unsupported CQ count: "
13904 "entry cnt %d sz %d pg cnt %d repost %d\n",
13905 cq->entry_count, cq->entry_size,
13906 cq->page_count, cq->entry_repost);
James Smart4f4c1862012-06-12 13:54:02 -040013907 if (cq->entry_count < 256) {
13908 status = -EINVAL;
13909 goto out;
13910 }
James Smart4f774512009-05-22 14:52:35 -040013911 /* otherwise default to smallest count (drop through) */
13912 case 256:
13913 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
13914 LPFC_CQ_CNT_256);
13915 break;
13916 case 512:
13917 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
13918 LPFC_CQ_CNT_512);
13919 break;
13920 case 1024:
13921 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
13922 LPFC_CQ_CNT_1024);
13923 break;
13924 }
13925 list_for_each_entry(dmabuf, &cq->page_list, list) {
James Smart49198b32010-04-06 15:04:33 -040013926 memset(dmabuf->virt, 0, hw_page_size);
James Smart4f774512009-05-22 14:52:35 -040013927 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13928 putPaddrLow(dmabuf->phys);
13929 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13930 putPaddrHigh(dmabuf->phys);
13931 }
13932 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13933
13934 /* The IOCTL status is embedded in the mailbox subheader. */
James Smart4f774512009-05-22 14:52:35 -040013935 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13936 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13937 if (shdr_status || shdr_add_status || rc) {
13938 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13939 "2501 CQ_CREATE mailbox failed with "
13940 "status x%x add_status x%x, mbx status x%x\n",
13941 shdr_status, shdr_add_status, rc);
13942 status = -ENXIO;
13943 goto out;
13944 }
13945 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
13946 if (cq->queue_id == 0xFFFF) {
13947 status = -ENXIO;
13948 goto out;
13949 }
13950 /* link the cq onto the parent eq child list */
13951 list_add_tail(&cq->list, &eq->child_list);
13952 /* Set up completion queue's type and subtype */
13953 cq->type = type;
13954 cq->subtype = subtype;
13955 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
James Smart2a622bf2011-02-16 12:40:06 -050013956 cq->assoc_qid = eq->queue_id;
James Smart4f774512009-05-22 14:52:35 -040013957 cq->host_index = 0;
13958 cq->hba_index = 0;
James Smart4f774512009-05-22 14:52:35 -040013959
James Smart8fa38512009-07-19 10:01:03 -040013960out:
13961 mempool_free(mbox, phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040013962 return status;
13963}
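
/*
 * Illustrative sketch only, not part of the driver: one way a caller could
 * bind a new completion queue to an existing event queue with lpfc_cq_create()
 * above. The helpers lpfc_sli4_queue_alloc()/lpfc_sli4_queue_free() and the
 * phba->sli4_hba.cq_esize/cq_ecount fields are assumed to have the shapes used
 * elsewhere in this driver, and LPFC_WCQ/LPFC_FCP stand in for whatever
 * type/subtype a real caller needs.
 */
static int __maybe_unused
lpfc_example_bind_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
                     struct lpfc_queue **cqp)
{
        struct lpfc_queue *cq;
        int rc;

        /* Allocate host memory for the CQ entries before creating it */
        cq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
                                   phba->sli4_hba.cq_ecount);
        if (!cq)
                return -ENOMEM;

        /* CQ_CREATE mailbox; on success the cq is linked onto eq->child_list */
        rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
        if (rc) {
                lpfc_sli4_queue_free(cq);
                return rc;
        }

        *cqp = cq;
        return 0;
}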
13964
13965/**
James Smartb19a0612010-04-06 14:48:51 -040013966 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
James Smart04c68492009-05-22 14:52:52 -040013967 * @phba: HBA structure that indicates port to create a queue on.
13968 * @mq: The queue structure to use to create the mailbox queue.
James Smartb19a0612010-04-06 14:48:51 -040013969 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
13970 * @cq: The completion queue to associate with this mq.
James Smart04c68492009-05-22 14:52:52 -040013971 *
James Smartb19a0612010-04-06 14:48:51 -040013972 * This function provides fallback (fb) functionality when the
13973 * mq_create_ext fails on older FW generations. Its purpose is otherwise
13974 * identical to mq_create_ext.
James Smart04c68492009-05-22 14:52:52 -040013975 *
James Smartb19a0612010-04-06 14:48:51 -040013976 * This routine cannot fail as all attributes were previously accessed and
13977 * initialized in mq_create_ext.
James Smart04c68492009-05-22 14:52:52 -040013978 **/
James Smartb19a0612010-04-06 14:48:51 -040013979static void
13980lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
13981 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
James Smart04c68492009-05-22 14:52:52 -040013982{
13983 struct lpfc_mbx_mq_create *mq_create;
13984 struct lpfc_dmabuf *dmabuf;
James Smartb19a0612010-04-06 14:48:51 -040013985 int length;
James Smart04c68492009-05-22 14:52:52 -040013986
James Smart04c68492009-05-22 14:52:52 -040013987 length = (sizeof(struct lpfc_mbx_mq_create) -
13988 sizeof(struct lpfc_sli4_cfg_mhdr));
13989 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13990 LPFC_MBOX_OPCODE_MQ_CREATE,
13991 length, LPFC_SLI4_MBX_EMBED);
13992 mq_create = &mbox->u.mqe.un.mq_create;
13993 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
James Smartb19a0612010-04-06 14:48:51 -040013994 mq->page_count);
James Smart04c68492009-05-22 14:52:52 -040013995 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
James Smartb19a0612010-04-06 14:48:51 -040013996 cq->queue_id);
James Smart04c68492009-05-22 14:52:52 -040013997 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
13998 switch (mq->entry_count) {
James Smart04c68492009-05-22 14:52:52 -040013999 case 16:
James Smart5a6f1332011-03-11 16:05:35 -050014000 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14001 LPFC_MQ_RING_SIZE_16);
James Smart04c68492009-05-22 14:52:52 -040014002 break;
14003 case 32:
James Smart5a6f1332011-03-11 16:05:35 -050014004 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14005 LPFC_MQ_RING_SIZE_32);
James Smart04c68492009-05-22 14:52:52 -040014006 break;
14007 case 64:
James Smart5a6f1332011-03-11 16:05:35 -050014008 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14009 LPFC_MQ_RING_SIZE_64);
James Smart04c68492009-05-22 14:52:52 -040014010 break;
14011 case 128:
James Smart5a6f1332011-03-11 16:05:35 -050014012 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14013 LPFC_MQ_RING_SIZE_128);
James Smart04c68492009-05-22 14:52:52 -040014014 break;
14015 }
14016 list_for_each_entry(dmabuf, &mq->page_list, list) {
14017 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
James Smartb19a0612010-04-06 14:48:51 -040014018 putPaddrLow(dmabuf->phys);
James Smart04c68492009-05-22 14:52:52 -040014019 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
James Smartb19a0612010-04-06 14:48:51 -040014020 putPaddrHigh(dmabuf->phys);
14021 }
14022}
14023
14024/**
14025 * lpfc_mq_create - Create a mailbox Queue on the HBA
14026 * @phba: HBA structure that indicates port to create a queue on.
14027 * @mq: The queue structure to use to create the mailbox queue.
14028 * @cq: The completion queue to associate with this mq.
14029 * @subtype: The queue's subtype.
14030 *
14031 * This function creates a mailbox queue, as detailed in @mq, on a port,
14032 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
14033 *
14034 * The @phba struct is used to send mailbox command to HBA. The @cq struct
14035 * is used to get the entry count and entry size that are necessary to
14036 * determine the number of pages to allocate and use for this queue. This
14037 * function will send the MQ_CREATE mailbox command to the HBA to setup the
14038 * mailbox queue. This function is synchronous and will wait for the mailbox
14039 * command to finish before continuing.
14040 *
14041 * On success this function will return a zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040014042 * memory this function will return -ENOMEM. If the queue create mailbox command
14043 * fails this function will return -ENXIO.
James Smartb19a0612010-04-06 14:48:51 -040014044 **/
14045int32_t
14046lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
14047 struct lpfc_queue *cq, uint32_t subtype)
14048{
14049 struct lpfc_mbx_mq_create *mq_create;
14050 struct lpfc_mbx_mq_create_ext *mq_create_ext;
14051 struct lpfc_dmabuf *dmabuf;
14052 LPFC_MBOXQ_t *mbox;
14053 int rc, length, status = 0;
14054 uint32_t shdr_status, shdr_add_status;
14055 union lpfc_sli4_cfg_shdr *shdr;
James Smart49198b32010-04-06 15:04:33 -040014056 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
James Smartb19a0612010-04-06 14:48:51 -040014057
James Smart2e90f4b2011-12-13 13:22:37 -050014058 /* sanity check on queue memory */
14059 if (!mq || !cq)
14060 return -ENODEV;
James Smart49198b32010-04-06 15:04:33 -040014061 if (!phba->sli4_hba.pc_sli4_params.supported)
14062 hw_page_size = SLI4_PAGE_SIZE;
James Smartb19a0612010-04-06 14:48:51 -040014063
14064 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14065 if (!mbox)
14066 return -ENOMEM;
14067 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
14068 sizeof(struct lpfc_sli4_cfg_mhdr));
14069 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14070 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
14071 length, LPFC_SLI4_MBX_EMBED);
14072
14073 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
James Smart5a6f1332011-03-11 16:05:35 -050014074 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
James Smart70f3c072010-12-15 17:57:33 -050014075 bf_set(lpfc_mbx_mq_create_ext_num_pages,
14076 &mq_create_ext->u.request, mq->page_count);
14077 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
14078 &mq_create_ext->u.request, 1);
14079 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
James Smartb19a0612010-04-06 14:48:51 -040014080 &mq_create_ext->u.request, 1);
14081 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
14082 &mq_create_ext->u.request, 1);
James Smart70f3c072010-12-15 17:57:33 -050014083 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
14084 &mq_create_ext->u.request, 1);
14085 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
14086 &mq_create_ext->u.request, 1);
James Smartb19a0612010-04-06 14:48:51 -040014087 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
James Smart5a6f1332011-03-11 16:05:35 -050014088 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14089 phba->sli4_hba.pc_sli4_params.mqv);
14090 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
14091 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
14092 cq->queue_id);
14093 else
14094 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
14095 cq->queue_id);
James Smartb19a0612010-04-06 14:48:51 -040014096 switch (mq->entry_count) {
14097 default:
14098 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14099 "0362 Unsupported MQ count. (%d)\n",
14100 mq->entry_count);
James Smart4f4c1862012-06-12 13:54:02 -040014101 if (mq->entry_count < 16) {
14102 status = -EINVAL;
14103 goto out;
14104 }
James Smartb19a0612010-04-06 14:48:51 -040014105 /* otherwise default to smallest count (drop through) */
14106 case 16:
James Smart5a6f1332011-03-11 16:05:35 -050014107 bf_set(lpfc_mq_context_ring_size,
14108 &mq_create_ext->u.request.context,
14109 LPFC_MQ_RING_SIZE_16);
James Smartb19a0612010-04-06 14:48:51 -040014110 break;
14111 case 32:
James Smart5a6f1332011-03-11 16:05:35 -050014112 bf_set(lpfc_mq_context_ring_size,
14113 &mq_create_ext->u.request.context,
14114 LPFC_MQ_RING_SIZE_32);
James Smartb19a0612010-04-06 14:48:51 -040014115 break;
14116 case 64:
James Smart5a6f1332011-03-11 16:05:35 -050014117 bf_set(lpfc_mq_context_ring_size,
14118 &mq_create_ext->u.request.context,
14119 LPFC_MQ_RING_SIZE_64);
James Smartb19a0612010-04-06 14:48:51 -040014120 break;
14121 case 128:
James Smart5a6f1332011-03-11 16:05:35 -050014122 bf_set(lpfc_mq_context_ring_size,
14123 &mq_create_ext->u.request.context,
14124 LPFC_MQ_RING_SIZE_128);
James Smartb19a0612010-04-06 14:48:51 -040014125 break;
14126 }
14127 list_for_each_entry(dmabuf, &mq->page_list, list) {
James Smart49198b32010-04-06 15:04:33 -040014128 memset(dmabuf->virt, 0, hw_page_size);
James Smartb19a0612010-04-06 14:48:51 -040014129 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
14130 putPaddrLow(dmabuf->phys);
14131 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
James Smart04c68492009-05-22 14:52:52 -040014132 putPaddrHigh(dmabuf->phys);
14133 }
14134 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
James Smartb19a0612010-04-06 14:48:51 -040014135 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
14136 &mq_create_ext->u.response);
14137 if (rc != MBX_SUCCESS) {
14138 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14139 "2795 MQ_CREATE_EXT failed with "
14140 "status x%x. Failback to MQ_CREATE.\n",
14141 rc);
14142 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
14143 mq_create = &mbox->u.mqe.un.mq_create;
14144 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14145 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
14146 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
14147 &mq_create->u.response);
14148 }
14149
James Smart04c68492009-05-22 14:52:52 -040014150 /* The IOCTL status is embedded in the mailbox subheader. */
James Smart04c68492009-05-22 14:52:52 -040014151 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14152 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14153 if (shdr_status || shdr_add_status || rc) {
14154 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14155 "2502 MQ_CREATE mailbox failed with "
14156 "status x%x add_status x%x, mbx status x%x\n",
14157 shdr_status, shdr_add_status, rc);
14158 status = -ENXIO;
14159 goto out;
14160 }
James Smart04c68492009-05-22 14:52:52 -040014161 if (mq->queue_id == 0xFFFF) {
14162 status = -ENXIO;
14163 goto out;
14164 }
14165 mq->type = LPFC_MQ;
James Smart2a622bf2011-02-16 12:40:06 -050014166 mq->assoc_qid = cq->queue_id;
James Smart04c68492009-05-22 14:52:52 -040014167 mq->subtype = subtype;
14168 mq->host_index = 0;
14169 mq->hba_index = 0;
14170
14171 /* link the mq onto the parent cq child list */
14172 list_add_tail(&mq->list, &cq->child_list);
14173out:
James Smart8fa38512009-07-19 10:01:03 -040014174 mempool_free(mbox, phba->mbox_mem_pool);
James Smart04c68492009-05-22 14:52:52 -040014175 return status;
14176}
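
/*
 * Illustrative sketch only, not part of the driver: creating the mailbox
 * queue against its CQ. lpfc_mq_create() above issues MQ_CREATE_EXT and
 * quietly falls back to plain MQ_CREATE on older firmware, so a caller only
 * ever makes this one call. The alloc/free helpers and the
 * phba->sli4_hba.mq_esize/mq_ecount fields are assumptions about the rest of
 * the driver.
 */
static int __maybe_unused
lpfc_example_create_mq(struct lpfc_hba *phba, struct lpfc_queue *cq,
                       struct lpfc_queue **mqp)
{
        struct lpfc_queue *mq;
        int rc;

        mq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
                                   phba->sli4_hba.mq_ecount);
        if (!mq)
                return -ENOMEM;

        rc = lpfc_mq_create(phba, mq, cq, LPFC_MBOX);
        if (rc) {
                lpfc_sli4_queue_free(mq);
                return rc;
        }

        *mqp = mq;
        return 0;
}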
14177
14178/**
James Smart4f774512009-05-22 14:52:35 -040014179 * lpfc_wq_create - Create a Work Queue on the HBA
14180 * @phba: HBA structure that indicates port to create a queue on.
14181 * @wq: The queue structure to use to create the work queue.
14182 * @cq: The completion queue to bind this work queue to.
14183 * @subtype: The subtype of the work queue indicating its functionality.
14184 *
14185 * This function creates a work queue, as detailed in @wq, on a port, described
14186 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
14187 *
14188 * The @phba struct is used to send mailbox command to HBA. The @wq struct
14189 * is used to get the entry count and entry size that are necessary to
14190 * determine the number of pages to allocate and use for this queue. The @cq
14191 * is used to indicate which completion queue to bind this work queue to. This
14192 * function will send the WQ_CREATE mailbox command to the HBA to setup the
14193 * work queue. This function is synchronous and will wait for the mailbox
14194 * command to finish before continuing.
14195 *
14196 * On success this function will return a zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040014197 * memory this function will return -ENOMEM. If the queue create mailbox command
14198 * fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040014199 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040014200int
James Smart4f774512009-05-22 14:52:35 -040014201lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
14202 struct lpfc_queue *cq, uint32_t subtype)
14203{
14204 struct lpfc_mbx_wq_create *wq_create;
14205 struct lpfc_dmabuf *dmabuf;
14206 LPFC_MBOXQ_t *mbox;
14207 int rc, length, status = 0;
14208 uint32_t shdr_status, shdr_add_status;
14209 union lpfc_sli4_cfg_shdr *shdr;
James Smart49198b32010-04-06 15:04:33 -040014210 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
James Smart5a6f1332011-03-11 16:05:35 -050014211 struct dma_address *page;
James Smart962bc512013-01-03 15:44:00 -050014212 void __iomem *bar_memmap_p;
14213 uint32_t db_offset;
14214 uint16_t pci_barset;
James Smart49198b32010-04-06 15:04:33 -040014215
James Smart2e90f4b2011-12-13 13:22:37 -050014216 /* sanity check on queue memory */
14217 if (!wq || !cq)
14218 return -ENODEV;
James Smart49198b32010-04-06 15:04:33 -040014219 if (!phba->sli4_hba.pc_sli4_params.supported)
14220 hw_page_size = SLI4_PAGE_SIZE;
James Smart4f774512009-05-22 14:52:35 -040014221
14222 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14223 if (!mbox)
14224 return -ENOMEM;
14225 length = (sizeof(struct lpfc_mbx_wq_create) -
14226 sizeof(struct lpfc_sli4_cfg_mhdr));
14227 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14228 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
14229 length, LPFC_SLI4_MBX_EMBED);
14230 wq_create = &mbox->u.mqe.un.wq_create;
James Smart5a6f1332011-03-11 16:05:35 -050014231 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
James Smart4f774512009-05-22 14:52:35 -040014232 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
14233 wq->page_count);
14234 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
14235 cq->queue_id);
James Smart0c651872013-07-15 18:33:23 -040014236
14237 /* wqv is the earliest version supported, NOT the latest */
James Smart5a6f1332011-03-11 16:05:35 -050014238 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14239 phba->sli4_hba.pc_sli4_params.wqv);
James Smart962bc512013-01-03 15:44:00 -050014240
James Smart0c651872013-07-15 18:33:23 -040014241 switch (phba->sli4_hba.pc_sli4_params.wqv) {
14242 case LPFC_Q_CREATE_VERSION_0:
14243 switch (wq->entry_size) {
14244 default:
14245 case 64:
14246 /* Nothing to do, version 0 ONLY supports 64 byte */
14247 page = wq_create->u.request.page;
14248 break;
14249 case 128:
14250 if (!(phba->sli4_hba.pc_sli4_params.wqsize &
14251 LPFC_WQ_SZ128_SUPPORT)) {
14252 status = -ERANGE;
14253 goto out;
14254 }
14255 /* If we get here the HBA MUST also support V1 and
14256 * we MUST use it
14257 */
14258 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14259 LPFC_Q_CREATE_VERSION_1);
14260
14261 bf_set(lpfc_mbx_wq_create_wqe_count,
14262 &wq_create->u.request_1, wq->entry_count);
14263 bf_set(lpfc_mbx_wq_create_wqe_size,
14264 &wq_create->u.request_1,
14265 LPFC_WQ_WQE_SIZE_128);
14266 bf_set(lpfc_mbx_wq_create_page_size,
14267 &wq_create->u.request_1,
James Smart8ea73db2017-02-12 13:52:25 -080014268 LPFC_WQ_PAGE_SIZE_4096);
James Smart0c651872013-07-15 18:33:23 -040014269 page = wq_create->u.request_1.page;
14270 break;
14271 }
14272 break;
14273 case LPFC_Q_CREATE_VERSION_1:
James Smart5a6f1332011-03-11 16:05:35 -050014274 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
14275 wq->entry_count);
14276 switch (wq->entry_size) {
14277 default:
14278 case 64:
14279 bf_set(lpfc_mbx_wq_create_wqe_size,
14280 &wq_create->u.request_1,
14281 LPFC_WQ_WQE_SIZE_64);
14282 break;
14283 case 128:
James Smart0c651872013-07-15 18:33:23 -040014284 if (!(phba->sli4_hba.pc_sli4_params.wqsize &
14285 LPFC_WQ_SZ128_SUPPORT)) {
14286 status = -ERANGE;
14287 goto out;
14288 }
James Smart5a6f1332011-03-11 16:05:35 -050014289 bf_set(lpfc_mbx_wq_create_wqe_size,
14290 &wq_create->u.request_1,
14291 LPFC_WQ_WQE_SIZE_128);
14292 break;
14293 }
James Smart8ea73db2017-02-12 13:52:25 -080014294 bf_set(lpfc_mbx_wq_create_page_size,
14295 &wq_create->u.request_1,
14296 LPFC_WQ_PAGE_SIZE_4096);
James Smart5a6f1332011-03-11 16:05:35 -050014297 page = wq_create->u.request_1.page;
James Smart0c651872013-07-15 18:33:23 -040014298 break;
14299 default:
14300 status = -ERANGE;
14301 goto out;
James Smart5a6f1332011-03-11 16:05:35 -050014302 }
James Smart0c651872013-07-15 18:33:23 -040014303
James Smart4f774512009-05-22 14:52:35 -040014304 list_for_each_entry(dmabuf, &wq->page_list, list) {
James Smart49198b32010-04-06 15:04:33 -040014305 memset(dmabuf->virt, 0, hw_page_size);
James Smart5a6f1332011-03-11 16:05:35 -050014306 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
14307 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
James Smart4f774512009-05-22 14:52:35 -040014308 }
James Smart962bc512013-01-03 15:44:00 -050014309
14310 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
14311 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
14312
James Smart4f774512009-05-22 14:52:35 -040014313 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14314 /* The IOCTL status is embedded in the mailbox subheader. */
James Smart4f774512009-05-22 14:52:35 -040014315 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14316 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14317 if (shdr_status || shdr_add_status || rc) {
14318 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14319 "2503 WQ_CREATE mailbox failed with "
14320 "status x%x add_status x%x, mbx status x%x\n",
14321 shdr_status, shdr_add_status, rc);
14322 status = -ENXIO;
14323 goto out;
14324 }
14325 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
14326 if (wq->queue_id == 0xFFFF) {
14327 status = -ENXIO;
14328 goto out;
14329 }
James Smart962bc512013-01-03 15:44:00 -050014330 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
14331 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
14332 &wq_create->u.response);
14333 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
14334 (wq->db_format != LPFC_DB_RING_FORMAT)) {
14335 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14336 "3265 WQ[%d] doorbell format not "
14337 "supported: x%x\n", wq->queue_id,
14338 wq->db_format);
14339 status = -EINVAL;
14340 goto out;
14341 }
14342 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
14343 &wq_create->u.response);
14344 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
14345 if (!bar_memmap_p) {
14346 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14347 "3263 WQ[%d] failed to memmap pci "
14348 "barset:x%x\n", wq->queue_id,
14349 pci_barset);
14350 status = -ENOMEM;
14351 goto out;
14352 }
14353 db_offset = wq_create->u.response.doorbell_offset;
14354 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
14355 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
14356 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14357 "3252 WQ[%d] doorbell offset not "
14358 "supported: x%x\n", wq->queue_id,
14359 db_offset);
14360 status = -EINVAL;
14361 goto out;
14362 }
14363 wq->db_regaddr = bar_memmap_p + db_offset;
14364 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smarta22e7db2013-04-17 20:16:37 -040014365 "3264 WQ[%d]: barset:x%x, offset:x%x, "
14366 "format:x%x\n", wq->queue_id, pci_barset,
14367 db_offset, wq->db_format);
James Smart962bc512013-01-03 15:44:00 -050014368 } else {
14369 wq->db_format = LPFC_DB_LIST_FORMAT;
14370 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
14371 }
James Smart895427b2017-02-12 13:52:30 -080014372 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
14373 if (wq->pring == NULL) {
14374 status = -ENOMEM;
14375 goto out;
14376 }
James Smart4f774512009-05-22 14:52:35 -040014377 wq->type = LPFC_WQ;
James Smart2a622bf2011-02-16 12:40:06 -050014378 wq->assoc_qid = cq->queue_id;
James Smart4f774512009-05-22 14:52:35 -040014379 wq->subtype = subtype;
14380 wq->host_index = 0;
14381 wq->hba_index = 0;
James Smartff78d8f2011-12-13 13:21:35 -050014382 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
James Smart4f774512009-05-22 14:52:35 -040014383
14384 /* link the wq onto the parent cq child list */
14385 list_add_tail(&wq->list, &cq->child_list);
14386out:
James Smart8fa38512009-07-19 10:01:03 -040014387 mempool_free(mbox, phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040014388 return status;
14389}
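
/*
 * Illustrative sketch only, not part of the driver: pairing lpfc_wq_create()
 * above with an existing CQ. Besides the WQ_CREATE mailbox command, the
 * routine allocates wq->pring and resolves the doorbell register before
 * returning. The alloc/free helpers, the phba->sli4_hba.wq_esize/wq_ecount
 * fields and the LPFC_FCP subtype are assumptions for illustration.
 */
static int __maybe_unused
lpfc_example_create_wq(struct lpfc_hba *phba, struct lpfc_queue *cq,
                       struct lpfc_queue **wqp)
{
        struct lpfc_queue *wq;
        int rc;

        wq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
                                   phba->sli4_hba.wq_ecount);
        if (!wq)
                return -ENOMEM;

        rc = lpfc_wq_create(phba, wq, cq, LPFC_FCP);
        if (rc) {
                lpfc_sli4_queue_free(wq);
                return rc;
        }

        *wqp = wq;
        return 0;
}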
14390
14391/**
James Smart73d91e52011-10-10 21:32:10 -040014392 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
14393 * @phba: HBA structure that indicates port to create a queue on.
14394 * @rq: The queue structure to use for the receive queue.
14395 * @qno: The associated HBQ number
14396 *
14397 *
14398 * For SLI4 we need to adjust the RQ repost value based on
14399 * the number of buffers that are initially posted to the RQ.
14400 */
14401void
14402lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
14403{
14404 uint32_t cnt;
14405
James Smart2e90f4b2011-12-13 13:22:37 -050014406 /* sanity check on queue memory */
14407 if (!rq)
14408 return;
James Smart73d91e52011-10-10 21:32:10 -040014409 cnt = lpfc_hbq_defs[qno]->entry_count;
14410
14411 /* Recalc repost for RQs based on buffers initially posted */
14412 cnt = (cnt >> 3);
14413 if (cnt < LPFC_QUEUE_MIN_REPOST)
14414 cnt = LPFC_QUEUE_MIN_REPOST;
14415
14416 rq->entry_repost = cnt;
14417}
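
/*
 * Worked example for the calculation above (illustrative only): with
 * lpfc_hbq_defs[qno]->entry_count == 4096 the shift gives 4096 >> 3 = 512,
 * so rq->entry_repost becomes 512; with entry_count == 64 the shift gives 8,
 * which is raised to LPFC_QUEUE_MIN_REPOST whenever that minimum (defined in
 * lpfc_sli4.h, not shown here) is larger.
 */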
14418
14419/**
James Smart4f774512009-05-22 14:52:35 -040014420 * lpfc_rq_create - Create a Receive Queue on the HBA
14421 * @phba: HBA structure that indicates port to create a queue on.
14422 * @hrq: The queue structure to use to create the header receive queue.
14423 * @drq: The queue structure to use to create the data receive queue.
14424 * @cq: The completion queue to bind this receive queue pair to.
14425 *
14426 * This function creates a receive buffer queue pair, as detailed in @hrq and
14427 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
14428 * to the HBA.
14429 *
14430 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
14431 * struct is used to get the entry count that is necessary to determine the
14432 * number of pages to use for this queue. The @cq is used to indicate which
14433 * completion queue to bind received buffers that are posted to these queues to.
14434 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
14435 * receive queue pair. This function is asynchronous and will wait for the
14436 * mailbox command to finish before continuing.
14437 *
14438 * On success this function will return a zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040014439 * memory this function will return -ENOMEM. If the queue create mailbox command
14440 * fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040014441 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040014442int
James Smart4f774512009-05-22 14:52:35 -040014443lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
14444 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
14445{
14446 struct lpfc_mbx_rq_create *rq_create;
14447 struct lpfc_dmabuf *dmabuf;
14448 LPFC_MBOXQ_t *mbox;
14449 int rc, length, status = 0;
14450 uint32_t shdr_status, shdr_add_status;
14451 union lpfc_sli4_cfg_shdr *shdr;
James Smart49198b32010-04-06 15:04:33 -040014452 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
James Smart962bc512013-01-03 15:44:00 -050014453 void __iomem *bar_memmap_p;
14454 uint32_t db_offset;
14455 uint16_t pci_barset;
James Smart49198b32010-04-06 15:04:33 -040014456
James Smart2e90f4b2011-12-13 13:22:37 -050014457 /* sanity check on queue memory */
14458 if (!hrq || !drq || !cq)
14459 return -ENODEV;
James Smart49198b32010-04-06 15:04:33 -040014460 if (!phba->sli4_hba.pc_sli4_params.supported)
14461 hw_page_size = SLI4_PAGE_SIZE;
James Smart4f774512009-05-22 14:52:35 -040014462
14463 if (hrq->entry_count != drq->entry_count)
14464 return -EINVAL;
14465 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14466 if (!mbox)
14467 return -ENOMEM;
14468 length = (sizeof(struct lpfc_mbx_rq_create) -
14469 sizeof(struct lpfc_sli4_cfg_mhdr));
14470 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14471 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
14472 length, LPFC_SLI4_MBX_EMBED);
14473 rq_create = &mbox->u.mqe.un.rq_create;
James Smart5a6f1332011-03-11 16:05:35 -050014474 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
14475 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14476 phba->sli4_hba.pc_sli4_params.rqv);
14477 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
14478 bf_set(lpfc_rq_context_rqe_count_1,
14479 &rq_create->u.request.context,
14480 hrq->entry_count);
14481 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
James Smartc31098c2011-04-16 11:03:33 -040014482 bf_set(lpfc_rq_context_rqe_size,
14483 &rq_create->u.request.context,
14484 LPFC_RQE_SIZE_8);
14485 bf_set(lpfc_rq_context_page_size,
14486 &rq_create->u.request.context,
James Smart8ea73db2017-02-12 13:52:25 -080014487 LPFC_RQ_PAGE_SIZE_4096);
James Smart5a6f1332011-03-11 16:05:35 -050014488 } else {
14489 switch (hrq->entry_count) {
14490 default:
14491 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14492 "2535 Unsupported RQ count. (%d)\n",
14493 hrq->entry_count);
James Smart4f4c1862012-06-12 13:54:02 -040014494 if (hrq->entry_count < 512) {
14495 status = -EINVAL;
14496 goto out;
14497 }
James Smart5a6f1332011-03-11 16:05:35 -050014498 /* otherwise default to smallest count (drop through) */
14499 case 512:
14500 bf_set(lpfc_rq_context_rqe_count,
14501 &rq_create->u.request.context,
14502 LPFC_RQ_RING_SIZE_512);
14503 break;
14504 case 1024:
14505 bf_set(lpfc_rq_context_rqe_count,
14506 &rq_create->u.request.context,
14507 LPFC_RQ_RING_SIZE_1024);
14508 break;
14509 case 2048:
14510 bf_set(lpfc_rq_context_rqe_count,
14511 &rq_create->u.request.context,
14512 LPFC_RQ_RING_SIZE_2048);
14513 break;
14514 case 4096:
14515 bf_set(lpfc_rq_context_rqe_count,
14516 &rq_create->u.request.context,
14517 LPFC_RQ_RING_SIZE_4096);
14518 break;
14519 }
14520 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
14521 LPFC_HDR_BUF_SIZE);
James Smart4f774512009-05-22 14:52:35 -040014522 }
14523 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
14524 cq->queue_id);
14525 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
14526 hrq->page_count);
James Smart4f774512009-05-22 14:52:35 -040014527 list_for_each_entry(dmabuf, &hrq->page_list, list) {
James Smart49198b32010-04-06 15:04:33 -040014528 memset(dmabuf->virt, 0, hw_page_size);
James Smart4f774512009-05-22 14:52:35 -040014529 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14530 putPaddrLow(dmabuf->phys);
14531 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14532 putPaddrHigh(dmabuf->phys);
14533 }
James Smart962bc512013-01-03 15:44:00 -050014534 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
14535 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
14536
James Smart4f774512009-05-22 14:52:35 -040014537 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14538 /* The IOCTL status is embedded in the mailbox subheader. */
James Smart4f774512009-05-22 14:52:35 -040014539 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14540 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14541 if (shdr_status || shdr_add_status || rc) {
14542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14543 "2504 RQ_CREATE mailbox failed with "
14544 "status x%x add_status x%x, mbx status x%x\n",
14545 shdr_status, shdr_add_status, rc);
14546 status = -ENXIO;
14547 goto out;
14548 }
14549 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
14550 if (hrq->queue_id == 0xFFFF) {
14551 status = -ENXIO;
14552 goto out;
14553 }
James Smart962bc512013-01-03 15:44:00 -050014554
14555 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
14556 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
14557 &rq_create->u.response);
14558 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
14559 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
14560 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14561 "3262 RQ [%d] doorbell format not "
14562 "supported: x%x\n", hrq->queue_id,
14563 hrq->db_format);
14564 status = -EINVAL;
14565 goto out;
14566 }
14567
14568 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
14569 &rq_create->u.response);
14570 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
14571 if (!bar_memmap_p) {
14572 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14573 "3269 RQ[%d] failed to memmap pci "
14574 "barset:x%x\n", hrq->queue_id,
14575 pci_barset);
14576 status = -ENOMEM;
14577 goto out;
14578 }
14579
14580 db_offset = rq_create->u.response.doorbell_offset;
14581 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
14582 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
14583 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14584 "3270 RQ[%d] doorbell offset not "
14585 "supported: x%x\n", hrq->queue_id,
14586 db_offset);
14587 status = -EINVAL;
14588 goto out;
14589 }
14590 hrq->db_regaddr = bar_memmap_p + db_offset;
14591 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smarta22e7db2013-04-17 20:16:37 -040014592 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
14593 "format:x%x\n", hrq->queue_id, pci_barset,
14594 db_offset, hrq->db_format);
James Smart962bc512013-01-03 15:44:00 -050014595 } else {
14596 hrq->db_format = LPFC_DB_RING_FORMAT;
14597 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
14598 }
James Smart4f774512009-05-22 14:52:35 -040014599 hrq->type = LPFC_HRQ;
James Smart2a622bf2011-02-16 12:40:06 -050014600 hrq->assoc_qid = cq->queue_id;
James Smart4f774512009-05-22 14:52:35 -040014601 hrq->subtype = subtype;
14602 hrq->host_index = 0;
14603 hrq->hba_index = 0;
14604
14605 /* now create the data queue */
14606 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14607 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
14608 length, LPFC_SLI4_MBX_EMBED);
James Smart5a6f1332011-03-11 16:05:35 -050014609 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14610 phba->sli4_hba.pc_sli4_params.rqv);
14611 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
14612 bf_set(lpfc_rq_context_rqe_count_1,
James Smartc31098c2011-04-16 11:03:33 -040014613 &rq_create->u.request.context, hrq->entry_count);
James Smart5a6f1332011-03-11 16:05:35 -050014614 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
James Smartc31098c2011-04-16 11:03:33 -040014615 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
14616 LPFC_RQE_SIZE_8);
14617 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
14618 (PAGE_SIZE/SLI4_PAGE_SIZE));
James Smart5a6f1332011-03-11 16:05:35 -050014619 } else {
14620 switch (drq->entry_count) {
14621 default:
14622 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14623 "2536 Unsupported RQ count. (%d)\n",
14624 drq->entry_count);
James Smart4f4c1862012-06-12 13:54:02 -040014625 if (drq->entry_count < 512) {
14626 status = -EINVAL;
14627 goto out;
14628 }
James Smart5a6f1332011-03-11 16:05:35 -050014629 /* otherwise default to smallest count (drop through) */
14630 case 512:
14631 bf_set(lpfc_rq_context_rqe_count,
14632 &rq_create->u.request.context,
14633 LPFC_RQ_RING_SIZE_512);
14634 break;
14635 case 1024:
14636 bf_set(lpfc_rq_context_rqe_count,
14637 &rq_create->u.request.context,
14638 LPFC_RQ_RING_SIZE_1024);
14639 break;
14640 case 2048:
14641 bf_set(lpfc_rq_context_rqe_count,
14642 &rq_create->u.request.context,
14643 LPFC_RQ_RING_SIZE_2048);
14644 break;
14645 case 4096:
14646 bf_set(lpfc_rq_context_rqe_count,
14647 &rq_create->u.request.context,
14648 LPFC_RQ_RING_SIZE_4096);
14649 break;
14650 }
14651 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
14652 LPFC_DATA_BUF_SIZE);
James Smart4f774512009-05-22 14:52:35 -040014653 }
14654 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
14655 cq->queue_id);
14656 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
14657 drq->page_count);
James Smart4f774512009-05-22 14:52:35 -040014658 list_for_each_entry(dmabuf, &drq->page_list, list) {
14659 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14660 putPaddrLow(dmabuf->phys);
14661 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14662 putPaddrHigh(dmabuf->phys);
14663 }
James Smart962bc512013-01-03 15:44:00 -050014664 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
14665 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
James Smart4f774512009-05-22 14:52:35 -040014666 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14667 /* The IOCTL status is embedded in the mailbox subheader. */
14668 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
14669 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14670 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14671 if (shdr_status || shdr_add_status || rc) {
14672 status = -ENXIO;
14673 goto out;
14674 }
14675 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
14676 if (drq->queue_id == 0xFFFF) {
14677 status = -ENXIO;
14678 goto out;
14679 }
14680 drq->type = LPFC_DRQ;
James Smart2a622bf2011-02-16 12:40:06 -050014681 drq->assoc_qid = cq->queue_id;
James Smart4f774512009-05-22 14:52:35 -040014682 drq->subtype = subtype;
14683 drq->host_index = 0;
14684 drq->hba_index = 0;
14685
14686 /* link the header and data RQs onto the parent cq child list */
14687 list_add_tail(&hrq->list, &cq->child_list);
14688 list_add_tail(&drq->list, &cq->child_list);
14689
14690out:
James Smart8fa38512009-07-19 10:01:03 -040014691 mempool_free(mbox, phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040014692 return status;
14693}
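
/*
 * Illustrative sketch only, not part of the driver: creating a header/data
 * receive queue pair bound to a CQ with lpfc_rq_create() above. The two
 * queues must be allocated with the same entry count or the routine returns
 * -EINVAL. The alloc/free helpers, the phba->sli4_hba.rq_esize/rq_ecount
 * fields and the LPFC_USOL subtype are assumptions for illustration.
 */
static int __maybe_unused
lpfc_example_create_rq_pair(struct lpfc_hba *phba, struct lpfc_queue *cq,
                            struct lpfc_queue **hrqp, struct lpfc_queue **drqp)
{
        struct lpfc_queue *hrq, *drq;
        int rc;

        hrq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
                                    phba->sli4_hba.rq_ecount);
        drq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
                                    phba->sli4_hba.rq_ecount);
        if (!hrq || !drq) {
                if (hrq)
                        lpfc_sli4_queue_free(hrq);
                if (drq)
                        lpfc_sli4_queue_free(drq);
                return -ENOMEM;
        }

        /* One mailbox exchange creates both the header and the data queue */
        rc = lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
        if (rc) {
                lpfc_sli4_queue_free(hrq);
                lpfc_sli4_queue_free(drq);
                return rc;
        }

        *hrqp = hrq;
        *drqp = drq;
        return 0;
}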
14694
14695/**
14696 * lpfc_eq_destroy - Destroy an event Queue on the HBA
14697 * @eq: The queue structure associated with the queue to destroy.
14698 *
14699 * This function destroys a queue, as detailed in @eq, by sending a mailbox
14700 * command, specific to the type of queue, to the HBA.
14701 *
14702 * The @eq struct is used to get the queue ID of the queue to destroy.
14703 *
14704 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040014705 * command fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040014706 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040014707int
James Smart4f774512009-05-22 14:52:35 -040014708lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
14709{
14710 LPFC_MBOXQ_t *mbox;
14711 int rc, length, status = 0;
14712 uint32_t shdr_status, shdr_add_status;
14713 union lpfc_sli4_cfg_shdr *shdr;
14714
James Smart2e90f4b2011-12-13 13:22:37 -050014715 /* sanity check on queue memory */
James Smart4f774512009-05-22 14:52:35 -040014716 if (!eq)
14717 return -ENODEV;
14718 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
14719 if (!mbox)
14720 return -ENOMEM;
14721 length = (sizeof(struct lpfc_mbx_eq_destroy) -
14722 sizeof(struct lpfc_sli4_cfg_mhdr));
14723 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14724 LPFC_MBOX_OPCODE_EQ_DESTROY,
14725 length, LPFC_SLI4_MBX_EMBED);
14726 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
14727 eq->queue_id);
14728 mbox->vport = eq->phba->pport;
14729 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14730
14731 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
14732 /* The IOCTL status is embedded in the mailbox subheader. */
14733 shdr = (union lpfc_sli4_cfg_shdr *)
14734 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
14735 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14736 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14737 if (shdr_status || shdr_add_status || rc) {
14738 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14739 "2505 EQ_DESTROY mailbox failed with "
14740 "status x%x add_status x%x, mbx status x%x\n",
14741 shdr_status, shdr_add_status, rc);
14742 status = -ENXIO;
14743 }
14744
14745 /* Remove eq from any list */
14746 list_del_init(&eq->list);
James Smart8fa38512009-07-19 10:01:03 -040014747 mempool_free(mbox, eq->phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040014748 return status;
14749}
14750
14751/**
14752 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
14753 * @cq: The queue structure associated with the queue to destroy.
14754 *
14755 * This function destroys a queue, as detailed in @cq, by sending a mailbox
14756 * command, specific to the type of queue, to the HBA.
14757 *
14758 * The @cq struct is used to get the queue ID of the queue to destroy.
14759 *
14760 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040014761 * command fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040014762 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040014763int
James Smart4f774512009-05-22 14:52:35 -040014764lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
14765{
14766 LPFC_MBOXQ_t *mbox;
14767 int rc, length, status = 0;
14768 uint32_t shdr_status, shdr_add_status;
14769 union lpfc_sli4_cfg_shdr *shdr;
14770
James Smart2e90f4b2011-12-13 13:22:37 -050014771 /* sanity check on queue memory */
James Smart4f774512009-05-22 14:52:35 -040014772 if (!cq)
14773 return -ENODEV;
14774 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
14775 if (!mbox)
14776 return -ENOMEM;
14777 length = (sizeof(struct lpfc_mbx_cq_destroy) -
14778 sizeof(struct lpfc_sli4_cfg_mhdr));
14779 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14780 LPFC_MBOX_OPCODE_CQ_DESTROY,
14781 length, LPFC_SLI4_MBX_EMBED);
14782 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
14783 cq->queue_id);
14784 mbox->vport = cq->phba->pport;
14785 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14786 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
14787 /* The IOCTL status is embedded in the mailbox subheader. */
14788 shdr = (union lpfc_sli4_cfg_shdr *)
14789 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
14790 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14791 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14792 if (shdr_status || shdr_add_status || rc) {
14793 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14794 "2506 CQ_DESTROY mailbox failed with "
14795 "status x%x add_status x%x, mbx status x%x\n",
14796 shdr_status, shdr_add_status, rc);
14797 status = -ENXIO;
14798 }
14799 /* Remove cq from any list */
14800 list_del_init(&cq->list);
James Smart8fa38512009-07-19 10:01:03 -040014801 mempool_free(mbox, cq->phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040014802 return status;
14803}
14804
14805/**
James Smart04c68492009-05-22 14:52:52 -040014806 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
14807 * @mq: The queue structure associated with the queue to destroy.
14808 *
14809 * This function destroys a queue, as detailed in @mq, by sending a mailbox
14810 * command, specific to the type of queue, to the HBA.
14811 *
14812 * The @mq struct is used to get the queue ID of the queue to destroy.
14813 *
14814 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040014815 * command fails this function will return -ENXIO.
James Smart04c68492009-05-22 14:52:52 -040014816 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040014817int
James Smart04c68492009-05-22 14:52:52 -040014818lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
14819{
14820 LPFC_MBOXQ_t *mbox;
14821 int rc, length, status = 0;
14822 uint32_t shdr_status, shdr_add_status;
14823 union lpfc_sli4_cfg_shdr *shdr;
14824
James Smart2e90f4b2011-12-13 13:22:37 -050014825 /* sanity check on queue memory */
James Smart04c68492009-05-22 14:52:52 -040014826 if (!mq)
14827 return -ENODEV;
14828 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
14829 if (!mbox)
14830 return -ENOMEM;
14831 length = (sizeof(struct lpfc_mbx_mq_destroy) -
14832 sizeof(struct lpfc_sli4_cfg_mhdr));
14833 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14834 LPFC_MBOX_OPCODE_MQ_DESTROY,
14835 length, LPFC_SLI4_MBX_EMBED);
14836 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
14837 mq->queue_id);
14838 mbox->vport = mq->phba->pport;
14839 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14840 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
14841 /* The IOCTL status is embedded in the mailbox subheader. */
14842 shdr = (union lpfc_sli4_cfg_shdr *)
14843 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
14844 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14845 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14846 if (shdr_status || shdr_add_status || rc) {
14847 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14848 "2507 MQ_DESTROY mailbox failed with "
14849 "status x%x add_status x%x, mbx status x%x\n",
14850 shdr_status, shdr_add_status, rc);
14851 status = -ENXIO;
14852 }
14853 /* Remove mq from any list */
14854 list_del_init(&mq->list);
James Smart8fa38512009-07-19 10:01:03 -040014855 mempool_free(mbox, mq->phba->mbox_mem_pool);
James Smart04c68492009-05-22 14:52:52 -040014856 return status;
14857}
14858
14859/**
James Smart4f774512009-05-22 14:52:35 -040014860 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
14861 * @wq: The queue structure associated with the queue to destroy.
14862 *
14863 * This function destroys a queue, as detailed in @wq, by sending a mailbox
14864 * command, specific to the type of queue, to the HBA.
14865 *
14866 * The @wq struct is used to get the queue ID of the queue to destroy.
14867 *
14868 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040014869 * command fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040014870 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040014871int
James Smart4f774512009-05-22 14:52:35 -040014872lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
14873{
14874 LPFC_MBOXQ_t *mbox;
14875 int rc, length, status = 0;
14876 uint32_t shdr_status, shdr_add_status;
14877 union lpfc_sli4_cfg_shdr *shdr;
14878
James Smart2e90f4b2011-12-13 13:22:37 -050014879 /* sanity check on queue memory */
James Smart4f774512009-05-22 14:52:35 -040014880 if (!wq)
14881 return -ENODEV;
14882 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
14883 if (!mbox)
14884 return -ENOMEM;
14885 length = (sizeof(struct lpfc_mbx_wq_destroy) -
14886 sizeof(struct lpfc_sli4_cfg_mhdr));
14887 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14888 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
14889 length, LPFC_SLI4_MBX_EMBED);
14890 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
14891 wq->queue_id);
14892 mbox->vport = wq->phba->pport;
14893 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14894 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
14895 shdr = (union lpfc_sli4_cfg_shdr *)
14896 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
14897 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14898 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14899 if (shdr_status || shdr_add_status || rc) {
14900 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14901 "2508 WQ_DESTROY mailbox failed with "
14902 "status x%x add_status x%x, mbx status x%x\n",
14903 shdr_status, shdr_add_status, rc);
14904 status = -ENXIO;
14905 }
14906 /* Remove wq from any list */
14907 list_del_init(&wq->list);
James Smart8fa38512009-07-19 10:01:03 -040014908 mempool_free(mbox, wq->phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040014909 return status;
14910}
14911
14912/**
14913 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
14914 * @hrq: The header receive queue structure to destroy.
14915 * @drq: The data receive queue structure to destroy.
14916 *
14917 * This function destroys the receive queue pair, as detailed in @hrq and
14918 * @drq, by sending RQ_DESTROY mailbox commands to the HBA. The @hrq and
14919 * @drq structs are used to get the queue IDs of the queues to destroy.
14920 *
14921 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040014922 * command fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040014923 **/
James Smarta2fc4aef2014-09-03 12:57:55 -040014924int
James Smart4f774512009-05-22 14:52:35 -040014925lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
14926 struct lpfc_queue *drq)
14927{
14928 LPFC_MBOXQ_t *mbox;
14929 int rc, length, status = 0;
14930 uint32_t shdr_status, shdr_add_status;
14931 union lpfc_sli4_cfg_shdr *shdr;
14932
James Smart2e90f4b2011-12-13 13:22:37 -050014933 /* sanity check on queue memory */
James Smart4f774512009-05-22 14:52:35 -040014934 if (!hrq || !drq)
14935 return -ENODEV;
14936 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
14937 if (!mbox)
14938 return -ENOMEM;
14939 length = (sizeof(struct lpfc_mbx_rq_destroy) -
James Smartfedd3b72011-02-16 12:39:24 -050014940 sizeof(struct lpfc_sli4_cfg_mhdr));
James Smart4f774512009-05-22 14:52:35 -040014941 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14942 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
14943 length, LPFC_SLI4_MBX_EMBED);
14944 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
14945 hrq->queue_id);
14946 mbox->vport = hrq->phba->pport;
14947 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14948 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
14949 /* The IOCTL status is embedded in the mailbox subheader. */
14950 shdr = (union lpfc_sli4_cfg_shdr *)
14951 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
14952 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14953 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14954 if (shdr_status || shdr_add_status || rc) {
14955 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14956 "2509 RQ_DESTROY mailbox failed with "
14957 "status x%x add_status x%x, mbx status x%x\n",
14958 shdr_status, shdr_add_status, rc);
14959 if (rc != MBX_TIMEOUT)
14960 mempool_free(mbox, hrq->phba->mbox_mem_pool);
14961 return -ENXIO;
14962 }
14963 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
14964 drq->queue_id);
14965 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
14966 shdr = (union lpfc_sli4_cfg_shdr *)
14967 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
14968 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14969 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14970 if (shdr_status || shdr_add_status || rc) {
14971 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14972 "2510 RQ_DESTROY mailbox failed with "
14973 "status x%x add_status x%x, mbx status x%x\n",
14974 shdr_status, shdr_add_status, rc);
14975 status = -ENXIO;
14976 }
14977 list_del_init(&hrq->list);
14978 list_del_init(&drq->list);
James Smart8fa38512009-07-19 10:01:03 -040014979 mempool_free(mbox, hrq->phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040014980 return status;
14981}
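
/*
 * Illustrative sketch only, not part of the driver: the destroy routines
 * above are normally driven in child-to-parent order, mirroring how each
 * queue was linked onto its parent's child_list at create time. WQs, RQs
 * and the MQ go first, then their CQs, then the parent EQs.
 */
static void __maybe_unused
lpfc_example_teardown(struct lpfc_hba *phba, struct lpfc_queue *wq,
                      struct lpfc_queue *hrq, struct lpfc_queue *drq,
                      struct lpfc_queue *cq, struct lpfc_queue *eq)
{
        /* children of the CQ first */
        lpfc_wq_destroy(phba, wq);
        lpfc_rq_destroy(phba, hrq, drq);
        /* then the CQ itself, then its parent EQ */
        lpfc_cq_destroy(phba, cq);
        lpfc_eq_destroy(phba, eq);
}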
14982
14983/**
14984 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
14985 * @phba: pointer to lpfc hba data structure.
14986 * @pdma_phys_addr0: Physical address of the 1st SGL page.
14987 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
14988 * @xritag: the xritag that ties this io to the SGL pages.
14989 *
14990 * This routine will post the sgl pages for the IO that has the xritag
14991 * that is in the iocbq structure. The xritag is assigned during iocbq
14992 * creation and persists for as long as the driver is loaded.
14993 * If the caller has fewer than 256 scatter gather segments to map, then
14994 * pdma_phys_addr1 should be 0.
14995 * If the caller needs to map more than 256 scatter gather segments, then
14996 * pdma_phys_addr1 should be a valid physical address.
14997 * Physical addresses for SGLs must be 64 byte aligned.
14998 * If two SGL pages are mapped, the first one must have 256 entries and
14999 * the second SGL can have between 1 and 256 entries.
15000 *
15001 * Return codes:
15002 * 0 - Success
15003 * -ENXIO, -ENOMEM - Failure
15004 **/
15005int
15006lpfc_sli4_post_sgl(struct lpfc_hba *phba,
15007 dma_addr_t pdma_phys_addr0,
15008 dma_addr_t pdma_phys_addr1,
15009 uint16_t xritag)
15010{
15011 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
15012 LPFC_MBOXQ_t *mbox;
15013 int rc;
15014 uint32_t shdr_status, shdr_add_status;
James Smart6d368e52011-05-24 11:44:12 -040015015 uint32_t mbox_tmo;
James Smart4f774512009-05-22 14:52:35 -040015016 union lpfc_sli4_cfg_shdr *shdr;
15017
15018 if (xritag == NO_XRI) {
15019 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15020 "0364 Invalid param:\n");
15021 return -EINVAL;
15022 }
15023
15024 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15025 if (!mbox)
15026 return -ENOMEM;
15027
15028 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15029 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
15030 sizeof(struct lpfc_mbx_post_sgl_pages) -
James Smartfedd3b72011-02-16 12:39:24 -050015031 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
James Smart4f774512009-05-22 14:52:35 -040015032
15033 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
15034 &mbox->u.mqe.un.post_sgl_pages;
15035 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
15036 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
15037
15038 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
15039 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
15040 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
15041 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
15042
15043 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
15044 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
15045 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
15046 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
15047 if (!phba->sli4_hba.intr_enable)
15048 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
James Smart6d368e52011-05-24 11:44:12 -040015049 else {
James Smarta183a152011-10-10 21:32:43 -040015050 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart6d368e52011-05-24 11:44:12 -040015051 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
15052 }
James Smart4f774512009-05-22 14:52:35 -040015053 /* The IOCTL status is embedded in the mailbox subheader. */
15054 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
15055 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15056 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15057 if (rc != MBX_TIMEOUT)
15058 mempool_free(mbox, phba->mbox_mem_pool);
15059 if (shdr_status || shdr_add_status || rc) {
15060 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15061 "2511 POST_SGL mailbox failed with "
15062 "status x%x add_status x%x, mbx status x%x\n",
 15063				shdr_status, shdr_add_status, rc);
		return -ENXIO;
James Smart4f774512009-05-22 14:52:35 -040015064	}
 15065	return 0;
15066}
James Smart4f774512009-05-22 14:52:35 -040015067
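/*
 * Editor's note -- illustrative sketch only, not part of the driver and
 * never compiled.  It shows how a caller might use lpfc_sli4_post_sgl()
 * above to post a two-page SGL, following the 256-entry and 64-byte
 * alignment rules described in the routine's header.  The helper name is
 * hypothetical; the sglq fields mirror struct lpfc_sglq as used elsewhere
 * in this file.
 */
#if 0
static int
lpfc_example_post_two_page_sgl(struct lpfc_hba *phba, struct lpfc_sglq *sglq)
{
	dma_addr_t pg0 = sglq->phys;			/* holds the first 256 SGEs */
	dma_addr_t pg1 = sglq->phys + SGL_PAGE_SIZE;	/* holds the remaining SGEs */

	/* SGL page addresses must be 64 byte aligned */
	if (pg0 & 0x3f)
		return -EINVAL;

	return lpfc_sli4_post_sgl(phba, pg0, pg1, sglq->sli4_xritag);
}
#endif
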
15068/**
James Smart88a2cfb2011-07-22 18:36:33 -040015069 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
James Smart6d368e52011-05-24 11:44:12 -040015070 * @phba: pointer to lpfc hba data structure.
 15071 *
 15072 * This routine is invoked to allocate the next available logical xri
James Smart88a2cfb2011-07-22 18:36:33 -040015073 * from the driver's xri bitmask. Because the index is logical, the
 15074 * driver starts the search at 0 each time. The hbalock is taken while
 15075 * the bitmask and the xri_used count are updated.
James Smart6d368e52011-05-24 11:44:12 -040015076 *
James Smart88a2cfb2011-07-22 18:36:33 -040015077 * Returns
 15078 * A logical xri in the range 0 <= xri < max_xri if successful,
 15079 * NO_XRI if no xris are available.
15080 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040015081static uint16_t
James Smart6d368e52011-05-24 11:44:12 -040015082lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
15083{
15084 unsigned long xri;
15085
15086 /*
15087 * Fetch the next logical xri. Because this index is logical,
15088 * the driver starts at 0 each time.
15089 */
15090 spin_lock_irq(&phba->hbalock);
15091 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
15092 phba->sli4_hba.max_cfg_param.max_xri, 0);
15093 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
15094 spin_unlock_irq(&phba->hbalock);
15095 return NO_XRI;
15096 } else {
15097 set_bit(xri, phba->sli4_hba.xri_bmask);
15098 phba->sli4_hba.max_cfg_param.xri_used++;
James Smart6d368e52011-05-24 11:44:12 -040015099 }
James Smart6d368e52011-05-24 11:44:12 -040015100 spin_unlock_irq(&phba->hbalock);
15101 return xri;
15102}
15103
 15104/**
 15105 * __lpfc_sli4_free_xri - Release an xri for reuse.
 15106 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release. The caller must hold the hbalock.
 15107 *
 15108 * This routine is invoked to release an xri to the pool of
 15109 * available xris maintained by the driver.
 15110 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040015111static void
James Smart6d368e52011-05-24 11:44:12 -040015112__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
15113{
15114 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
James Smart6d368e52011-05-24 11:44:12 -040015115 phba->sli4_hba.max_cfg_param.xri_used--;
15116 }
15117}
15118
15119/**
15120 * lpfc_sli4_free_xri - Release an xri for reuse.
 15121 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 15122 *
 15123 * This routine is invoked to release an xri to the pool of
 15124 * available xris maintained by the driver.
15125 **/
15126void
15127lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
15128{
15129 spin_lock_irq(&phba->hbalock);
15130 __lpfc_sli4_free_xri(phba, xri);
15131 spin_unlock_irq(&phba->hbalock);
15132}
15133
15134/**
James Smart4f774512009-05-22 14:52:35 -040015135 * lpfc_sli4_next_xritag - Get an xritag for the io
15136 * @phba: Pointer to HBA context object.
15137 *
 15138 * This function gets an xritag for the iocb. If there is no unused xritag
 15139 * it will return NO_XRI (0xffff).
 15140 * The function returns the allocated xritag if successful, else returns
 15141 * NO_XRI, which is not a valid xritag.
15142 * The caller is not required to hold any lock.
15143 **/
15144uint16_t
15145lpfc_sli4_next_xritag(struct lpfc_hba *phba)
15146{
James Smart6d368e52011-05-24 11:44:12 -040015147 uint16_t xri_index;
James Smart4f774512009-05-22 14:52:35 -040015148
James Smart6d368e52011-05-24 11:44:12 -040015149 xri_index = lpfc_sli4_alloc_xri(phba);
James Smart81378052012-05-09 21:17:37 -040015150 if (xri_index == NO_XRI)
15151 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 15152				"2004 Failed to allocate XRI. Last XRITAG is %d"
15153 " Max XRI is %d, Used XRI is %d\n",
15154 xri_index,
15155 phba->sli4_hba.max_cfg_param.max_xri,
15156 phba->sli4_hba.max_cfg_param.xri_used);
15157 return xri_index;
James Smart4f774512009-05-22 14:52:35 -040015158}
15159
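/*
 * Editor's note -- illustrative sketch only, not part of the driver and
 * never compiled.  It shows the typical pairing of lpfc_sli4_next_xritag()
 * with lpfc_sli4_free_xri(): check for NO_XRI on allocation failure and
 * release the logical xri once the I/O no longer needs it.  The helper
 * name is hypothetical.
 */
#if 0
static int
lpfc_example_use_xritag(struct lpfc_hba *phba)
{
	uint16_t xritag = lpfc_sli4_next_xritag(phba);

	if (xritag == NO_XRI)
		return -ENOMEM;		/* xri bitmask is exhausted */

	/* ... tie the logical xri to an I/O and issue it here ... */

	lpfc_sli4_free_xri(phba, xritag);	/* takes hbalock internally */
	return 0;
}
#endif
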
15160/**
James Smart895427b2017-02-12 13:52:30 -080015161 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
James Smart4f774512009-05-22 14:52:35 -040015162 * @phba: pointer to lpfc hba data structure.
James Smart8a9d2e82012-05-09 21:16:12 -040015163 * @post_sgl_list: pointer to els sgl entry list.
 15164 * @post_cnt: number of els sgl entries on the list.
James Smart4f774512009-05-22 14:52:35 -040015165 *
15166 * This routine is invoked to post a block of driver's sgl pages to the
15167 * HBA using non-embedded mailbox command. No Lock is held. This routine
15168 * is only called when the driver is loading and after all IO has been
15169 * stopped.
15170 **/
James Smart8a9d2e82012-05-09 21:16:12 -040015171static int
James Smart895427b2017-02-12 13:52:30 -080015172lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
James Smart8a9d2e82012-05-09 21:16:12 -040015173 struct list_head *post_sgl_list,
15174 int post_cnt)
James Smart4f774512009-05-22 14:52:35 -040015175{
James Smart8a9d2e82012-05-09 21:16:12 -040015176 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
James Smart4f774512009-05-22 14:52:35 -040015177 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
15178 struct sgl_page_pairs *sgl_pg_pairs;
15179 void *viraddr;
15180 LPFC_MBOXQ_t *mbox;
15181 uint32_t reqlen, alloclen, pg_pairs;
15182 uint32_t mbox_tmo;
James Smart8a9d2e82012-05-09 21:16:12 -040015183 uint16_t xritag_start = 0;
15184 int rc = 0;
James Smart4f774512009-05-22 14:52:35 -040015185 uint32_t shdr_status, shdr_add_status;
15186 union lpfc_sli4_cfg_shdr *shdr;
15187
James Smart895427b2017-02-12 13:52:30 -080015188 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
James Smart4f774512009-05-22 14:52:35 -040015189 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
James Smart49198b32010-04-06 15:04:33 -040015190 if (reqlen > SLI4_PAGE_SIZE) {
James Smart895427b2017-02-12 13:52:30 -080015191 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smart4f774512009-05-22 14:52:35 -040015192 "2559 Block sgl registration required DMA "
 15193				"size (%d) greater than a page\n", reqlen);
15194 return -ENOMEM;
15195 }
James Smart895427b2017-02-12 13:52:30 -080015196
James Smart4f774512009-05-22 14:52:35 -040015197 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
James Smart6d368e52011-05-24 11:44:12 -040015198 if (!mbox)
James Smart4f774512009-05-22 14:52:35 -040015199 return -ENOMEM;
James Smart4f774512009-05-22 14:52:35 -040015200
15201 /* Allocate DMA memory and set up the non-embedded mailbox command */
15202 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15203 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
15204 LPFC_SLI4_MBX_NEMBED);
15205
15206 if (alloclen < reqlen) {
15207 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15208 "0285 Allocated DMA memory size (%d) is "
15209 "less than the requested DMA memory "
15210 "size (%d)\n", alloclen, reqlen);
15211 lpfc_sli4_mbox_cmd_free(phba, mbox);
15212 return -ENOMEM;
15213 }
James Smart4f774512009-05-22 14:52:35 -040015214 /* Set up the SGL pages in the non-embedded DMA pages */
James Smart6d368e52011-05-24 11:44:12 -040015215 viraddr = mbox->sge_array->addr[0];
James Smart4f774512009-05-22 14:52:35 -040015216 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
15217 sgl_pg_pairs = &sgl->sgl_pg_pairs;
15218
James Smart8a9d2e82012-05-09 21:16:12 -040015219 pg_pairs = 0;
15220 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
James Smart4f774512009-05-22 14:52:35 -040015221 /* Set up the sge entry */
15222 sgl_pg_pairs->sgl_pg0_addr_lo =
15223 cpu_to_le32(putPaddrLow(sglq_entry->phys));
15224 sgl_pg_pairs->sgl_pg0_addr_hi =
15225 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
15226 sgl_pg_pairs->sgl_pg1_addr_lo =
15227 cpu_to_le32(putPaddrLow(0));
15228 sgl_pg_pairs->sgl_pg1_addr_hi =
15229 cpu_to_le32(putPaddrHigh(0));
James Smart6d368e52011-05-24 11:44:12 -040015230
James Smart4f774512009-05-22 14:52:35 -040015231 /* Keep the first xritag on the list */
15232 if (pg_pairs == 0)
15233 xritag_start = sglq_entry->sli4_xritag;
15234 sgl_pg_pairs++;
James Smart8a9d2e82012-05-09 21:16:12 -040015235 pg_pairs++;
James Smart4f774512009-05-22 14:52:35 -040015236 }
James Smart6d368e52011-05-24 11:44:12 -040015237
15238 /* Complete initialization and perform endian conversion. */
James Smart4f774512009-05-22 14:52:35 -040015239 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
James Smart895427b2017-02-12 13:52:30 -080015240 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
James Smart4f774512009-05-22 14:52:35 -040015241 sgl->word0 = cpu_to_le32(sgl->word0);
James Smart895427b2017-02-12 13:52:30 -080015242
James Smart4f774512009-05-22 14:52:35 -040015243 if (!phba->sli4_hba.intr_enable)
15244 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15245 else {
James Smarta183a152011-10-10 21:32:43 -040015246 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart4f774512009-05-22 14:52:35 -040015247 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
15248 }
15249 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
15250 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15251 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15252 if (rc != MBX_TIMEOUT)
15253 lpfc_sli4_mbox_cmd_free(phba, mbox);
15254 if (shdr_status || shdr_add_status || rc) {
15255 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15256 "2513 POST_SGL_BLOCK mailbox command failed "
15257 "status x%x add_status x%x mbx status x%x\n",
15258 shdr_status, shdr_add_status, rc);
15259 rc = -ENXIO;
15260 }
15261 return rc;
15262}
15263
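/*
 * Editor's note -- illustrative sketch only, not part of the driver and
 * never compiled.  It spells out the sizing rule that the reqlen check in
 * lpfc_sli4_post_sgl_list() above enforces: the non-embedded mailbox
 * payload (one sgl_page_pairs entry per posted sgl, plus the config
 * subheader and the trailing word) must fit in SLI4_PAGE_SIZE.  The helper
 * name is hypothetical.
 */
#if 0
static inline uint32_t
lpfc_example_max_sgl_post_cnt(void)
{
	return (SLI4_PAGE_SIZE -
		sizeof(union lpfc_sli4_cfg_shdr) - sizeof(uint32_t)) /
		sizeof(struct sgl_page_pairs);
}
#endif
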
15264/**
15265 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
15266 * @phba: pointer to lpfc hba data structure.
15267 * @sblist: pointer to scsi buffer list.
15268 * @count: number of scsi buffers on the list.
15269 *
15270 * This routine is invoked to post a block of @count scsi sgl pages from a
15271 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
15272 * No Lock is held.
15273 *
15274 **/
15275int
James Smart8a9d2e82012-05-09 21:16:12 -040015276lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
15277 struct list_head *sblist,
15278 int count)
James Smart4f774512009-05-22 14:52:35 -040015279{
15280 struct lpfc_scsi_buf *psb;
15281 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
15282 struct sgl_page_pairs *sgl_pg_pairs;
15283 void *viraddr;
15284 LPFC_MBOXQ_t *mbox;
15285 uint32_t reqlen, alloclen, pg_pairs;
15286 uint32_t mbox_tmo;
15287 uint16_t xritag_start = 0;
15288 int rc = 0;
15289 uint32_t shdr_status, shdr_add_status;
15290 dma_addr_t pdma_phys_bpl1;
15291 union lpfc_sli4_cfg_shdr *shdr;
15292
15293 /* Calculate the requested length of the dma memory */
James Smart8a9d2e82012-05-09 21:16:12 -040015294 reqlen = count * sizeof(struct sgl_page_pairs) +
James Smart4f774512009-05-22 14:52:35 -040015295 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
James Smart49198b32010-04-06 15:04:33 -040015296 if (reqlen > SLI4_PAGE_SIZE) {
James Smart4f774512009-05-22 14:52:35 -040015297 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
15298 "0217 Block sgl registration required DMA "
 15299				"size (%d) greater than a page\n", reqlen);
15300 return -ENOMEM;
15301 }
15302 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15303 if (!mbox) {
15304 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15305 "0283 Failed to allocate mbox cmd memory\n");
15306 return -ENOMEM;
15307 }
15308
15309 /* Allocate DMA memory and set up the non-embedded mailbox command */
15310 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15311 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
15312 LPFC_SLI4_MBX_NEMBED);
15313
15314 if (alloclen < reqlen) {
15315 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15316 "2561 Allocated DMA memory size (%d) is "
15317 "less than the requested DMA memory "
15318 "size (%d)\n", alloclen, reqlen);
15319 lpfc_sli4_mbox_cmd_free(phba, mbox);
15320 return -ENOMEM;
15321 }
James Smart6d368e52011-05-24 11:44:12 -040015322
James Smart4f774512009-05-22 14:52:35 -040015323 /* Get the first SGE entry from the non-embedded DMA memory */
James Smart4f774512009-05-22 14:52:35 -040015324 viraddr = mbox->sge_array->addr[0];
15325
15326 /* Set up the SGL pages in the non-embedded DMA pages */
15327 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
15328 sgl_pg_pairs = &sgl->sgl_pg_pairs;
15329
15330 pg_pairs = 0;
15331 list_for_each_entry(psb, sblist, list) {
15332 /* Set up the sge entry */
15333 sgl_pg_pairs->sgl_pg0_addr_lo =
15334 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
15335 sgl_pg_pairs->sgl_pg0_addr_hi =
15336 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
15337 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
15338 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
15339 else
15340 pdma_phys_bpl1 = 0;
15341 sgl_pg_pairs->sgl_pg1_addr_lo =
15342 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
15343 sgl_pg_pairs->sgl_pg1_addr_hi =
15344 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
15345 /* Keep the first xritag on the list */
15346 if (pg_pairs == 0)
15347 xritag_start = psb->cur_iocbq.sli4_xritag;
15348 sgl_pg_pairs++;
15349 pg_pairs++;
15350 }
15351 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
15352 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
15353 /* Perform endian conversion if necessary */
15354 sgl->word0 = cpu_to_le32(sgl->word0);
15355
15356 if (!phba->sli4_hba.intr_enable)
15357 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15358 else {
James Smarta183a152011-10-10 21:32:43 -040015359 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart4f774512009-05-22 14:52:35 -040015360 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
15361 }
15362 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
15363 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15364 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15365 if (rc != MBX_TIMEOUT)
15366 lpfc_sli4_mbox_cmd_free(phba, mbox);
15367 if (shdr_status || shdr_add_status || rc) {
15368 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15369 "2564 POST_SGL_BLOCK mailbox command failed "
15370 "status x%x add_status x%x mbx status x%x\n",
15371 shdr_status, shdr_add_status, rc);
15372 rc = -ENXIO;
15373 }
15374 return rc;
15375}
15376
James Smart2ea259e2017-02-12 13:52:27 -080015377static char *lpfc_rctl_names[] = FC_RCTL_NAMES_INIT;
15378static char *lpfc_type_names[] = FC_TYPE_NAMES_INIT;
15379
James Smart4f774512009-05-22 14:52:35 -040015380/**
15381 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
15382 * @phba: pointer to lpfc_hba struct that the frame was received on
15383 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
15384 *
15385 * This function checks the fields in the @fc_hdr to see if the FC frame is a
15386 * valid type of frame that the LPFC driver will handle. This function will
15387 * return a zero if the frame is a valid frame or a non zero value when the
15388 * frame does not pass the check.
15389 **/
15390static int
15391lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
15392{
James Smart4f774512009-05-22 14:52:35 -040015394 struct fc_vft_header *fc_vft_hdr;
James Smart546fc852011-03-11 16:06:29 -050015395 uint32_t *header = (uint32_t *) fc_hdr;
James Smart4f774512009-05-22 14:52:35 -040015396
15397 switch (fc_hdr->fh_r_ctl) {
15398 case FC_RCTL_DD_UNCAT: /* uncategorized information */
15399 case FC_RCTL_DD_SOL_DATA: /* solicited data */
15400 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
15401 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
15402 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
15403 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
15404 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
15405 case FC_RCTL_DD_CMD_STATUS: /* command status */
15406 case FC_RCTL_ELS_REQ: /* extended link services request */
15407 case FC_RCTL_ELS_REP: /* extended link services reply */
15408 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
15409 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
15410 case FC_RCTL_BA_NOP: /* basic link service NOP */
15411 case FC_RCTL_BA_ABTS: /* basic link service abort */
15412 case FC_RCTL_BA_RMC: /* remove connection */
15413 case FC_RCTL_BA_ACC: /* basic accept */
15414 case FC_RCTL_BA_RJT: /* basic reject */
15415 case FC_RCTL_BA_PRMT:
15416 case FC_RCTL_ACK_1: /* acknowledge_1 */
15417 case FC_RCTL_ACK_0: /* acknowledge_0 */
15418 case FC_RCTL_P_RJT: /* port reject */
15419 case FC_RCTL_F_RJT: /* fabric reject */
15420 case FC_RCTL_P_BSY: /* port busy */
15421 case FC_RCTL_F_BSY: /* fabric busy to data frame */
15422 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
15423 case FC_RCTL_LCR: /* link credit reset */
15424 case FC_RCTL_END: /* end */
15425 break;
15426 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
15427 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
15428 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
15429 return lpfc_fc_frame_check(phba, fc_hdr);
15430 default:
15431 goto drop;
15432 }
15433 switch (fc_hdr->fh_type) {
15434 case FC_TYPE_BLS:
15435 case FC_TYPE_ELS:
15436 case FC_TYPE_FCP:
15437 case FC_TYPE_CT:
James Smart895427b2017-02-12 13:52:30 -080015438 case FC_TYPE_NVME:
James Smart4f774512009-05-22 14:52:35 -040015439 break;
15440 case FC_TYPE_IP:
15441 case FC_TYPE_ILS:
15442 default:
15443 goto drop;
15444 }
James Smart546fc852011-03-11 16:06:29 -050015445
James Smart4f774512009-05-22 14:52:35 -040015446 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
James Smart88f43a02013-04-17 20:19:44 -040015447 "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
15448 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
James Smart2ea259e2017-02-12 13:52:27 -080015449 lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
15450 lpfc_type_names[fc_hdr->fh_type], fc_hdr->fh_type,
James Smart546fc852011-03-11 16:06:29 -050015451 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
15452 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
James Smart88f43a02013-04-17 20:19:44 -040015453 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
15454 be32_to_cpu(header[6]));
James Smart4f774512009-05-22 14:52:35 -040015455 return 0;
15456drop:
15457 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
15458 "2539 Dropped frame rctl:%s type:%s\n",
James Smart2ea259e2017-02-12 13:52:27 -080015459 lpfc_rctl_names[fc_hdr->fh_r_ctl],
15460 lpfc_type_names[fc_hdr->fh_type]);
James Smart4f774512009-05-22 14:52:35 -040015461 return 1;
15462}
15463
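/*
 * Editor's note -- illustrative sketch only, not part of the driver and
 * never compiled.  It fills in the two header fields that
 * lpfc_fc_frame_check() above switches on, using values the routine
 * accepts (an unsolicited FCP command frame).  The helper name is
 * hypothetical; struct fc_frame_header comes from <scsi/fc/fc_fs.h>.
 */
#if 0
static void
lpfc_example_fill_accepted_hdr(struct fc_frame_header *fc_hdr)
{
	memset(fc_hdr, 0, sizeof(*fc_hdr));
	fc_hdr->fh_r_ctl = FC_RCTL_DD_UNSOL_CMD;	/* accepted r_ctl value */
	fc_hdr->fh_type = FC_TYPE_FCP;			/* accepted type value */
}
#endif
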
15464/**
15465 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
15466 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
15467 *
15468 * This function processes the FC header to retrieve the VFI from the VF
15469 * header, if one exists. This function will return the VFI if one exists
 15470 * or 0 if no VF Tagging (VFT) header exists.
15471 **/
15472static uint32_t
15473lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
15474{
15475 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
15476
15477 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
15478 return 0;
15479 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
15480}
15481
15482/**
15483 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
15484 * @phba: Pointer to the HBA structure to search for the vport on
15485 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
15486 * @fcfi: The FC Fabric ID that the frame came from
15487 *
15488 * This function searches the @phba for a vport that matches the content of the
15489 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
15490 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
15491 * returns the matching vport pointer or NULL if unable to match frame to a
15492 * vport.
15493 **/
15494static struct lpfc_vport *
15495lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
James Smart895427b2017-02-12 13:52:30 -080015496 uint16_t fcfi, uint32_t did)
James Smart4f774512009-05-22 14:52:35 -040015497{
15498 struct lpfc_vport **vports;
15499 struct lpfc_vport *vport = NULL;
15500 int i;
James Smart939723a2012-05-09 21:19:03 -040015501
James Smartbf086112011-08-21 21:48:13 -040015502 if (did == Fabric_DID)
15503 return phba->pport;
James Smart939723a2012-05-09 21:19:03 -040015504 if ((phba->pport->fc_flag & FC_PT2PT) &&
15505 !(phba->link_state == LPFC_HBA_READY))
15506 return phba->pport;
15507
James Smart4f774512009-05-22 14:52:35 -040015508 vports = lpfc_create_vport_work_array(phba);
James Smart895427b2017-02-12 13:52:30 -080015509 if (vports != NULL) {
James Smart4f774512009-05-22 14:52:35 -040015510 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
15511 if (phba->fcf.fcfi == fcfi &&
15512 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
15513 vports[i]->fc_myDID == did) {
15514 vport = vports[i];
15515 break;
15516 }
15517 }
James Smart895427b2017-02-12 13:52:30 -080015518 }
James Smart4f774512009-05-22 14:52:35 -040015519 lpfc_destroy_vport_work_array(phba, vports);
15520 return vport;
15521}
15522
15523/**
James Smart45ed1192009-10-02 15:17:02 -040015524 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
15525 * @vport: The vport to work on.
15526 *
15527 * This function updates the receive sequence time stamp for this vport. The
 15528 * receive sequence time stamp indicates the time that the last frame of
 15529 * the sequence that has been idle for the longest amount of time was received.
 15530 * The driver uses this time stamp to indicate if any received sequences have
15531 * timed out.
15532 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040015533static void
James Smart45ed1192009-10-02 15:17:02 -040015534lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
15535{
15536 struct lpfc_dmabuf *h_buf;
15537 struct hbq_dmabuf *dmabuf = NULL;
15538
15539 /* get the oldest sequence on the rcv list */
15540 h_buf = list_get_first(&vport->rcv_buffer_list,
15541 struct lpfc_dmabuf, list);
15542 if (!h_buf)
15543 return;
15544 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15545 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
15546}
15547
15548/**
15549 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
15550 * @vport: The vport that the received sequences were sent to.
15551 *
15552 * This function cleans up all outstanding received sequences. This is called
15553 * by the driver when a link event or user action invalidates all the received
15554 * sequences.
15555 **/
15556void
15557lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
15558{
15559 struct lpfc_dmabuf *h_buf, *hnext;
15560 struct lpfc_dmabuf *d_buf, *dnext;
15561 struct hbq_dmabuf *dmabuf = NULL;
15562
15563 /* start with the oldest sequence on the rcv list */
15564 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
15565 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15566 list_del_init(&dmabuf->hbuf.list);
15567 list_for_each_entry_safe(d_buf, dnext,
15568 &dmabuf->dbuf.list, list) {
15569 list_del_init(&d_buf->list);
15570 lpfc_in_buf_free(vport->phba, d_buf);
15571 }
15572 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
15573 }
15574}
15575
15576/**
15577 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
15578 * @vport: The vport that the received sequences were sent to.
15579 *
15580 * This function determines whether any received sequences have timed out by
15581 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
15582 * indicates that there is at least one timed out sequence this routine will
15583 * go through the received sequences one at a time from most inactive to most
15584 * active to determine which ones need to be cleaned up. Once it has determined
15585 * that a sequence needs to be cleaned up it will simply free up the resources
15586 * without sending an abort.
15587 **/
15588void
15589lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
15590{
15591 struct lpfc_dmabuf *h_buf, *hnext;
15592 struct lpfc_dmabuf *d_buf, *dnext;
15593 struct hbq_dmabuf *dmabuf = NULL;
15594 unsigned long timeout;
15595 int abort_count = 0;
15596
15597 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
15598 vport->rcv_buffer_time_stamp);
15599 if (list_empty(&vport->rcv_buffer_list) ||
15600 time_before(jiffies, timeout))
15601 return;
15602 /* start with the oldest sequence on the rcv list */
15603 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
15604 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15605 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
15606 dmabuf->time_stamp);
15607 if (time_before(jiffies, timeout))
15608 break;
15609 abort_count++;
15610 list_del_init(&dmabuf->hbuf.list);
15611 list_for_each_entry_safe(d_buf, dnext,
15612 &dmabuf->dbuf.list, list) {
15613 list_del_init(&d_buf->list);
15614 lpfc_in_buf_free(vport->phba, d_buf);
15615 }
15616 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
15617 }
15618 if (abort_count)
15619 lpfc_update_rcv_time_stamp(vport);
15620}
15621
15622/**
James Smart4f774512009-05-22 14:52:35 -040015623 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport on which this frame was received.
 15624 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
15625 *
15626 * This function searches through the existing incomplete sequences that have
15627 * been sent to this @vport. If the frame matches one of the incomplete
15628 * sequences then the dbuf in the @dmabuf is added to the list of frames that
15629 * make up that sequence. If no sequence is found that matches this frame then
 15630 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
15631 * This function returns a pointer to the first dmabuf in the sequence list that
15632 * the frame was linked to.
15633 **/
15634static struct hbq_dmabuf *
15635lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
15636{
15637 struct fc_frame_header *new_hdr;
15638 struct fc_frame_header *temp_hdr;
15639 struct lpfc_dmabuf *d_buf;
15640 struct lpfc_dmabuf *h_buf;
15641 struct hbq_dmabuf *seq_dmabuf = NULL;
15642 struct hbq_dmabuf *temp_dmabuf = NULL;
James Smart4360ca92015-12-16 18:12:04 -050015643 uint8_t found = 0;
James Smart4f774512009-05-22 14:52:35 -040015644
James Smart4d9ab992009-10-02 15:16:39 -040015645 INIT_LIST_HEAD(&dmabuf->dbuf.list);
James Smart45ed1192009-10-02 15:17:02 -040015646 dmabuf->time_stamp = jiffies;
James Smart4f774512009-05-22 14:52:35 -040015647 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
James Smart4360ca92015-12-16 18:12:04 -050015648
James Smart4f774512009-05-22 14:52:35 -040015649 /* Use the hdr_buf to find the sequence that this frame belongs to */
15650 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
15651 temp_hdr = (struct fc_frame_header *)h_buf->virt;
15652 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
15653 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
15654 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
15655 continue;
15656 /* found a pending sequence that matches this frame */
15657 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15658 break;
15659 }
15660 if (!seq_dmabuf) {
15661 /*
15662 * This indicates first frame received for this sequence.
15663 * Queue the buffer on the vport's rcv_buffer_list.
15664 */
15665 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
James Smart45ed1192009-10-02 15:17:02 -040015666 lpfc_update_rcv_time_stamp(vport);
James Smart4f774512009-05-22 14:52:35 -040015667 return dmabuf;
15668 }
15669 temp_hdr = seq_dmabuf->hbuf.virt;
James Smarteeead812009-12-21 17:01:23 -050015670 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
15671 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
James Smart4d9ab992009-10-02 15:16:39 -040015672 list_del_init(&seq_dmabuf->hbuf.list);
15673 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
15674 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
James Smart45ed1192009-10-02 15:17:02 -040015675 lpfc_update_rcv_time_stamp(vport);
James Smart4f774512009-05-22 14:52:35 -040015676 return dmabuf;
15677 }
James Smart45ed1192009-10-02 15:17:02 -040015678 /* move this sequence to the tail to indicate a young sequence */
15679 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
15680 seq_dmabuf->time_stamp = jiffies;
15681 lpfc_update_rcv_time_stamp(vport);
James Smarteeead812009-12-21 17:01:23 -050015682 if (list_empty(&seq_dmabuf->dbuf.list)) {
15683 temp_hdr = dmabuf->hbuf.virt;
15684 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
15685 return seq_dmabuf;
15686 }
James Smart4f774512009-05-22 14:52:35 -040015687 /* find the correct place in the sequence to insert this frame */
James Smart4360ca92015-12-16 18:12:04 -050015688 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
15689 while (!found) {
James Smart4f774512009-05-22 14:52:35 -040015690 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
15691 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
15692 /*
15693 * If the frame's sequence count is greater than the frame on
15694 * the list then insert the frame right after this frame
15695 */
James Smarteeead812009-12-21 17:01:23 -050015696 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
15697 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
James Smart4f774512009-05-22 14:52:35 -040015698 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
James Smart4360ca92015-12-16 18:12:04 -050015699 found = 1;
15700 break;
James Smart4f774512009-05-22 14:52:35 -040015701 }
James Smart4360ca92015-12-16 18:12:04 -050015702
15703 if (&d_buf->list == &seq_dmabuf->dbuf.list)
15704 break;
15705 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
James Smart4f774512009-05-22 14:52:35 -040015706 }
James Smart4360ca92015-12-16 18:12:04 -050015707
15708 if (found)
15709 return seq_dmabuf;
James Smart4f774512009-05-22 14:52:35 -040015710 return NULL;
15711}
15712
15713/**
James Smart6669f9b2009-10-02 15:16:45 -040015714 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 15715 * @vport: pointer to a virtual port
 15716 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 15717 *
 15718 * This function tries to abort the partially assembled sequence described
 15719 * by the information from the basic abort @dmabuf. It checks to see whether
 15720 * such a partially assembled sequence is held by the driver. If so, it shall
 15721 * free up all the frames from the partially assembled sequence.
15722 *
15723 * Return
 15724 * true -- if a matching partially assembled sequence is present and all
 15725 * of its frames have been freed;
 15726 * false -- if there is no matching partially assembled sequence present, so
 15727 * nothing got aborted in the lower layer driver
15728 **/
15729static bool
15730lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
15731 struct hbq_dmabuf *dmabuf)
15732{
15733 struct fc_frame_header *new_hdr;
15734 struct fc_frame_header *temp_hdr;
15735 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
15736 struct hbq_dmabuf *seq_dmabuf = NULL;
15737
15738 /* Use the hdr_buf to find the sequence that matches this frame */
15739 INIT_LIST_HEAD(&dmabuf->dbuf.list);
15740 INIT_LIST_HEAD(&dmabuf->hbuf.list);
15741 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
15742 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
15743 temp_hdr = (struct fc_frame_header *)h_buf->virt;
15744 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
15745 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
15746 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
15747 continue;
15748 /* found a pending sequence that matches this frame */
15749 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15750 break;
15751 }
15752
15753 /* Free up all the frames from the partially assembled sequence */
15754 if (seq_dmabuf) {
15755 list_for_each_entry_safe(d_buf, n_buf,
15756 &seq_dmabuf->dbuf.list, list) {
15757 list_del_init(&d_buf->list);
15758 lpfc_in_buf_free(vport->phba, d_buf);
15759 }
15760 return true;
15761 }
15762 return false;
15763}
15764
15765/**
James Smart6dd9e312013-01-03 15:43:37 -050015766 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
 15767 * @vport: pointer to a virtual port
 15768 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 15769 *
 15770 * This function tries to abort the assembled sequence at the upper level
 15771 * protocol, described by the information from the basic abort @dmabuf. It
 15772 * checks to see whether such a pending context exists at the upper level protocol.
15773 * If so, it shall clean up the pending context.
15774 *
15775 * Return
15776 * true -- if there is matching pending context of the sequence cleaned
15777 * at ulp;
15778 * false -- if there is no matching pending context of the sequence present
15779 * at ulp.
15780 **/
15781static bool
15782lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
15783{
15784 struct lpfc_hba *phba = vport->phba;
15785 int handled;
15786
15787 /* Accepting abort at ulp with SLI4 only */
15788 if (phba->sli_rev < LPFC_SLI_REV4)
15789 return false;
15790
15791 /* Register all caring upper level protocols to attend abort */
15792 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
15793 if (handled)
15794 return true;
15795
15796 return false;
15797}
15798
15799/**
James Smart546fc852011-03-11 16:06:29 -050015800 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
James Smart6669f9b2009-10-02 15:16:45 -040015801 * @phba: Pointer to HBA context object.
15802 * @cmd_iocbq: pointer to the command iocbq structure.
15803 * @rsp_iocbq: pointer to the response iocbq structure.
15804 *
James Smart546fc852011-03-11 16:06:29 -050015805 * This function handles the sequence abort response iocb command complete
James Smart6669f9b2009-10-02 15:16:45 -040015806 * event. It properly releases the memory allocated to the sequence abort
 15807 * response iocb.
15808 **/
15809static void
James Smart546fc852011-03-11 16:06:29 -050015810lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
James Smart6669f9b2009-10-02 15:16:45 -040015811 struct lpfc_iocbq *cmd_iocbq,
15812 struct lpfc_iocbq *rsp_iocbq)
15813{
James Smart6dd9e312013-01-03 15:43:37 -050015814 struct lpfc_nodelist *ndlp;
15815
15816 if (cmd_iocbq) {
15817 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
15818 lpfc_nlp_put(ndlp);
15819 lpfc_nlp_not_used(ndlp);
James Smart6669f9b2009-10-02 15:16:45 -040015820 lpfc_sli_release_iocbq(phba, cmd_iocbq);
James Smart6dd9e312013-01-03 15:43:37 -050015821 }
James Smart6b5151f2012-01-18 16:24:06 -050015822
15823 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
15824 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
15825 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15826 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
15827 rsp_iocbq->iocb.ulpStatus,
15828 rsp_iocbq->iocb.un.ulpWord[4]);
James Smart6669f9b2009-10-02 15:16:45 -040015829}
15830
15831/**
James Smart6d368e52011-05-24 11:44:12 -040015832 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
15833 * @phba: Pointer to HBA context object.
15834 * @xri: xri id in transaction.
15835 *
 15836 * This function validates that the xri maps to the known range of XRIs
 15837 * allocated and used by the driver.
15838 **/
James Smart7851fe22011-07-22 18:36:52 -040015839uint16_t
James Smart6d368e52011-05-24 11:44:12 -040015840lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
15841 uint16_t xri)
15842{
James Smarta2fc4aef2014-09-03 12:57:55 -040015843 uint16_t i;
James Smart6d368e52011-05-24 11:44:12 -040015844
15845 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
15846 if (xri == phba->sli4_hba.xri_ids[i])
15847 return i;
15848 }
15849 return NO_XRI;
15850}
15851
James Smart6d368e52011-05-24 11:44:12 -040015852/**
James Smart546fc852011-03-11 16:06:29 -050015853 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
James Smart6669f9b2009-10-02 15:16:45 -040015854 * @phba: Pointer to HBA context object.
15855 * @fc_hdr: pointer to a FC frame header.
15856 *
James Smart546fc852011-03-11 16:06:29 -050015857 * This function sends a basic response to a previous unsol sequence abort
James Smart6669f9b2009-10-02 15:16:45 -040015858 * event after aborting the sequence handling.
15859 **/
15860static void
James Smart6dd9e312013-01-03 15:43:37 -050015861lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
15862 struct fc_frame_header *fc_hdr, bool aborted)
James Smart6669f9b2009-10-02 15:16:45 -040015863{
James Smart6dd9e312013-01-03 15:43:37 -050015864 struct lpfc_hba *phba = vport->phba;
James Smart6669f9b2009-10-02 15:16:45 -040015865 struct lpfc_iocbq *ctiocb = NULL;
15866 struct lpfc_nodelist *ndlp;
James Smartee0f4fe2012-05-09 21:19:14 -040015867 uint16_t oxid, rxid, xri, lxri;
James Smart5ffc2662009-11-18 15:39:44 -050015868 uint32_t sid, fctl;
James Smart6669f9b2009-10-02 15:16:45 -040015869 IOCB_t *icmd;
James Smart546fc852011-03-11 16:06:29 -050015870 int rc;
James Smart6669f9b2009-10-02 15:16:45 -040015871
15872 if (!lpfc_is_link_up(phba))
15873 return;
15874
15875 sid = sli4_sid_from_fc_hdr(fc_hdr);
15876 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
James Smart5ffc2662009-11-18 15:39:44 -050015877 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
James Smart6669f9b2009-10-02 15:16:45 -040015878
James Smart6dd9e312013-01-03 15:43:37 -050015879 ndlp = lpfc_findnode_did(vport, sid);
James Smart6669f9b2009-10-02 15:16:45 -040015880 if (!ndlp) {
James Smart6dd9e312013-01-03 15:43:37 -050015881 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
15882 if (!ndlp) {
15883 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
15884 "1268 Failed to allocate ndlp for "
15885 "oxid:x%x SID:x%x\n", oxid, sid);
15886 return;
15887 }
15888 lpfc_nlp_init(vport, ndlp, sid);
15889 /* Put ndlp onto pport node list */
15890 lpfc_enqueue_node(vport, ndlp);
15891 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
15892 /* re-setup ndlp without removing from node list */
15893 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
15894 if (!ndlp) {
15895 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
15896 "3275 Failed to active ndlp found "
 15897					 "3275 Failed to activate ndlp found "
15898 return;
15899 }
James Smart6669f9b2009-10-02 15:16:45 -040015900 }
15901
James Smart546fc852011-03-11 16:06:29 -050015902 /* Allocate buffer for rsp iocb */
James Smart6669f9b2009-10-02 15:16:45 -040015903 ctiocb = lpfc_sli_get_iocbq(phba);
15904 if (!ctiocb)
15905 return;
15906
James Smart5ffc2662009-11-18 15:39:44 -050015907 /* Extract the F_CTL field from FC_HDR */
15908 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
15909
James Smart6669f9b2009-10-02 15:16:45 -040015910 icmd = &ctiocb->iocb;
James Smart6669f9b2009-10-02 15:16:45 -040015911 icmd->un.xseq64.bdl.bdeSize = 0;
James Smart5ffc2662009-11-18 15:39:44 -050015912 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
James Smart6669f9b2009-10-02 15:16:45 -040015913 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
15914 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
15915 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
15916
15917 /* Fill in the rest of iocb fields */
15918 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
15919 icmd->ulpBdeCount = 0;
15920 icmd->ulpLe = 1;
15921 icmd->ulpClass = CLASS3;
James Smart6d368e52011-05-24 11:44:12 -040015922 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
James Smart6dd9e312013-01-03 15:43:37 -050015923 ctiocb->context1 = lpfc_nlp_get(ndlp);
James Smart6669f9b2009-10-02 15:16:45 -040015924
James Smart6669f9b2009-10-02 15:16:45 -040015925 ctiocb->iocb_cmpl = NULL;
15926 ctiocb->vport = phba->pport;
James Smart546fc852011-03-11 16:06:29 -050015927 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
James Smart6d368e52011-05-24 11:44:12 -040015928 ctiocb->sli4_lxritag = NO_XRI;
James Smart546fc852011-03-11 16:06:29 -050015929 ctiocb->sli4_xritag = NO_XRI;
15930
James Smartee0f4fe2012-05-09 21:19:14 -040015931 if (fctl & FC_FC_EX_CTX)
15932 /* Exchange responder sent the abort so we
15933 * own the oxid.
15934 */
15935 xri = oxid;
15936 else
15937 xri = rxid;
15938 lxri = lpfc_sli4_xri_inrange(phba, xri);
15939 if (lxri != NO_XRI)
15940 lpfc_set_rrq_active(phba, ndlp, lxri,
15941 (xri == oxid) ? rxid : oxid, 0);
James Smart6dd9e312013-01-03 15:43:37 -050015942 /* For BA_ABTS from exchange responder, if the logical xri with
15943 * the oxid maps to the FCP XRI range, the port no longer has
15944 * that exchange context, send a BLS_RJT. Override the IOCB for
15945 * a BA_RJT.
James Smart546fc852011-03-11 16:06:29 -050015946 */
James Smart6dd9e312013-01-03 15:43:37 -050015947 if ((fctl & FC_FC_EX_CTX) &&
James Smart895427b2017-02-12 13:52:30 -080015948 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
James Smart6dd9e312013-01-03 15:43:37 -050015949 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
15950 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
15951 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
15952 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
15953 }
15954
15955 /* If BA_ABTS failed to abort a partially assembled receive sequence,
15956 * the driver no longer has that exchange, send a BLS_RJT. Override
15957 * the IOCB for a BA_RJT.
15958 */
15959 if (aborted == false) {
James Smart546fc852011-03-11 16:06:29 -050015960 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
15961 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
15962 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
15963 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
15964 }
James Smart6669f9b2009-10-02 15:16:45 -040015965
James Smart5ffc2662009-11-18 15:39:44 -050015966 if (fctl & FC_FC_EX_CTX) {
15967 /* ABTS sent by responder to CT exchange, construction
15968 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
15969 * field and RX_ID from ABTS for RX_ID field.
15970 */
James Smart546fc852011-03-11 16:06:29 -050015971 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
James Smart5ffc2662009-11-18 15:39:44 -050015972 } else {
15973 /* ABTS sent by initiator to CT exchange, construction
15974 * of BA_ACC will need to allocate a new XRI as for the
James Smartf09c3ac2012-03-01 22:33:29 -050015975 * XRI_TAG field.
James Smart5ffc2662009-11-18 15:39:44 -050015976 */
James Smart546fc852011-03-11 16:06:29 -050015977 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
James Smart5ffc2662009-11-18 15:39:44 -050015978 }
James Smartf09c3ac2012-03-01 22:33:29 -050015979 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
James Smart546fc852011-03-11 16:06:29 -050015980 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
James Smart5ffc2662009-11-18 15:39:44 -050015981
James Smart546fc852011-03-11 16:06:29 -050015982 /* Xmit CT abts response on exchange <xid> */
James Smart6dd9e312013-01-03 15:43:37 -050015983 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
15984 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
15985 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
James Smart546fc852011-03-11 16:06:29 -050015986
15987 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
15988 if (rc == IOCB_ERROR) {
James Smart6dd9e312013-01-03 15:43:37 -050015989 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
15990 "2925 Failed to issue CT ABTS RSP x%x on "
15991 "xri x%x, Data x%x\n",
15992 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
15993 phba->link_state);
15994 lpfc_nlp_put(ndlp);
15995 ctiocb->context1 = NULL;
James Smart546fc852011-03-11 16:06:29 -050015996 lpfc_sli_release_iocbq(phba, ctiocb);
15997 }
James Smart6669f9b2009-10-02 15:16:45 -040015998}
15999
16000/**
16001 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
16002 * @vport: Pointer to the vport on which this sequence was received
16003 * @dmabuf: pointer to a dmabuf that describes the FC sequence
16004 *
16005 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 16006 * receive sequence is only partially assembled by the driver, it shall abort
 16007 * the partially assembled frames for the sequence. Otherwise, if the
 16008 * unsolicited receive sequence has been completely assembled and passed to
 16009 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to show
 16010 * the unsolicited sequence has been aborted. After that, it will issue a basic
 16011 * accept (BA_ACC) for the abort.
16012 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040016013static void
James Smart6669f9b2009-10-02 15:16:45 -040016014lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
16015 struct hbq_dmabuf *dmabuf)
16016{
16017 struct lpfc_hba *phba = vport->phba;
16018 struct fc_frame_header fc_hdr;
James Smart5ffc2662009-11-18 15:39:44 -050016019 uint32_t fctl;
James Smart6dd9e312013-01-03 15:43:37 -050016020 bool aborted;
James Smart6669f9b2009-10-02 15:16:45 -040016021
James Smart6669f9b2009-10-02 15:16:45 -040016022 /* Make a copy of fc_hdr before the dmabuf being released */
16023 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
James Smart5ffc2662009-11-18 15:39:44 -050016024 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
James Smart6669f9b2009-10-02 15:16:45 -040016025
James Smart5ffc2662009-11-18 15:39:44 -050016026 if (fctl & FC_FC_EX_CTX) {
James Smart6dd9e312013-01-03 15:43:37 -050016027 /* ABTS by responder to exchange, no cleanup needed */
16028 aborted = true;
James Smart5ffc2662009-11-18 15:39:44 -050016029 } else {
James Smart6dd9e312013-01-03 15:43:37 -050016030 /* ABTS by initiator to exchange, need to do cleanup */
16031 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
16032 if (aborted == false)
16033 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
James Smart5ffc2662009-11-18 15:39:44 -050016034 }
James Smart6dd9e312013-01-03 15:43:37 -050016035 lpfc_in_buf_free(phba, &dmabuf->dbuf);
16036
16037 /* Respond with BA_ACC or BA_RJT accordingly */
16038 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
James Smart6669f9b2009-10-02 15:16:45 -040016039}
16040
16041/**
James Smart4f774512009-05-22 14:52:35 -040016042 * lpfc_seq_complete - Indicates if a sequence is complete
16043 * @dmabuf: pointer to a dmabuf that describes the FC sequence
16044 *
16045 * This function checks the sequence, starting with the frame described by
16046 * @dmabuf, to see if all the frames associated with this sequence are present.
 16047 * The frames associated with this sequence are linked to the @dmabuf using the
 16048 * dbuf list. This function looks for three major things. 1) That the first frame
 16049 * has a sequence count of zero. 2) There is a frame with the last-frame-of-sequence
 16050 * bit set. 3) That there are no holes in the sequence count. The function will
 16051 * return 1 when the sequence is complete, otherwise it will return 0.
16052 **/
16053static int
16054lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
16055{
16056 struct fc_frame_header *hdr;
16057 struct lpfc_dmabuf *d_buf;
16058 struct hbq_dmabuf *seq_dmabuf;
16059 uint32_t fctl;
16060 int seq_count = 0;
16061
16062 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
 16063	/* make sure first frame of sequence has a sequence count of zero */
16064 if (hdr->fh_seq_cnt != seq_count)
16065 return 0;
16066 fctl = (hdr->fh_f_ctl[0] << 16 |
16067 hdr->fh_f_ctl[1] << 8 |
16068 hdr->fh_f_ctl[2]);
16069 /* If last frame of sequence we can return success. */
16070 if (fctl & FC_FC_END_SEQ)
16071 return 1;
16072 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
16073 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16074 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
16075 /* If there is a hole in the sequence count then fail. */
James Smarteeead812009-12-21 17:01:23 -050016076 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
James Smart4f774512009-05-22 14:52:35 -040016077 return 0;
16078 fctl = (hdr->fh_f_ctl[0] << 16 |
16079 hdr->fh_f_ctl[1] << 8 |
16080 hdr->fh_f_ctl[2]);
16081 /* If last frame of sequence we can return success. */
16082 if (fctl & FC_FC_END_SEQ)
16083 return 1;
16084 }
16085 return 0;
16086}
16087
16088/**
16089 * lpfc_prep_seq - Prep sequence for ULP processing
16090 * @vport: Pointer to the vport on which this sequence was received
 16091 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
16092 *
16093 * This function takes a sequence, described by a list of frames, and creates
16094 * a list of iocbq structures to describe the sequence. This iocbq list will be
16095 * used to issue to the generic unsolicited sequence handler. This routine
 16096 * returns a pointer to the first iocbq in the list. If the function is unable
 16097 * to allocate an iocbq then it throws out the received frames that were not
 16098 * able to be described and returns a pointer to the first iocbq. If unable to
16099 * allocate any iocbqs (including the first) this function will return NULL.
16100 **/
16101static struct lpfc_iocbq *
16102lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
16103{
James Smart7851fe22011-07-22 18:36:52 -040016104 struct hbq_dmabuf *hbq_buf;
James Smart4f774512009-05-22 14:52:35 -040016105 struct lpfc_dmabuf *d_buf, *n_buf;
16106 struct lpfc_iocbq *first_iocbq, *iocbq;
16107 struct fc_frame_header *fc_hdr;
16108 uint32_t sid;
James Smart7851fe22011-07-22 18:36:52 -040016109 uint32_t len, tot_len;
James Smarteeead812009-12-21 17:01:23 -050016110 struct ulp_bde64 *pbde;
James Smart4f774512009-05-22 14:52:35 -040016111
16112 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
16113 /* remove from receive buffer list */
16114 list_del_init(&seq_dmabuf->hbuf.list);
James Smart45ed1192009-10-02 15:17:02 -040016115 lpfc_update_rcv_time_stamp(vport);
James Smart4f774512009-05-22 14:52:35 -040016116 /* get the Remote Port's SID */
James Smart6669f9b2009-10-02 15:16:45 -040016117 sid = sli4_sid_from_fc_hdr(fc_hdr);
James Smart7851fe22011-07-22 18:36:52 -040016118 tot_len = 0;
James Smart4f774512009-05-22 14:52:35 -040016119 /* Get an iocbq struct to fill in. */
16120 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
16121 if (first_iocbq) {
16122 /* Initialize the first IOCB. */
James Smart8fa38512009-07-19 10:01:03 -040016123 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
James Smart4f774512009-05-22 14:52:35 -040016124 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
James Smart895427b2017-02-12 13:52:30 -080016125 first_iocbq->vport = vport;
James Smart939723a2012-05-09 21:19:03 -040016126
16127 /* Check FC Header to see what TYPE of frame we are rcv'ing */
16128 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
16129 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
16130 first_iocbq->iocb.un.rcvels.parmRo =
16131 sli4_did_from_fc_hdr(fc_hdr);
16132 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
16133 } else
16134 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
James Smart7851fe22011-07-22 18:36:52 -040016135 first_iocbq->iocb.ulpContext = NO_XRI;
16136 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
16137 be16_to_cpu(fc_hdr->fh_ox_id);
16138 /* iocbq is prepped for internal consumption. Physical vpi. */
16139 first_iocbq->iocb.unsli3.rcvsli3.vpi =
16140 vport->phba->vpi_ids[vport->vpi];
James Smart4f774512009-05-22 14:52:35 -040016141 /* put the first buffer into the first IOCBq */
James Smart48a5a662013-07-15 18:32:28 -040016142 tot_len = bf_get(lpfc_rcqe_length,
16143 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
16144
James Smart4f774512009-05-22 14:52:35 -040016145 first_iocbq->context2 = &seq_dmabuf->dbuf;
16146 first_iocbq->context3 = NULL;
16147 first_iocbq->iocb.ulpBdeCount = 1;
James Smart48a5a662013-07-15 18:32:28 -040016148 if (tot_len > LPFC_DATA_BUF_SIZE)
16149 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
James Smart4f774512009-05-22 14:52:35 -040016150 LPFC_DATA_BUF_SIZE;
James Smart48a5a662013-07-15 18:32:28 -040016151 else
16152 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
16153
James Smart4f774512009-05-22 14:52:35 -040016154 first_iocbq->iocb.un.rcvels.remoteID = sid;
James Smart48a5a662013-07-15 18:32:28 -040016155
James Smart7851fe22011-07-22 18:36:52 -040016156 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
James Smart4f774512009-05-22 14:52:35 -040016157 }
16158 iocbq = first_iocbq;
16159 /*
16160 * Each IOCBq can have two Buffers assigned, so go through the list
16161 * of buffers for this sequence and save two buffers in each IOCBq
16162 */
16163 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
16164 if (!iocbq) {
16165 lpfc_in_buf_free(vport->phba, d_buf);
16166 continue;
16167 }
16168 if (!iocbq->context3) {
16169 iocbq->context3 = d_buf;
16170 iocbq->iocb.ulpBdeCount++;
James Smart7851fe22011-07-22 18:36:52 -040016171 /* We need to get the size out of the right CQE */
16172 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16173 len = bf_get(lpfc_rcqe_length,
16174 &hbq_buf->cq_event.cqe.rcqe_cmpl);
James Smart48a5a662013-07-15 18:32:28 -040016175 pbde = (struct ulp_bde64 *)
16176 &iocbq->iocb.unsli3.sli3Words[4];
16177 if (len > LPFC_DATA_BUF_SIZE)
16178 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
16179 else
16180 pbde->tus.f.bdeSize = len;
16181
James Smart7851fe22011-07-22 18:36:52 -040016182 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
16183 tot_len += len;
James Smart4f774512009-05-22 14:52:35 -040016184 } else {
16185 iocbq = lpfc_sli_get_iocbq(vport->phba);
16186 if (!iocbq) {
16187 if (first_iocbq) {
16188 first_iocbq->iocb.ulpStatus =
16189 IOSTAT_FCP_RSP_ERROR;
16190 first_iocbq->iocb.un.ulpWord[4] =
16191 IOERR_NO_RESOURCES;
16192 }
16193 lpfc_in_buf_free(vport->phba, d_buf);
16194 continue;
16195 }
James Smart7851fe22011-07-22 18:36:52 -040016196 /* We need to get the size out of the right CQE */
16197 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16198 len = bf_get(lpfc_rcqe_length,
16199 &hbq_buf->cq_event.cqe.rcqe_cmpl);
James Smart48a5a662013-07-15 18:32:28 -040016200 iocbq->context2 = d_buf;
16201 iocbq->context3 = NULL;
16202 iocbq->iocb.ulpBdeCount = 1;
16203 if (len > LPFC_DATA_BUF_SIZE)
16204 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
16205 LPFC_DATA_BUF_SIZE;
16206 else
16207 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
16208
James Smart7851fe22011-07-22 18:36:52 -040016209 tot_len += len;
16210 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
16211
James Smart4f774512009-05-22 14:52:35 -040016212 iocbq->iocb.un.rcvels.remoteID = sid;
16213 list_add_tail(&iocbq->list, &first_iocbq->list);
16214 }
16215 }
16216 return first_iocbq;
16217}
16218
James Smart6669f9b2009-10-02 15:16:45 -040016219static void
16220lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
16221 struct hbq_dmabuf *seq_dmabuf)
16222{
16223 struct fc_frame_header *fc_hdr;
16224 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
16225 struct lpfc_hba *phba = vport->phba;
16226
16227 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
16228 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
16229 if (!iocbq) {
16230 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16231 "2707 Ring %d handler: Failed to allocate "
16232 "iocb Rctl x%x Type x%x received\n",
16233 LPFC_ELS_RING,
16234 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
16235 return;
16236 }
16237 if (!lpfc_complete_unsol_iocb(phba,
James Smart895427b2017-02-12 13:52:30 -080016238 phba->sli4_hba.els_wq->pring,
James Smart6669f9b2009-10-02 15:16:45 -040016239 iocbq, fc_hdr->fh_r_ctl,
16240 fc_hdr->fh_type))
James Smart6d368e52011-05-24 11:44:12 -040016241 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart6669f9b2009-10-02 15:16:45 -040016242 "2540 Ring %d handler: unexpected Rctl "
16243 "x%x Type x%x received\n",
16244 LPFC_ELS_RING,
16245 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
16246
16247 /* Free iocb created in lpfc_prep_seq */
16248 list_for_each_entry_safe(curr_iocb, next_iocb,
16249 &iocbq->list, list) {
16250 list_del_init(&curr_iocb->list);
16251 lpfc_sli_release_iocbq(phba, curr_iocb);
16252 }
16253 lpfc_sli_release_iocbq(phba, iocbq);
16254}
16255
James Smart4f774512009-05-22 14:52:35 -040016256/**
16257 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
16258 * @phba: Pointer to HBA context object.
16259 *
16260 * This function is called with no lock held. This function processes all
 16261 * the received buffers and gives them to upper layers when a received buffer
16262 * indicates that it is the final frame in the sequence. The interrupt
James Smart895427b2017-02-12 13:52:30 -080016263 * service routine processes received buffers in interrupt context.
James Smart4f774512009-05-22 14:52:35 -040016264 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
16265 * appropriate receive function when the final frame in a sequence is received.
16266 **/
James Smart4d9ab992009-10-02 15:16:39 -040016267void
16268lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
16269 struct hbq_dmabuf *dmabuf)
James Smart4f774512009-05-22 14:52:35 -040016270{
James Smart4d9ab992009-10-02 15:16:39 -040016271 struct hbq_dmabuf *seq_dmabuf;
James Smart4f774512009-05-22 14:52:35 -040016272 struct fc_frame_header *fc_hdr;
16273 struct lpfc_vport *vport;
16274 uint32_t fcfi;
James Smart939723a2012-05-09 21:19:03 -040016275 uint32_t did;
James Smart4f774512009-05-22 14:52:35 -040016276
James Smart4f774512009-05-22 14:52:35 -040016277 /* Process each received buffer */
James Smart4d9ab992009-10-02 15:16:39 -040016278 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
James Smart2ea259e2017-02-12 13:52:27 -080016279
James Smart4d9ab992009-10-02 15:16:39 -040016280 /* check to see if this a valid type of frame */
16281 if (lpfc_fc_frame_check(phba, fc_hdr)) {
16282 lpfc_in_buf_free(phba, &dmabuf->dbuf);
16283 return;
16284 }
James Smart2ea259e2017-02-12 13:52:27 -080016285
James Smart7851fe22011-07-22 18:36:52 -040016286 if ((bf_get(lpfc_cqe_code,
16287 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
16288 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
16289 &dmabuf->cq_event.cqe.rcqe_cmpl);
16290 else
16291 fcfi = bf_get(lpfc_rcqe_fcf_id,
16292 &dmabuf->cq_event.cqe.rcqe_cmpl);
James Smart939723a2012-05-09 21:19:03 -040016293
James Smart895427b2017-02-12 13:52:30 -080016294 /* d_id this frame is directed to */
16295 did = sli4_did_from_fc_hdr(fc_hdr);
16296
16297 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
James Smart939723a2012-05-09 21:19:03 -040016298 if (!vport) {
James Smart4d9ab992009-10-02 15:16:39 -040016299 /* throw out the frame */
16300 lpfc_in_buf_free(phba, &dmabuf->dbuf);
16301 return;
16302 }
James Smart939723a2012-05-09 21:19:03 -040016303
James Smart939723a2012-05-09 21:19:03 -040016304 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
16305 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
16306 (did != Fabric_DID)) {
16307 /*
16308 * Throw out the frame if we are not pt2pt.
16309 * The pt2pt protocol allows for discovery frames
16310 * to be received without a registered VPI.
16311 */
16312 if (!(vport->fc_flag & FC_PT2PT) ||
16313 (phba->link_state == LPFC_HBA_READY)) {
16314 lpfc_in_buf_free(phba, &dmabuf->dbuf);
16315 return;
16316 }
16317 }
16318
James Smart6669f9b2009-10-02 15:16:45 -040016319 /* Handle the basic abort sequence (BA_ABTS) event */
16320 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
16321 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
16322 return;
16323 }
16324
James Smart4d9ab992009-10-02 15:16:39 -040016325 /* Link this frame */
16326 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
16327 if (!seq_dmabuf) {
16328 /* unable to add frame to vport - throw it out */
16329 lpfc_in_buf_free(phba, &dmabuf->dbuf);
16330 return;
16331 }
16332 /* If not last frame in sequence continue processing frames. */
James Smartdef9c7a2009-12-21 17:02:28 -050016333 if (!lpfc_seq_complete(seq_dmabuf))
James Smart4d9ab992009-10-02 15:16:39 -040016334 return;
James Smartdef9c7a2009-12-21 17:02:28 -050016335
James Smart6669f9b2009-10-02 15:16:45 -040016336 /* Send the complete sequence to the upper layer protocol */
16337 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
James Smart4f774512009-05-22 14:52:35 -040016338}
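
/*
 * Editor's note: a summary of the unsolicited receive path implemented above,
 * reconstructed from the code for readability rather than from a
 * specification.
 *
 *	lpfc_sli4_handle_received_buffer()
 *	    lpfc_fc_frame_check()          validate the frame header
 *	    lpfc_fc_frame_to_vport()       resolve the destination vport (fcfi/did)
 *	    lpfc_sli4_handle_unsol_abort() handle BA_ABTS frames
 *	    lpfc_fc_frame_add()            link the frame into its sequence
 *	    lpfc_seq_complete()            check for the final frame of the sequence
 *	    lpfc_sli4_send_seq_to_ulp()    hand the completed sequence to the ULP
 */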
James Smart6fb120a2009-05-22 14:52:59 -040016339
16340/**
16341 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
16342 * @phba: pointer to lpfc hba data structure.
16343 *
16344 * This routine is invoked to post rpi header templates to the
16345 * HBA consistent with the SLI-4 interface spec. This routine
James Smart49198b32010-04-06 15:04:33 -040016346 * posts a SLI4_PAGE_SIZE memory region to the port for each rpi header
16347 * page, each holding up to SLI4_PAGE_SIZE / 64 rpi context headers.
James Smart6fb120a2009-05-22 14:52:59 -040016348 *
16349 * This routine does not require any locks. It's usage is expected
16350 * to be driver load or reset recovery when the driver is
16351 * sequential.
16352 *
16353 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020016354 * 0 - successful
James Smartd439d282010-09-29 11:18:45 -040016355 * -EIO - The mailbox failed to complete successfully.
James Smart6fb120a2009-05-22 14:52:59 -040016356 * When this error occurs, the driver is not guaranteed
16357 * to have any rpi regions posted to the device and
16358 * must either attempt to repost the regions or take a
16359 * fatal error.
16360 **/
16361int
16362lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
16363{
16364 struct lpfc_rpi_hdr *rpi_page;
16365 uint32_t rc = 0;
James Smart6d368e52011-05-24 11:44:12 -040016366 uint16_t lrpi = 0;
James Smart6fb120a2009-05-22 14:52:59 -040016367
James Smart6d368e52011-05-24 11:44:12 -040016368 /* SLI4 ports that support extents do not require RPI headers. */
16369 if (!phba->sli4_hba.rpi_hdrs_in_use)
16370 goto exit;
16371 if (phba->sli4_hba.extents_in_use)
16372 return -EIO;
16373
James Smart6fb120a2009-05-22 14:52:59 -040016374 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
James Smart6d368e52011-05-24 11:44:12 -040016375 /*
16376 * Assign the rpi headers a physical rpi only if the driver
16377 * has not initialized those resources. A port reset only
16378 * needs the headers posted.
16379 */
16380 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
16381 LPFC_RPI_RSRC_RDY)
16382 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
16383
James Smart6fb120a2009-05-22 14:52:59 -040016384 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
16385 if (rc != MBX_SUCCESS) {
16386 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16387 "2008 Error %d posting all rpi "
16388 "headers\n", rc);
16389 rc = -EIO;
16390 break;
16391 }
16392 }
16393
James Smart6d368e52011-05-24 11:44:12 -040016394 exit:
16395 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
16396 LPFC_RPI_RSRC_RDY);
James Smart6fb120a2009-05-22 14:52:59 -040016397 return rc;
16398}
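
/*
 * Editor's note: illustrative usage sketch, not part of the driver. This
 * routine is typically invoked once from SLI-4 HBA setup and again after a
 * port reset; the error handling and log number shown here are assumptions
 * for illustration only.
 *
 *	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
 *	if (unlikely(rc)) {
 *		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 *				"xxxx Failed to post rpi headers, rc %d\n", rc);
 *		return -ENODEV;
 *	}
 */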
16399
16400/**
16401 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
16402 * @phba: pointer to lpfc hba data structure.
16403 * @rpi_page: pointer to the rpi memory region.
16404 *
16405 * This routine is invoked to post a single rpi header to the
16406 * HBA consistent with the SLI-4 interface spec. This memory region
16407 * maps up to 64 rpi context regions.
16408 *
16409 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020016410 * 0 - successful
James Smartd439d282010-09-29 11:18:45 -040016411 * -ENOMEM - No available memory
16412 * -EIO - The mailbox failed to complete successfully.
James Smart6fb120a2009-05-22 14:52:59 -040016413 **/
16414int
16415lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
16416{
16417 LPFC_MBOXQ_t *mboxq;
16418 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
16419 uint32_t rc = 0;
James Smart6fb120a2009-05-22 14:52:59 -040016420 uint32_t shdr_status, shdr_add_status;
16421 union lpfc_sli4_cfg_shdr *shdr;
16422
James Smart6d368e52011-05-24 11:44:12 -040016423 /* SLI4 ports that support extents do not require RPI headers. */
16424 if (!phba->sli4_hba.rpi_hdrs_in_use)
16425 return rc;
16426 if (phba->sli4_hba.extents_in_use)
16427 return -EIO;
16428
James Smart6fb120a2009-05-22 14:52:59 -040016429 /* The port is notified of the header region via a mailbox command. */
16430 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16431 if (!mboxq) {
16432 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16433 "2001 Unable to allocate memory for issuing "
16434 "SLI_CONFIG_SPECIAL mailbox command\n");
16435 return -ENOMEM;
16436 }
16437
16438 /* Post all rpi memory regions to the port. */
16439 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
James Smart6fb120a2009-05-22 14:52:59 -040016440 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
16441 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
16442 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
James Smartfedd3b72011-02-16 12:39:24 -050016443 sizeof(struct lpfc_sli4_cfg_mhdr),
16444 LPFC_SLI4_MBX_EMBED);
James Smart6d368e52011-05-24 11:44:12 -040016445
16446
16447 /* Post the physical rpi to the port for this rpi header. */
James Smart6fb120a2009-05-22 14:52:59 -040016448 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
16449 rpi_page->start_rpi);
James Smart6d368e52011-05-24 11:44:12 -040016450 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
16451 hdr_tmpl, rpi_page->page_count);
16452
James Smart6fb120a2009-05-22 14:52:59 -040016453 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
16454 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
James Smartf1126682009-06-10 17:22:44 -040016455 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
James Smart6fb120a2009-05-22 14:52:59 -040016456 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
16457 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16458 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16459 if (rc != MBX_TIMEOUT)
16460 mempool_free(mboxq, phba->mbox_mem_pool);
16461 if (shdr_status || shdr_add_status || rc) {
16462 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16463 "2514 POST_RPI_HDR mailbox failed with "
16464 "status x%x add_status x%x, mbx status x%x\n",
16465 shdr_status, shdr_add_status, rc);
16466 rc = -ENXIO;
16467 }
16468 return rc;
16469}
16470
16471/**
16472 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
16473 * @phba: pointer to lpfc hba data structure.
16474 *
16475 * This routine is invoked to allocate the next available rpi from the
16476 * driver's rpi bitmask. When the number of remaining rpis drops below
James Smart49198b32010-04-06 15:04:33 -040016477 * the low water mark, an additional rpi header page is created and
16478 * posted to the port so the usable rpi range can grow.
James Smart6fb120a2009-05-22 14:52:59 -040016479 *
16480 * Returns
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020016481 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
James Smart6fb120a2009-05-22 14:52:59 -040016482 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
16483 **/
16484int
16485lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
16486{
James Smart6d368e52011-05-24 11:44:12 -040016487 unsigned long rpi;
16488 uint16_t max_rpi, rpi_limit;
16489 uint16_t rpi_remaining, lrpi = 0;
James Smart6fb120a2009-05-22 14:52:59 -040016490 struct lpfc_rpi_hdr *rpi_hdr;
James Smart4902b382013-10-10 12:20:35 -040016491 unsigned long iflag;
James Smart6fb120a2009-05-22 14:52:59 -040016492
James Smart6fb120a2009-05-22 14:52:59 -040016493 /*
James Smart6d368e52011-05-24 11:44:12 -040016494 * Fetch the next logical rpi. Because this index is logical,
16495 * the driver starts at 0 each time.
James Smart6fb120a2009-05-22 14:52:59 -040016496 */
James Smart4902b382013-10-10 12:20:35 -040016497 spin_lock_irqsave(&phba->hbalock, iflag);
James Smartbe6bb942015-04-07 15:07:22 -040016498 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
16499 rpi_limit = phba->sli4_hba.next_rpi;
16500
James Smart6d368e52011-05-24 11:44:12 -040016501 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
16502 if (rpi >= rpi_limit)
James Smart6fb120a2009-05-22 14:52:59 -040016503 rpi = LPFC_RPI_ALLOC_ERROR;
16504 else {
16505 set_bit(rpi, phba->sli4_hba.rpi_bmask);
16506 phba->sli4_hba.max_cfg_param.rpi_used++;
16507 phba->sli4_hba.rpi_count++;
16508 }
James Smartbe6bb942015-04-07 15:07:22 -040016509 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
16510 "0001 rpi:%x max:%x lim:%x\n",
16511 (int) rpi, max_rpi, rpi_limit);
James Smart6fb120a2009-05-22 14:52:59 -040016512
16513 /*
16514 * Don't try to allocate more rpi header regions if the device limit
James Smart6d368e52011-05-24 11:44:12 -040016515 * has been exhausted.
James Smart6fb120a2009-05-22 14:52:59 -040016516 */
16517 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
16518 (phba->sli4_hba.rpi_count >= max_rpi)) {
James Smart4902b382013-10-10 12:20:35 -040016519 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart6fb120a2009-05-22 14:52:59 -040016520 return rpi;
16521 }
16522
16523 /*
James Smart6d368e52011-05-24 11:44:12 -040016524 * RPI header postings are not required for SLI4 ports capable of
16525 * extents.
16526 */
16527 if (!phba->sli4_hba.rpi_hdrs_in_use) {
James Smart4902b382013-10-10 12:20:35 -040016528 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart6d368e52011-05-24 11:44:12 -040016529 return rpi;
16530 }
16531
16532 /*
James Smart6fb120a2009-05-22 14:52:59 -040016533 * If the driver is running low on rpi resources, allocate another
16534 * page now. Note that the next_rpi value is used because
16535 * it represents how many are actually in use whereas max_rpi notes
16536 * the maximum number supported by the device.
16537 */
James Smart6d368e52011-05-24 11:44:12 -040016538 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
James Smart4902b382013-10-10 12:20:35 -040016539 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart6fb120a2009-05-22 14:52:59 -040016540 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
16541 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
16542 if (!rpi_hdr) {
16543 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16544 "2002 Error Could not grow rpi "
16545 "count\n");
16546 } else {
James Smart6d368e52011-05-24 11:44:12 -040016547 lrpi = rpi_hdr->start_rpi;
16548 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
James Smart6fb120a2009-05-22 14:52:59 -040016549 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
16550 }
16551 }
16552
16553 return rpi;
16554}
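
/*
 * Editor's note: a minimal usage sketch, not part of the driver, showing how
 * an rpi from lpfc_sli4_alloc_rpi() is paired with lpfc_sli4_free_rpi() on an
 * error path. lpfc_example_issue_reg_login() is a hypothetical helper used
 * only for illustration.
 *
 *	int rpi = lpfc_sli4_alloc_rpi(phba);
 *
 *	if (rpi == LPFC_RPI_ALLOC_ERROR)
 *		return -ENOSPC;
 *	if (lpfc_example_issue_reg_login(phba, rpi)) {
 *		lpfc_sli4_free_rpi(phba, rpi);
 *		return -EIO;
 *	}
 */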
16555
16556/**
16557 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
16558 * @phba: pointer to lpfc hba data structure.
16559 *
16560 * This routine is invoked to release an rpi to the pool of
16561 * available rpis maintained by the driver.
16562 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040016563static void
James Smartd7c47992010-06-08 18:31:54 -040016564__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
16565{
16566 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
16567 phba->sli4_hba.rpi_count--;
16568 phba->sli4_hba.max_cfg_param.rpi_used--;
16569 }
16570}
16571
16572/**
16573 * lpfc_sli4_free_rpi - Release an rpi for reuse.
16574 * @phba: pointer to lpfc hba data structure.
16575 *
16576 * This routine is invoked to release an rpi to the pool of
16577 * available rpis maintained by the driver.
16578 **/
16579void
James Smart6fb120a2009-05-22 14:52:59 -040016580lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
16581{
16582 spin_lock_irq(&phba->hbalock);
James Smartd7c47992010-06-08 18:31:54 -040016583 __lpfc_sli4_free_rpi(phba, rpi);
James Smart6fb120a2009-05-22 14:52:59 -040016584 spin_unlock_irq(&phba->hbalock);
16585}
16586
16587/**
16588 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
16589 * @phba: pointer to lpfc hba data structure.
16590 *
16591 * This routine is invoked to remove the memory region that
16592 * provided rpi via a bitmask.
16593 **/
16594void
16595lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
16596{
16597 kfree(phba->sli4_hba.rpi_bmask);
James Smart6d368e52011-05-24 11:44:12 -040016598 kfree(phba->sli4_hba.rpi_ids);
16599 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
James Smart6fb120a2009-05-22 14:52:59 -040016600}
16601
16602/**
16603 * lpfc_sli4_resume_rpi - Resume an rpi for a remote node
16604 * @ndlp: pointer to the lpfc nodelist whose registered rpi is resumed.
 * @cmpl: optional mailbox completion handler (NULL selects the default).
 * @arg: caller context passed to @cmpl when a handler is supplied.
16605 *
16606 * This routine is invoked to issue a RESUME_RPI mailbox command to the
16607 * port for the rpi registered to @ndlp.
16608 **/
16609int
James Smart6b5151f2012-01-18 16:24:06 -050016610lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
16611 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
James Smart6fb120a2009-05-22 14:52:59 -040016612{
16613 LPFC_MBOXQ_t *mboxq;
16614 struct lpfc_hba *phba = ndlp->phba;
16615 int rc;
16616
16617 /* The port is notified of the header region via a mailbox command. */
16618 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16619 if (!mboxq)
16620 return -ENOMEM;
16621
16622 /* Post all rpi memory regions to the port. */
16623 lpfc_resume_rpi(mboxq, ndlp);
James Smart6b5151f2012-01-18 16:24:06 -050016624 if (cmpl) {
16625 mboxq->mbox_cmpl = cmpl;
16626 mboxq->context1 = arg;
16627 mboxq->context2 = ndlp;
James Smart72859902012-01-18 16:25:38 -050016628 } else
16629 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
James Smart6b5151f2012-01-18 16:24:06 -050016630 mboxq->vport = ndlp->vport;
James Smart6fb120a2009-05-22 14:52:59 -040016631 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16632 if (rc == MBX_NOT_FINISHED) {
16633 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16634 "2010 Resume RPI Mailbox failed "
16635 "status %d, mbxStatus x%x\n", rc,
16636 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
16637 mempool_free(mboxq, phba->mbox_mem_pool);
16638 return -EIO;
16639 }
16640 return 0;
16641}
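
/*
 * Editor's note: a hedged usage sketch, not part of the driver. Callers that
 * only need the rpi resumed pass NULL for @cmpl and @arg so the default
 * mailbox completion handler is used; a caller that must continue work after
 * the RESUME_RPI completes supplies its own handler.
 * lpfc_example_resume_cmpl() and my_ctx are hypothetical names used only for
 * illustration.
 *
 *	lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
 *
 *	lpfc_sli4_resume_rpi(ndlp, lpfc_example_resume_cmpl, my_ctx);
 */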
16642
16643/**
16644 * lpfc_sli4_init_vpi - Initialize a vpi with the port
James Smart76a95d72010-11-20 23:11:48 -050016645 * @vport: Pointer to the vport for which the vpi is being initialized
James Smart6fb120a2009-05-22 14:52:59 -040016646 *
James Smart76a95d72010-11-20 23:11:48 -050016647 * This routine is invoked to activate a vpi with the port.
James Smart6fb120a2009-05-22 14:52:59 -040016648 *
16649 * Returns:
16650 * 0 success
16651 * -Evalue otherwise
16652 **/
16653int
James Smart76a95d72010-11-20 23:11:48 -050016654lpfc_sli4_init_vpi(struct lpfc_vport *vport)
James Smart6fb120a2009-05-22 14:52:59 -040016655{
16656 LPFC_MBOXQ_t *mboxq;
16657 int rc = 0;
James Smart6a9c52c2009-10-02 15:16:51 -040016658 int retval = MBX_SUCCESS;
James Smart6fb120a2009-05-22 14:52:59 -040016659 uint32_t mbox_tmo;
James Smart76a95d72010-11-20 23:11:48 -050016660 struct lpfc_hba *phba = vport->phba;
James Smart6fb120a2009-05-22 14:52:59 -040016661 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16662 if (!mboxq)
16663 return -ENOMEM;
James Smart76a95d72010-11-20 23:11:48 -050016664 lpfc_init_vpi(phba, mboxq, vport->vpi);
James Smarta183a152011-10-10 21:32:43 -040016665 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
James Smart6fb120a2009-05-22 14:52:59 -040016666 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
James Smart6fb120a2009-05-22 14:52:59 -040016667 if (rc != MBX_SUCCESS) {
James Smart76a95d72010-11-20 23:11:48 -050016668 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
James Smart6fb120a2009-05-22 14:52:59 -040016669 "2022 INIT VPI Mailbox failed "
16670 "status %d, mbxStatus x%x\n", rc,
16671 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
James Smart6a9c52c2009-10-02 15:16:51 -040016672 retval = -EIO;
James Smart6fb120a2009-05-22 14:52:59 -040016673 }
James Smart6a9c52c2009-10-02 15:16:51 -040016674 if (rc != MBX_TIMEOUT)
James Smart76a95d72010-11-20 23:11:48 -050016675 mempool_free(mboxq, vport->phba->mbox_mem_pool);
James Smart6a9c52c2009-10-02 15:16:51 -040016676
16677 return retval;
James Smart6fb120a2009-05-22 14:52:59 -040016678}
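
/*
 * Editor's note: illustrative sketch only. A vport typically initializes its
 * vpi before issuing FDISC during SLI-4 discovery; the error handling and log
 * number below are assumptions, not the driver's actual recovery policy.
 *
 *	if (lpfc_sli4_init_vpi(vport))
 *		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
 *				 "xxxx INIT_VPI failed for vpi %d\n", vport->vpi);
 *	else
 *		lpfc_initial_fdisc(vport);
 */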
16679
16680/**
16681 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
16682 * @phba: pointer to lpfc hba data structure.
16683 * @mboxq: Pointer to mailbox object.
16684 *
16685 * This routine is invoked to manually add a single FCF record. The caller
16686 * must pass a completely initialized FCF_Record. This routine takes
16687 * care of the nonembedded mailbox operations.
16688 **/
16689static void
16690lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
16691{
16692 void *virt_addr;
16693 union lpfc_sli4_cfg_shdr *shdr;
16694 uint32_t shdr_status, shdr_add_status;
16695
16696 virt_addr = mboxq->sge_array->addr[0];
16697 /* The IOCTL status is embedded in the mailbox subheader. */
16698 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
16699 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16700 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16701
16702 if ((shdr_status || shdr_add_status) &&
16703 (shdr_status != STATUS_FCF_IN_USE))
16704 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16705 "2558 ADD_FCF_RECORD mailbox failed with "
16706 "status x%x add_status x%x\n",
16707 shdr_status, shdr_add_status);
16708
16709 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16710}
16711
16712/**
16713 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
16714 * @phba: pointer to lpfc hba data structure.
16715 * @fcf_record: pointer to the initialized fcf record to add.
16716 *
16717 * This routine is invoked to manually add a single FCF record. The caller
16718 * must pass a completely initialized FCF_Record. This routine takes
16719 * care of the nonembedded mailbox operations.
16720 **/
16721int
16722lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
16723{
16724 int rc = 0;
16725 LPFC_MBOXQ_t *mboxq;
16726 uint8_t *bytep;
16727 void *virt_addr;
James Smart6fb120a2009-05-22 14:52:59 -040016728 struct lpfc_mbx_sge sge;
16729 uint32_t alloc_len, req_len;
16730 uint32_t fcfindex;
16731
16732 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16733 if (!mboxq) {
16734 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16735 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
16736 return -ENOMEM;
16737 }
16738
16739 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
16740 sizeof(uint32_t);
16741
16742 /* Allocate DMA memory and set up the non-embedded mailbox command */
16743 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
16744 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
16745 req_len, LPFC_SLI4_MBX_NEMBED);
16746 if (alloc_len < req_len) {
16747 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16748 "2523 Allocated DMA memory size (x%x) is "
16749 "less than the requested DMA memory "
16750 "size (x%x)\n", alloc_len, req_len);
16751 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16752 return -ENOMEM;
16753 }
16754
16755 /*
16756 * Get the first SGE entry from the non-embedded DMA memory. This
16757 * routine only uses a single SGE.
16758 */
16759 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
James Smart6fb120a2009-05-22 14:52:59 -040016760 virt_addr = mboxq->sge_array->addr[0];
16761 /*
16762 * Configure the FCF record for FCFI 0. This is the driver's
16763 * hardcoded default and gets used in nonFIP mode.
16764 */
16765 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
16766 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
16767 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
16768
16769 /*
16770 * Copy the fcf_index and the FCF Record Data. The data starts after
16771 * the FCoE header plus word10. The data copy needs to be endian
16772 * correct.
16773 */
16774 bytep += sizeof(uint32_t);
16775 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
16776 mboxq->vport = phba->pport;
16777 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
16778 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16779 if (rc == MBX_NOT_FINISHED) {
16780 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16781 "2515 ADD_FCF_RECORD mailbox failed with "
16782 "status 0x%x\n", rc);
16783 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16784 rc = -EIO;
16785 } else
16786 rc = 0;
16787
16788 return rc;
16789}
16790
16791/**
16792 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
16793 * @phba: pointer to lpfc hba data structure.
16794 * @fcf_record: pointer to the fcf record to write the default data.
16795 * @fcf_index: FCF table entry index.
16796 *
16797 * This routine is invoked to build the driver's default FCF record. The
16798 * values used are hardcoded. This routine handles memory initialization.
16799 *
16800 **/
16801void
16802lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
16803 struct fcf_record *fcf_record,
16804 uint16_t fcf_index)
16805{
16806 memset(fcf_record, 0, sizeof(struct fcf_record));
16807 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
16808 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
16809 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
16810 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
16811 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
16812 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
16813 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
16814 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
16815 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
16816 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
16817 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
16818 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
16819 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
James Smart0c287582009-06-10 17:22:56 -040016820 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
James Smart6fb120a2009-05-22 14:52:59 -040016821 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
16822 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
16823 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
16824 /* Set the VLAN bit map */
16825 if (phba->valid_vlan) {
16826 fcf_record->vlan_bitmap[phba->vlan_id / 8]
16827 = 1 << (phba->vlan_id % 8);
16828 }
16829}
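
/*
 * Editor's note: a hedged sketch of how the two FCF helpers above are used
 * together to seed the driver's default FCF record for non-FIP operation.
 * LPFC_FCOE_FCF_DEF_INDEX is assumed to name the default FCF table index;
 * the allocation and cleanup here are illustrative only.
 *
 *	struct fcf_record *fcf_record;
 *	int rc;
 *
 *	fcf_record = kzalloc(sizeof(struct fcf_record), GFP_KERNEL);
 *	if (!fcf_record)
 *		return -ENOMEM;
 *	lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
 *					LPFC_FCOE_FCF_DEF_INDEX);
 *	rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
 *	kfree(fcf_record);
 */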
16830
16831/**
James Smart0c9ab6f2010-02-26 14:15:57 -050016832 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
James Smart6fb120a2009-05-22 14:52:59 -040016833 * @phba: pointer to lpfc hba data structure.
16834 * @fcf_index: FCF table entry offset.
16835 *
James Smart0c9ab6f2010-02-26 14:15:57 -050016836 * This routine is invoked to scan the entire FCF table by reading FCF
16837 * records and processing them one at a time starting from the @fcf_index
16838 * for initial FCF discovery or fast FCF failover rediscovery.
16839 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030016840 * Return 0 if the mailbox command is submitted successfully, non-zero
James Smart0c9ab6f2010-02-26 14:15:57 -050016841 * otherwise.
James Smart6fb120a2009-05-22 14:52:59 -040016842 **/
16843int
James Smart0c9ab6f2010-02-26 14:15:57 -050016844lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
James Smart6fb120a2009-05-22 14:52:59 -040016845{
16846 int rc = 0, error;
16847 LPFC_MBOXQ_t *mboxq;
James Smart6fb120a2009-05-22 14:52:59 -040016848
James Smart32b97932009-07-19 10:01:21 -040016849 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
James Smart80c17842012-03-01 22:35:45 -050016850 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
James Smart6fb120a2009-05-22 14:52:59 -040016851 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16852 if (!mboxq) {
16853 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16854 "2000 Failed to allocate mbox for "
16855 "READ_FCF cmd\n");
James Smart4d9ab992009-10-02 15:16:39 -040016856 error = -ENOMEM;
James Smart0c9ab6f2010-02-26 14:15:57 -050016857 goto fail_fcf_scan;
James Smart6fb120a2009-05-22 14:52:59 -040016858 }
James Smartecfd03c2010-02-12 14:41:27 -050016859 /* Construct the read FCF record mailbox command */
James Smart0c9ab6f2010-02-26 14:15:57 -050016860 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
James Smartecfd03c2010-02-12 14:41:27 -050016861 if (rc) {
16862 error = -EINVAL;
James Smart0c9ab6f2010-02-26 14:15:57 -050016863 goto fail_fcf_scan;
James Smart6fb120a2009-05-22 14:52:59 -040016864 }
James Smartecfd03c2010-02-12 14:41:27 -050016865 /* Issue the mailbox command asynchronously */
James Smart6fb120a2009-05-22 14:52:59 -040016866 mboxq->vport = phba->pport;
James Smart0c9ab6f2010-02-26 14:15:57 -050016867 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
James Smarta93ff372010-10-22 11:06:08 -040016868
16869 spin_lock_irq(&phba->hbalock);
16870 phba->hba_flag |= FCF_TS_INPROG;
16871 spin_unlock_irq(&phba->hbalock);
16872
James Smart6fb120a2009-05-22 14:52:59 -040016873 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
James Smartecfd03c2010-02-12 14:41:27 -050016874 if (rc == MBX_NOT_FINISHED)
James Smart6fb120a2009-05-22 14:52:59 -040016875 error = -EIO;
James Smartecfd03c2010-02-12 14:41:27 -050016876 else {
James Smart38b92ef2010-08-04 16:11:39 -040016877 /* Reset eligible FCF count for new scan */
16878 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
James Smart999d8132010-03-15 11:24:56 -040016879 phba->fcf.eligible_fcf_cnt = 0;
James Smart6fb120a2009-05-22 14:52:59 -040016880 error = 0;
James Smart32b97932009-07-19 10:01:21 -040016881 }
James Smart0c9ab6f2010-02-26 14:15:57 -050016882fail_fcf_scan:
James Smart4d9ab992009-10-02 15:16:39 -040016883 if (error) {
16884 if (mboxq)
16885 lpfc_sli4_mbox_cmd_free(phba, mboxq);
James Smarta93ff372010-10-22 11:06:08 -040016886 /* FCF scan failed, clear FCF_TS_INPROG flag */
James Smart4d9ab992009-10-02 15:16:39 -040016887 spin_lock_irq(&phba->hbalock);
James Smarta93ff372010-10-22 11:06:08 -040016888 phba->hba_flag &= ~FCF_TS_INPROG;
James Smart4d9ab992009-10-02 15:16:39 -040016889 spin_unlock_irq(&phba->hbalock);
16890 }
James Smart6fb120a2009-05-22 14:52:59 -040016891 return error;
16892}
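
/*
 * Editor's note: illustrative only. An initial scan of the FCF table starts
 * from LPFC_FCOE_FCF_GET_FIRST; the same call restarts the scan for fast FCF
 * failover rediscovery. The log message number below is a placeholder.
 *
 *	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 *	if (rc)
 *		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
 *				"xxxx FCF table scan failed to start, rc %d\n",
 *				rc);
 */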
James Smarta0c87cb2009-07-19 10:01:10 -040016893
16894/**
James Smarta93ff372010-10-22 11:06:08 -040016895 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
James Smart0c9ab6f2010-02-26 14:15:57 -050016896 * @phba: pointer to lpfc hba data structure.
16897 * @fcf_index: FCF table entry offset.
16898 *
16899 * This routine is invoked to read an FCF record indicated by @fcf_index
James Smarta93ff372010-10-22 11:06:08 -040016900 * and to use it for FLOGI roundrobin FCF failover.
James Smart0c9ab6f2010-02-26 14:15:57 -050016901 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030016902 * Return 0 if the mailbox command is submitted successfully, non-zero
James Smart0c9ab6f2010-02-26 14:15:57 -050016903 * otherwise.
16904 **/
16905int
16906lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
16907{
16908 int rc = 0, error;
16909 LPFC_MBOXQ_t *mboxq;
16910
16911 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16912 if (!mboxq) {
16913 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
16914 "2763 Failed to allocate mbox for "
16915 "READ_FCF cmd\n");
16916 error = -ENOMEM;
16917 goto fail_fcf_read;
16918 }
16919 /* Construct the read FCF record mailbox command */
16920 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
16921 if (rc) {
16922 error = -EINVAL;
16923 goto fail_fcf_read;
16924 }
16925 /* Issue the mailbox command asynchronously */
16926 mboxq->vport = phba->pport;
16927 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
16928 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16929 if (rc == MBX_NOT_FINISHED)
16930 error = -EIO;
16931 else
16932 error = 0;
16933
16934fail_fcf_read:
16935 if (error && mboxq)
16936 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16937 return error;
16938}
16939
16940/**
16941 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
16942 * @phba: pointer to lpfc hba data structure.
16943 * @fcf_index: FCF table entry offset.
16944 *
16945 * This routine is invoked to read an FCF record indicated by @fcf_index to
James Smarta93ff372010-10-22 11:06:08 -040016946 * determine whether it's eligible for FLOGI roundrobin failover list.
James Smart0c9ab6f2010-02-26 14:15:57 -050016947 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030016948 * Return 0 if the mailbox command is submitted successfully, non-zero
James Smart0c9ab6f2010-02-26 14:15:57 -050016949 * otherwise.
16950 **/
16951int
16952lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
16953{
16954 int rc = 0, error;
16955 LPFC_MBOXQ_t *mboxq;
16956
16957 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16958 if (!mboxq) {
16959 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
16960 "2758 Failed to allocate mbox for "
16961 "READ_FCF cmd\n");
16962 error = -ENOMEM;
16963 goto fail_fcf_read;
16964 }
16965 /* Construct the read FCF record mailbox command */
16966 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
16967 if (rc) {
16968 error = -EINVAL;
16969 goto fail_fcf_read;
16970 }
16971 /* Issue the mailbox command asynchronously */
16972 mboxq->vport = phba->pport;
16973 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
16974 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16975 if (rc == MBX_NOT_FINISHED)
16976 error = -EIO;
16977 else
16978 error = 0;
16979
16980fail_fcf_read:
16981 if (error && mboxq)
16982 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16983 return error;
16984}
16985
16986/**
James Smartf5cb5302015-12-16 18:11:52 -050016987 * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask at the next priority level
James Smart7d791df2011-07-22 18:37:52 -040016988 * @phba: pointer to the lpfc_hba struct for this port.
16989 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
16990 * routine when the rr_bmask is empty. The FCF indices are put into the
16991 * rr_bmask based on their priority level, starting from the highest
16992 * priority down to the lowest. The most likely FCF candidate will be in
16993 * the highest priority group. When this routine is called it searches the
16994 * fcf_pri list for the next lowest priority group and repopulates the
16995 * rr_bmask with only those fcf_indexes.
16996 * returns:
16997 * 1=success 0=failure
16998 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040016999static int
James Smart7d791df2011-07-22 18:37:52 -040017000lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
17001{
17002 uint16_t next_fcf_pri;
17003 uint16_t last_index;
17004 struct lpfc_fcf_pri *fcf_pri;
17005 int rc;
17006 int ret = 0;
17007
17008 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
17009 LPFC_SLI4_FCF_TBL_INDX_MAX);
17010 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
17011 "3060 Last IDX %d\n", last_index);
James Smart25626692013-03-01 16:36:54 -050017012
17013 /* Verify the priority list has 2 or more entries */
17014 spin_lock_irq(&phba->hbalock);
17015 if (list_empty(&phba->fcf.fcf_pri_list) ||
17016 list_is_singular(&phba->fcf.fcf_pri_list)) {
17017 spin_unlock_irq(&phba->hbalock);
James Smart7d791df2011-07-22 18:37:52 -040017018 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
17019 "3061 Last IDX %d\n", last_index);
17020 return 0; /* Empty rr list */
17021 }
James Smart25626692013-03-01 16:36:54 -050017022 spin_unlock_irq(&phba->hbalock);
17023
James Smart7d791df2011-07-22 18:37:52 -040017024 next_fcf_pri = 0;
17025 /*
17026 * Clear the rr_bmask and set all of the bits that are at this
17027 * priority.
17028 */
17029 memset(phba->fcf.fcf_rr_bmask, 0,
17030 sizeof(*phba->fcf.fcf_rr_bmask));
17031 spin_lock_irq(&phba->hbalock);
17032 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
17033 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
17034 continue;
17035 /*
17036 * the 1st priority that has not FLOGI failed
17037 * will be the highest.
17038 */
17039 if (!next_fcf_pri)
17040 next_fcf_pri = fcf_pri->fcf_rec.priority;
17041 spin_unlock_irq(&phba->hbalock);
17042 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
17043 rc = lpfc_sli4_fcf_rr_index_set(phba,
17044 fcf_pri->fcf_rec.fcf_index);
17045 if (rc)
17046 return 0;
17047 }
17048 spin_lock_irq(&phba->hbalock);
17049 }
17050 /*
17051 * if next_fcf_pri was not set above and the list is not empty then
17052 * we have failed flogis on all of them. So reset flogi failed
Anatol Pomozov4907cb72012-09-01 10:31:09 -070017053 * and start at the beginning.
James Smart7d791df2011-07-22 18:37:52 -040017054 */
17055 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
17056 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
17057 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
17058 /*
17059 * the 1st priority that has not FLOGI failed
17060 * will be the highest.
17061 */
17062 if (!next_fcf_pri)
17063 next_fcf_pri = fcf_pri->fcf_rec.priority;
17064 spin_unlock_irq(&phba->hbalock);
17065 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
17066 rc = lpfc_sli4_fcf_rr_index_set(phba,
17067 fcf_pri->fcf_rec.fcf_index);
17068 if (rc)
17069 return 0;
17070 }
17071 spin_lock_irq(&phba->hbalock);
17072 }
17073 } else
17074 ret = 1;
17075 spin_unlock_irq(&phba->hbalock);
17076
17077 return ret;
17078}
17079/**
James Smart0c9ab6f2010-02-26 14:15:57 -050017080 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
17081 * @phba: pointer to lpfc hba data structure.
17082 *
17083 * This routine is to get the next eligible FCF record index in a round
17084 * robin fashion. If the next eligible FCF record index equals the
James Smarta93ff372010-10-22 11:06:08 -040017085 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
James Smart0c9ab6f2010-02-26 14:15:57 -050017086 * shall be returned, otherwise, the next eligible FCF record's index
17087 * shall be returned.
17088 **/
17089uint16_t
17090lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
17091{
17092 uint16_t next_fcf_index;
17093
James Smart421c6622013-01-03 15:44:16 -050017094initial_priority:
James Smart3804dc82010-07-14 15:31:37 -040017095 /* Search start from next bit of currently registered FCF index */
James Smart421c6622013-01-03 15:44:16 -050017096 next_fcf_index = phba->fcf.current_rec.fcf_indx;
17097
James Smart7d791df2011-07-22 18:37:52 -040017098next_priority:
James Smart421c6622013-01-03 15:44:16 -050017099 /* Determine the next fcf index to check */
17100 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
James Smart0c9ab6f2010-02-26 14:15:57 -050017101 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
17102 LPFC_SLI4_FCF_TBL_INDX_MAX,
James Smart3804dc82010-07-14 15:31:37 -040017103 next_fcf_index);
17104
James Smart0c9ab6f2010-02-26 14:15:57 -050017105 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
James Smart7d791df2011-07-22 18:37:52 -040017106 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
17107 /*
17108 * If we have wrapped then we need to clear the bits that
17109 * have been tested so that we can detect when we should
17110 * change the priority level.
17111 */
James Smart0c9ab6f2010-02-26 14:15:57 -050017112 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
17113 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
James Smart7d791df2011-07-22 18:37:52 -040017114 }
17115
James Smart0c9ab6f2010-02-26 14:15:57 -050017116
James Smart3804dc82010-07-14 15:31:37 -040017117 /* Check roundrobin failover list empty condition */
James Smart7d791df2011-07-22 18:37:52 -040017118 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
17119 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
17120 /*
17121 * If next fcf index is not found check if there are lower
17122 * Priority level fcf's in the fcf_priority list.
17123 * Set up the rr_bmask with all of the available fcf bits
17124 * at that level and continue the selection process.
17125 */
17126 if (lpfc_check_next_fcf_pri_level(phba))
James Smart421c6622013-01-03 15:44:16 -050017127 goto initial_priority;
James Smart3804dc82010-07-14 15:31:37 -040017128 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
17129 "2844 No roundrobin failover FCF available\n");
James Smart7d791df2011-07-22 18:37:52 -040017130 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
17131 return LPFC_FCOE_FCF_NEXT_NONE;
17132 else {
17133 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
17134 "3063 Only FCF available idx %d, flag %x\n",
17135 next_fcf_index,
17136 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
17137 return next_fcf_index;
17138 }
James Smart3804dc82010-07-14 15:31:37 -040017139 }
17140
James Smart7d791df2011-07-22 18:37:52 -040017141 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
17142 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
James Smartf5cb5302015-12-16 18:11:52 -050017143 LPFC_FCF_FLOGI_FAILED) {
17144 if (list_is_singular(&phba->fcf.fcf_pri_list))
17145 return LPFC_FCOE_FCF_NEXT_NONE;
17146
James Smart7d791df2011-07-22 18:37:52 -040017147 goto next_priority;
James Smartf5cb5302015-12-16 18:11:52 -050017148 }
James Smart7d791df2011-07-22 18:37:52 -040017149
James Smart3804dc82010-07-14 15:31:37 -040017150 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040017151 "2845 Get next roundrobin failover FCF (x%x)\n",
17152 next_fcf_index);
17153
James Smart0c9ab6f2010-02-26 14:15:57 -050017154 return next_fcf_index;
17155}
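
/*
 * Editor's note: a hedged sketch of the roundrobin failover pattern implied
 * by lpfc_sli4_fcf_rr_next_index_get(), lpfc_sli4_fcf_rr_read_fcf_rec() and
 * lpfc_sli4_fcf_rr_index_clear(). This is not driver code; it only
 * illustrates the call pattern after a FLOGI failure on the current FCF.
 *
 *	fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 *	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
 *		// no eligible FCF left, stop the roundrobin failover
 *	} else {
 *		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
 *		if (rc)
 *			lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
 *	}
 */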
17156
17157/**
17158 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
17159 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the FCF table whose bit is to be set.
17160 *
17161 * This routine sets the FCF record index into the eligible bmask for
James Smarta93ff372010-10-22 11:06:08 -040017162 * roundrobin failover search. It checks to make sure that the index
James Smart0c9ab6f2010-02-26 14:15:57 -050017163 * does not go beyond the range of the driver allocated bmask dimension
17164 * before setting the bit.
17165 *
17166 * Returns 0 if the index bit successfully set, otherwise, it returns
17167 * -EINVAL.
17168 **/
17169int
17170lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
17171{
17172 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
17173 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040017174 "2610 FCF (x%x) reached driver's book "
17175 "keeping dimension:x%x\n",
James Smart0c9ab6f2010-02-26 14:15:57 -050017176 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
17177 return -EINVAL;
17178 }
17179 /* Set the eligible FCF record index bmask */
17180 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
17181
James Smart3804dc82010-07-14 15:31:37 -040017182 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040017183 "2790 Set FCF (x%x) to roundrobin FCF failover "
James Smart3804dc82010-07-14 15:31:37 -040017184 "bmask\n", fcf_index);
17185
James Smart0c9ab6f2010-02-26 14:15:57 -050017186 return 0;
17187}
17188
17189/**
James Smart3804dc82010-07-14 15:31:37 -040017190 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
James Smart0c9ab6f2010-02-26 14:15:57 -050017191 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the FCF table whose bit is to be cleared.
17192 *
17193 * This routine clears the FCF record index from the eligible bmask for
James Smarta93ff372010-10-22 11:06:08 -040017194 * roundrobin failover search. It checks to make sure that the index
James Smart0c9ab6f2010-02-26 14:15:57 -050017195 * does not go beyond the range of the driver allocated bmask dimension
17196 * before clearing the bit.
17197 **/
17198void
17199lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
17200{
James Smart9a803a72013-09-06 12:17:56 -040017201 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
James Smart0c9ab6f2010-02-26 14:15:57 -050017202 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
17203 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040017204 "2762 FCF (x%x) reached driver's book "
17205 "keeping dimension:x%x\n",
James Smart0c9ab6f2010-02-26 14:15:57 -050017206 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
17207 return;
17208 }
17209 /* Clear the eligible FCF record index bmask */
James Smart7d791df2011-07-22 18:37:52 -040017210 spin_lock_irq(&phba->hbalock);
James Smart9a803a72013-09-06 12:17:56 -040017211 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
17212 list) {
James Smart7d791df2011-07-22 18:37:52 -040017213 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
17214 list_del_init(&fcf_pri->list);
17215 break;
17216 }
17217 }
17218 spin_unlock_irq(&phba->hbalock);
James Smart0c9ab6f2010-02-26 14:15:57 -050017219 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
James Smart3804dc82010-07-14 15:31:37 -040017220
17221 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040017222 "2791 Clear FCF (x%x) from roundrobin failover "
James Smart3804dc82010-07-14 15:31:37 -040017223 "bmask\n", fcf_index);
James Smart0c9ab6f2010-02-26 14:15:57 -050017224}
17225
17226/**
James Smartecfd03c2010-02-12 14:41:27 -050017227 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
17228 * @phba: pointer to lpfc hba data structure.
17229 *
17230 * This routine is the completion routine for the rediscover FCF table mailbox
17231 * command. On mailbox failure it retries the current FCF or treats the event
17232 * as a link down; on success it starts the FCF rediscovery wait timer.
17233 **/
Rashika Kheria5d8b8162014-09-03 12:55:04 -040017234static void
James Smartecfd03c2010-02-12 14:41:27 -050017235lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
17236{
17237 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
17238 uint32_t shdr_status, shdr_add_status;
17239
17240 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
17241
17242 shdr_status = bf_get(lpfc_mbox_hdr_status,
17243 &redisc_fcf->header.cfg_shdr.response);
17244 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
17245 &redisc_fcf->header.cfg_shdr.response);
17246 if (shdr_status || shdr_add_status) {
James Smart0c9ab6f2010-02-26 14:15:57 -050017247 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
James Smartecfd03c2010-02-12 14:41:27 -050017248 "2746 Requesting for FCF rediscovery failed "
17249 "status x%x add_status x%x\n",
17250 shdr_status, shdr_add_status);
James Smart0c9ab6f2010-02-26 14:15:57 -050017251 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
James Smartfc2b9892010-02-26 14:15:29 -050017252 spin_lock_irq(&phba->hbalock);
James Smart0c9ab6f2010-02-26 14:15:57 -050017253 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
James Smartfc2b9892010-02-26 14:15:29 -050017254 spin_unlock_irq(&phba->hbalock);
17255 /*
17256 * CVL event triggered FCF rediscover request failed,
17257 * last resort to re-try current registered FCF entry.
17258 */
17259 lpfc_retry_pport_discovery(phba);
17260 } else {
17261 spin_lock_irq(&phba->hbalock);
James Smart0c9ab6f2010-02-26 14:15:57 -050017262 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
James Smartfc2b9892010-02-26 14:15:29 -050017263 spin_unlock_irq(&phba->hbalock);
17264 /*
17265 * DEAD FCF event triggered FCF rediscover request
17266 * failed, last resort to fail over as a link down
17267 * to FCF registration.
17268 */
17269 lpfc_sli4_fcf_dead_failthrough(phba);
17270 }
James Smart0c9ab6f2010-02-26 14:15:57 -050017271 } else {
17272 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040017273 "2775 Start FCF rediscover quiescent timer\n");
James Smartecfd03c2010-02-12 14:41:27 -050017274 /*
17275 * Start FCF rediscovery wait timer for pending FCF
17276 * before rescan FCF record table.
17277 */
17278 lpfc_fcf_redisc_wait_start_timer(phba);
James Smart0c9ab6f2010-02-26 14:15:57 -050017279 }
James Smartecfd03c2010-02-12 14:41:27 -050017280
17281 mempool_free(mbox, phba->mbox_mem_pool);
17282}
17283
17284/**
James Smart3804dc82010-07-14 15:31:37 -040017285 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
James Smartecfd03c2010-02-12 14:41:27 -050017286 * @phba: pointer to lpfc hba data structure.
17287 *
17288 * This routine is invoked to request rediscovery of the entire FCF table
17289 * by the port.
17290 **/
17291int
17292lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
17293{
17294 LPFC_MBOXQ_t *mbox;
17295 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
17296 int rc, length;
17297
James Smart0c9ab6f2010-02-26 14:15:57 -050017298 /* Cancel retry delay timers to all vports before FCF rediscover */
17299 lpfc_cancel_all_vport_retry_delay_timer(phba);
17300
James Smartecfd03c2010-02-12 14:41:27 -050017301 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17302 if (!mbox) {
17303 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17304 "2745 Failed to allocate mbox for "
17305 "requesting FCF rediscover.\n");
17306 return -ENOMEM;
17307 }
17308
17309 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
17310 sizeof(struct lpfc_sli4_cfg_mhdr));
17311 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17312 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
17313 length, LPFC_SLI4_MBX_EMBED);
17314
17315 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
17316 /* Set count to 0 for invalidating the entire FCF database */
17317 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
17318
17319 /* Issue the mailbox command asynchronously */
17320 mbox->vport = phba->pport;
17321 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
17322 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
17323
17324 if (rc == MBX_NOT_FINISHED) {
17325 mempool_free(mbox, phba->mbox_mem_pool);
17326 return -EIO;
17327 }
17328 return 0;
17329}
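
/*
 * Editor's note: illustrative only. A caller handling an FCF DEAD or CVL
 * event requests rediscovery and, if the request cannot be issued, may fall
 * back through lpfc_sli4_fcf_dead_failthrough() below; the exact recovery
 * policy shown here is an assumption for illustration.
 *
 *	rc = lpfc_sli4_redisc_fcf_table(phba);
 *	if (rc)
 *		lpfc_sli4_fcf_dead_failthrough(phba);
 */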
17330
17331/**
James Smartfc2b9892010-02-26 14:15:29 -050017332 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
17333 * @phba: pointer to lpfc hba data structure.
17334 *
17335 * This function is the failover routine as a last resort to the FCF DEAD
17336 * event when driver failed to perform fast FCF failover.
17337 **/
17338void
17339lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
17340{
17341 uint32_t link_state;
17342
17343 /*
17344 * Last resort as FCF DEAD event failover will treat this as
17345 * a link down, but save the link state because we don't want
17346 * it to be changed to Link Down unless it is already down.
17347 */
17348 link_state = phba->link_state;
17349 lpfc_linkdown(phba);
17350 phba->link_state = link_state;
17351
17352 /* Unregister FCF if no devices connected to it */
17353 lpfc_unregister_unused_fcf(phba);
17354}
17355
17356/**
James Smart026abb82011-12-13 13:20:45 -050017357 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
James Smarta0c87cb2009-07-19 10:01:10 -040017358 * @phba: pointer to lpfc hba data structure.
James Smart026abb82011-12-13 13:20:45 -050017359 * @rgn23_data: pointer to configure region 23 data.
James Smarta0c87cb2009-07-19 10:01:10 -040017360 *
James Smart026abb82011-12-13 13:20:45 -050017361 * This function gets SLI3 port configure region 23 data through memory dump
17362 * mailbox command. When it successfully retrieves data, the size of the data
17363 * will be returned, otherwise, 0 will be returned.
James Smarta0c87cb2009-07-19 10:01:10 -040017364 **/
James Smart026abb82011-12-13 13:20:45 -050017365static uint32_t
17366lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
James Smarta0c87cb2009-07-19 10:01:10 -040017367{
17368 LPFC_MBOXQ_t *pmb = NULL;
17369 MAILBOX_t *mb;
James Smart026abb82011-12-13 13:20:45 -050017370 uint32_t offset = 0;
James Smarta0c87cb2009-07-19 10:01:10 -040017371 int rc;
17372
James Smart026abb82011-12-13 13:20:45 -050017373 if (!rgn23_data)
17374 return 0;
17375
James Smarta0c87cb2009-07-19 10:01:10 -040017376 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17377 if (!pmb) {
17378 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smart026abb82011-12-13 13:20:45 -050017379 "2600 failed to allocate mailbox memory\n");
17380 return 0;
James Smarta0c87cb2009-07-19 10:01:10 -040017381 }
17382 mb = &pmb->u.mb;
17383
James Smarta0c87cb2009-07-19 10:01:10 -040017384 do {
17385 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
17386 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
17387
17388 if (rc != MBX_SUCCESS) {
17389 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smart026abb82011-12-13 13:20:45 -050017390 "2601 failed to read config "
17391 "region 23, rc 0x%x Status 0x%x\n",
17392 rc, mb->mbxStatus);
James Smarta0c87cb2009-07-19 10:01:10 -040017393 mb->un.varDmp.word_cnt = 0;
17394 }
17395 /*
17396 * dump mem may return a zero when finished or we got a
17397 * mailbox error, either way we are done.
17398 */
17399 if (mb->un.varDmp.word_cnt == 0)
17400 break;
17401 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
17402 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
17403
17404 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
James Smart026abb82011-12-13 13:20:45 -050017405 rgn23_data + offset,
17406 mb->un.varDmp.word_cnt);
James Smarta0c87cb2009-07-19 10:01:10 -040017407 offset += mb->un.varDmp.word_cnt;
17408 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
17409
James Smart026abb82011-12-13 13:20:45 -050017410 mempool_free(pmb, phba->mbox_mem_pool);
17411 return offset;
17412}
17413
17414/**
17415 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
17416 * @phba: pointer to lpfc hba data structure.
17417 * @rgn23_data: pointer to configure region 23 data.
17418 *
17419 * This function gets SLI4 port configure region 23 data through memory dump
17420 * mailbox command. When it successfully retrieves data, the size of the data
17421 * will be returned, otherwise, 0 will be returned.
17422 **/
17423static uint32_t
17424lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
17425{
17426 LPFC_MBOXQ_t *mboxq = NULL;
17427 struct lpfc_dmabuf *mp = NULL;
17428 struct lpfc_mqe *mqe;
17429 uint32_t data_length = 0;
17430 int rc;
17431
17432 if (!rgn23_data)
17433 return 0;
17434
17435 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17436 if (!mboxq) {
17437 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17438 "3105 failed to allocate mailbox memory\n");
17439 return 0;
17440 }
17441
17442 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
17443 goto out;
17444 mqe = &mboxq->u.mqe;
17445 mp = (struct lpfc_dmabuf *) mboxq->context1;
17446 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
17447 if (rc)
17448 goto out;
17449 data_length = mqe->un.mb_words[5];
17450 if (data_length == 0)
17451 goto out;
17452 if (data_length > DMP_RGN23_SIZE) {
17453 data_length = 0;
17454 goto out;
17455 }
17456 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
17457out:
17458 mempool_free(mboxq, phba->mbox_mem_pool);
17459 if (mp) {
17460 lpfc_mbuf_free(phba, mp->virt, mp->phys);
17461 kfree(mp);
17462 }
17463 return data_length;
17464}
17465
17466/**
17467 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
17468 * @phba: pointer to lpfc hba data structure.
17469 *
17470 * This function reads region 23 and parses the TLV for port status to
17471 * decide if the user disabled the port. If the TLV indicates the
17472 * port is disabled, the hba_flag is set accordingly.
17473 **/
17474void
17475lpfc_sli_read_link_ste(struct lpfc_hba *phba)
17476{
17477 uint8_t *rgn23_data = NULL;
17478 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
17479 uint32_t offset = 0;
17480
17481 /* Get adapter Region 23 data */
17482 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
17483 if (!rgn23_data)
17484 goto out;
17485
17486 if (phba->sli_rev < LPFC_SLI_REV4)
17487 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
17488 else {
17489 if_type = bf_get(lpfc_sli_intf_if_type,
17490 &phba->sli4_hba.sli_intf);
17491 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
17492 goto out;
17493 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
17494 }
James Smarta0c87cb2009-07-19 10:01:10 -040017495
17496 if (!data_size)
17497 goto out;
17498
17499 /* Check the region signature first */
17500 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
17501 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17502 "2619 Config region 23 has bad signature\n");
17503 goto out;
17504 }
17505 offset += 4;
17506
17507 /* Check the data structure version */
17508 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
17509 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17510 "2620 Config region 23 has bad version\n");
17511 goto out;
17512 }
17513 offset += 4;
17514
17515 /* Parse TLV entries in the region */
17516 while (offset < data_size) {
17517 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
17518 break;
17519 /*
17520 * If the TLV is not driver specific TLV or driver id is
17521 * not linux driver id, skip the record.
17522 */
17523 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
17524 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
17525 (rgn23_data[offset + 3] != 0)) {
17526 offset += rgn23_data[offset + 1] * 4 + 4;
17527 continue;
17528 }
17529
17530 /* Driver found a driver specific TLV in the config region */
17531 sub_tlv_len = rgn23_data[offset + 1] * 4;
17532 offset += 4;
17533 tlv_offset = 0;
17534
17535 /*
17536 * Search for configured port state sub-TLV.
17537 */
17538 while ((offset < data_size) &&
17539 (tlv_offset < sub_tlv_len)) {
17540 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
17541 offset += 4;
17542 tlv_offset += 4;
17543 break;
17544 }
17545 if (rgn23_data[offset] != PORT_STE_TYPE) {
17546 offset += rgn23_data[offset + 1] * 4 + 4;
17547 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
17548 continue;
17549 }
17550
17551 /* This HBA contains PORT_STE configured */
17552 if (!rgn23_data[offset + 2])
17553 phba->hba_flag |= LINK_DISABLED;
17554
17555 goto out;
17556 }
17557 }
James Smart026abb82011-12-13 13:20:45 -050017558
James Smarta0c87cb2009-07-19 10:01:10 -040017559out:
James Smarta0c87cb2009-07-19 10:01:10 -040017560 kfree(rgn23_data);
17561 return;
17562}
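
/*
 * Editor's note: a sketch of the region 23 layout assumed by the parser
 * above, reconstructed from the code (offsets in bytes); it is not taken
 * from a hardware specification.
 *
 *	+0   signature    LPFC_REGION23_SIGNATURE (4 bytes)
 *	+4   version      LPFC_REGION23_VERSION (1 byte checked, 4 bytes consumed)
 *	+8   TLV records  byte 0: type, byte 1: length in words
 *	     driver-specific TLV: type DRIVER_SPECIFIC_TYPE, driver id
 *	     LINUX_DRIVER_ID, followed by sub-TLVs; the PORT_STE_TYPE sub-TLV
 *	     carries the configured port state (0 means the link is disabled).
 *	     The list ends at an LPFC_REGION23_LAST_REC record.
 */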
James Smart695a8142010-01-26 23:08:03 -050017563
17564/**
James Smart52d52442011-05-24 11:42:45 -040017565 * lpfc_wr_object - write an object to the firmware
17566 * @phba: HBA structure that indicates port to create a queue on.
17567 * @dmabuf_list: list of dmabufs to write to the port.
17568 * @size: the total byte value of the objects to write to the port.
17569 * @offset: the current offset to be used to start the transfer.
17570 *
17571 * This routine will create a wr_object mailbox command to send to the port.
17572 * The mailbox command will be constructed using the dma buffers described in
17573 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
17574 * BDEs as the embedded mailbox can support. The @offset variable will be
17575 * used to indicate the starting offset of the transfer and will also return
17576 * the offset after the write object mailbox has completed. @size is used to
17577 * determine the end of the object and whether the eof bit should be set.
17578 *
17579 * Returns 0 if successful, and @offset will contain the new offset to use
17580 * for the next write.
17581 * Returns a negative value for error cases.
17582 **/
17583int
17584lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
17585 uint32_t size, uint32_t *offset)
17586{
17587 struct lpfc_mbx_wr_object *wr_object;
17588 LPFC_MBOXQ_t *mbox;
17589 int rc = 0, i = 0;
17590 uint32_t shdr_status, shdr_add_status;
17591 uint32_t mbox_tmo;
17592 union lpfc_sli4_cfg_shdr *shdr;
17593 struct lpfc_dmabuf *dmabuf;
17594 uint32_t written = 0;
17595
17596 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17597 if (!mbox)
17598 return -ENOMEM;
17599
17600 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17601 LPFC_MBOX_OPCODE_WRITE_OBJECT,
17602 sizeof(struct lpfc_mbx_wr_object) -
17603 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
17604
17605 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
17606 wr_object->u.request.write_offset = *offset;
17607 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
17608 wr_object->u.request.object_name[0] =
17609 cpu_to_le32(wr_object->u.request.object_name[0]);
17610 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
17611 list_for_each_entry(dmabuf, dmabuf_list, list) {
17612 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
17613 break;
17614 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
17615 wr_object->u.request.bde[i].addrHigh =
17616 putPaddrHigh(dmabuf->phys);
17617 if (written + SLI4_PAGE_SIZE >= size) {
17618 wr_object->u.request.bde[i].tus.f.bdeSize =
17619 (size - written);
17620 written += (size - written);
17621 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
17622 } else {
17623 wr_object->u.request.bde[i].tus.f.bdeSize =
17624 SLI4_PAGE_SIZE;
17625 written += SLI4_PAGE_SIZE;
17626 }
17627 i++;
17628 }
17629 wr_object->u.request.bde_count = i;
17630 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
17631 if (!phba->sli4_hba.intr_enable)
17632 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17633 else {
James Smarta183a152011-10-10 21:32:43 -040017634 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart52d52442011-05-24 11:42:45 -040017635 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17636 }
17637 /* The IOCTL status is embedded in the mailbox subheader. */
17638 shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
17639 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17640 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17641 if (rc != MBX_TIMEOUT)
17642 mempool_free(mbox, phba->mbox_mem_pool);
17643 if (shdr_status || shdr_add_status || rc) {
17644 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17645 "3025 Write Object mailbox failed with "
17646 "status x%x add_status x%x, mbx status x%x\n",
17647 shdr_status, shdr_add_status, rc);
17648 rc = -ENXIO;
17649 } else
17650 *offset += wr_object->u.response.actual_write_length;
17651 return rc;
17652}
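/*
 * Hedged usage sketch (illustrative only): a firmware download loop might
 * drive lpfc_wr_object() until the whole image has been written, relying
 * on @offset being advanced by each successful mailbox. fw_size and
 * dmabuf_list are hypothetical and assumed to have been prepared by the
 * caller.
 *
 *	uint32_t offset = 0;
 *	int rc = 0;
 *
 *	while (offset < fw_size && !rc)
 *		rc = lpfc_wr_object(phba, &dmabuf_list, fw_size, &offset);
 *	if (rc)
 *		... abort the download and release the dma buffers ...
 */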
17653
17654/**
James Smart695a8142010-01-26 23:08:03 -050017655 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
17656 * @vport: pointer to vport data structure.
17657 *
17658 * This function iterates through the mailboxq and cleans up all REG_LOGIN
17659 * and REG_VPI mailbox commands associated with the vport. This function
17660 * is called when the driver wants to restart discovery of the vport due to
17661 * a Clear Virtual Link event.
17662 **/
17663void
17664lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
17665{
17666 struct lpfc_hba *phba = vport->phba;
17667 LPFC_MBOXQ_t *mb, *nextmb;
17668 struct lpfc_dmabuf *mp;
James Smart78730cf2010-04-06 15:06:30 -040017669 struct lpfc_nodelist *ndlp;
James Smartd439d282010-09-29 11:18:45 -040017670 struct lpfc_nodelist *act_mbx_ndlp = NULL;
James Smart589a52d2010-07-14 15:30:54 -040017671 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
James Smartd439d282010-09-29 11:18:45 -040017672 LIST_HEAD(mbox_cmd_list);
James Smart63e801c2010-11-20 23:14:19 -050017673 uint8_t restart_loop;
James Smart695a8142010-01-26 23:08:03 -050017674
James Smartd439d282010-09-29 11:18:45 -040017675 /* Clean up internally queued mailbox commands with the vport */
James Smart695a8142010-01-26 23:08:03 -050017676 spin_lock_irq(&phba->hbalock);
17677 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
17678 if (mb->vport != vport)
17679 continue;
17680
17681 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
17682 (mb->u.mb.mbxCommand != MBX_REG_VPI))
17683 continue;
17684
James Smartd439d282010-09-29 11:18:45 -040017685 list_del(&mb->list);
17686 list_add_tail(&mb->list, &mbox_cmd_list);
17687 }
17688 /* Clean up active mailbox command with the vport */
17689 mb = phba->sli.mbox_active;
17690 if (mb && (mb->vport == vport)) {
17691 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
17692 (mb->u.mb.mbxCommand == MBX_REG_VPI))
17693 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17694 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
17695 act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
17696 /* Put reference count for delayed processing */
17697 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
17698 /* Unregister the RPI when mailbox complete */
17699 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
17700 }
17701 }
James Smart63e801c2010-11-20 23:14:19 -050017702 /* Cleanup any mailbox completions which are not yet processed */
17703 do {
17704 restart_loop = 0;
17705 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
17706 /*
17707 * If this mailbox is already processed or it is
17708 * for another vport, ignore it.
17709 */
17710 if ((mb->vport != vport) ||
17711 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
17712 continue;
17713
17714 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
17715 (mb->u.mb.mbxCommand != MBX_REG_VPI))
17716 continue;
17717
17718 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17719 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
17720 ndlp = (struct lpfc_nodelist *)mb->context2;
17721 /* Unregister the RPI when mailbox complete */
17722 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
17723 restart_loop = 1;
17724 spin_unlock_irq(&phba->hbalock);
17725 spin_lock(shost->host_lock);
17726 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
17727 spin_unlock(shost->host_lock);
17728 spin_lock_irq(&phba->hbalock);
17729 break;
17730 }
17731 }
17732 } while (restart_loop);
17733
James Smartd439d282010-09-29 11:18:45 -040017734 spin_unlock_irq(&phba->hbalock);
17735
17736 /* Release the cleaned-up mailbox commands */
17737 while (!list_empty(&mbox_cmd_list)) {
17738 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
James Smart695a8142010-01-26 23:08:03 -050017739 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
17740 mp = (struct lpfc_dmabuf *) (mb->context1);
17741 if (mp) {
17742 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
17743 kfree(mp);
17744 }
James Smart78730cf2010-04-06 15:06:30 -040017745 ndlp = (struct lpfc_nodelist *) mb->context2;
James Smartd439d282010-09-29 11:18:45 -040017746 mb->context2 = NULL;
James Smart78730cf2010-04-06 15:06:30 -040017747 if (ndlp) {
Dan Carpenterec21b3b2010-08-08 00:15:17 +020017748 spin_lock(shost->host_lock);
James Smart589a52d2010-07-14 15:30:54 -040017749 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
Dan Carpenterec21b3b2010-08-08 00:15:17 +020017750 spin_unlock(shost->host_lock);
James Smart78730cf2010-04-06 15:06:30 -040017751 lpfc_nlp_put(ndlp);
James Smart78730cf2010-04-06 15:06:30 -040017752 }
James Smart695a8142010-01-26 23:08:03 -050017753 }
James Smart695a8142010-01-26 23:08:03 -050017754 mempool_free(mb, phba->mbox_mem_pool);
17755 }
James Smartd439d282010-09-29 11:18:45 -040017756
17757 /* Release the ndlp with the cleaned-up active mailbox command */
17758 if (act_mbx_ndlp) {
17759 spin_lock(shost->host_lock);
17760 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
17761 spin_unlock(shost->host_lock);
17762 lpfc_nlp_put(act_mbx_ndlp);
James Smart695a8142010-01-26 23:08:03 -050017763 }
James Smart695a8142010-01-26 23:08:03 -050017764}
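/*
 * Hedged usage sketch (illustrative only): a Clear Virtual Link handler
 * would typically flush the vport's pending discovery mailboxes with this
 * routine before re-registering and restarting discovery, for example:
 *
 *	lpfc_cleanup_pending_mbox(vport);
 *	... re-register the VPI and restart discovery for this vport ...
 */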
17765
James Smart2a9bf3d2010-06-07 15:24:45 -040017766/**
17767 * lpfc_drain_txq - Drain the txq
17768 * @phba: Pointer to HBA context object.
17769 *
17770 * This function attempts to submit IOCBs on the txq
17771 * to the adapter. For SLI4 adapters, the txq contains
17772 * ELS IOCBs that have been deferred because there
17773 * are no SGLs. This congestion can occur with large
17774 * vport counts during node discovery.
17775 **/
17776
17777uint32_t
17778lpfc_drain_txq(struct lpfc_hba *phba)
17779{
17780 LIST_HEAD(completions);
James Smart895427b2017-02-12 13:52:30 -080017781 struct lpfc_sli_ring *pring;
Daeseok Youn2e706372014-02-21 09:03:32 +090017782 struct lpfc_iocbq *piocbq = NULL;
James Smart2a9bf3d2010-06-07 15:24:45 -040017783 unsigned long iflags = 0;
17784 char *fail_msg = NULL;
17785 struct lpfc_sglq *sglq;
James Smart2f077842016-12-19 15:07:29 -080017786 union lpfc_wqe128 wqe128;
17787 union lpfc_wqe *wqe = (union lpfc_wqe *) &wqe128;
James Smarta2fc4aef2014-09-03 12:57:55 -040017788 uint32_t txq_cnt = 0;
James Smart2a9bf3d2010-06-07 15:24:45 -040017789
James Smart895427b2017-02-12 13:52:30 -080017790 pring = lpfc_phba_elsring(phba);
17791
James Smart398d81c2013-05-31 17:04:19 -040017792 spin_lock_irqsave(&pring->ring_lock, iflags);
James Smart0e9bb8d2013-03-01 16:35:12 -050017793 list_for_each_entry(piocbq, &pring->txq, list) {
17794 txq_cnt++;
17795 }
17796
17797 if (txq_cnt > pring->txq_max)
17798 pring->txq_max = txq_cnt;
James Smart2a9bf3d2010-06-07 15:24:45 -040017799
James Smart398d81c2013-05-31 17:04:19 -040017800 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart2a9bf3d2010-06-07 15:24:45 -040017801
James Smart0e9bb8d2013-03-01 16:35:12 -050017802 while (!list_empty(&pring->txq)) {
James Smart398d81c2013-05-31 17:04:19 -040017803 spin_lock_irqsave(&pring->ring_lock, iflags);
James Smart2a9bf3d2010-06-07 15:24:45 -040017804
James Smart19ca7602010-11-20 23:11:55 -050017805 piocbq = lpfc_sli_ringtx_get(phba, pring);
James Smarta6298522012-06-12 13:54:11 -040017806 if (!piocbq) {
James Smart398d81c2013-05-31 17:04:19 -040017807 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smarta6298522012-06-12 13:54:11 -040017808 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17809 "2823 txq empty and txq_cnt is %d\n ",
James Smart0e9bb8d2013-03-01 16:35:12 -050017810 txq_cnt);
James Smarta6298522012-06-12 13:54:11 -040017811 break;
17812 }
James Smart895427b2017-02-12 13:52:30 -080017813 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
James Smart2a9bf3d2010-06-07 15:24:45 -040017814 if (!sglq) {
James Smart19ca7602010-11-20 23:11:55 -050017815 __lpfc_sli_ringtx_put(phba, pring, piocbq);
James Smart398d81c2013-05-31 17:04:19 -040017816 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart2a9bf3d2010-06-07 15:24:45 -040017817 break;
James Smart2a9bf3d2010-06-07 15:24:45 -040017818 }
James Smart0e9bb8d2013-03-01 16:35:12 -050017819 txq_cnt--;
James Smart2a9bf3d2010-06-07 15:24:45 -040017820
17821 /* The xri and iocb resources are secured,
17822 * attempt to issue the request
17823 */
James Smart6d368e52011-05-24 11:44:12 -040017824 piocbq->sli4_lxritag = sglq->sli4_lxritag;
James Smart2a9bf3d2010-06-07 15:24:45 -040017825 piocbq->sli4_xritag = sglq->sli4_xritag;
17826 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
17827 fail_msg = "to convert bpl to sgl";
James Smart2f077842016-12-19 15:07:29 -080017828 else if (lpfc_sli4_iocb2wqe(phba, piocbq, wqe))
James Smart2a9bf3d2010-06-07 15:24:45 -040017829 fail_msg = "to convert iocb to wqe";
James Smart2f077842016-12-19 15:07:29 -080017830 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe))
James Smart2a9bf3d2010-06-07 15:24:45 -040017831 fail_msg = " - Wq is full";
17832 else
17833 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
17834
17835 if (fail_msg) {
17836 /* Failed means we can't issue and need to cancel */
17837 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17838 "2822 IOCB failed %s iotag 0x%x "
17839 "xri 0x%x\n",
17840 fail_msg,
17841 piocbq->iotag, piocbq->sli4_xritag);
17842 list_add_tail(&piocbq->list, &completions);
17843 }
James Smart398d81c2013-05-31 17:04:19 -040017844 spin_unlock_irqrestore(&pring->ring_lock, iflags);
James Smart2a9bf3d2010-06-07 15:24:45 -040017845 }
17846
James Smart2a9bf3d2010-06-07 15:24:45 -040017847 /* Cancel all the IOCBs that cannot be issued */
17848 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
17849 IOERR_SLI_ABORTED);
17850
James Smart0e9bb8d2013-03-01 16:35:12 -050017851 return txq_cnt;
James Smart2a9bf3d2010-06-07 15:24:45 -040017852}
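/*
 * Hedged usage sketch (illustrative only): once ELS SGLs have been freed
 * back to the pool, a caller can retry the deferred ELS traffic and check
 * how much is still queued:
 *
 *	uint32_t remaining;
 *
 *	remaining = lpfc_drain_txq(phba);
 *	if (remaining)
 *		... SGLs are still exhausted, IOCBs remain on the txq ...
 */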
James Smart895427b2017-02-12 13:52:30 -080017853
17854/**
17855 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
17856 * @phba: Pointer to HBA context object.
17857 * @pwqeq: Pointer to command WQE.
17858 * @sglq: Pointer to the scatter gather queue object.
17859 *
17860 * This routine converts the bpl or bde that is in the WQE
17861 * to a sgl list for the sli4 hardware. The physical address
17862 * of the bpl/bde is converted back to a virtual address.
17863 * If the WQE contains a BPL then the list of BDE's is
17864 * converted to sli4_sge's. If the WQE contains a single
17865 * BDE then it is converted to a single sli_sge.
17866 * The WQE is still in cpu endianness so the contents of
17867 * the bpl can be used without byte swapping.
17868 *
17869 * Returns valid XRI = Success, NO_XRI = Failure.
17870 */
17871static uint16_t
17872lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
17873 struct lpfc_sglq *sglq)
17874{
17875 uint16_t xritag = NO_XRI;
17876 struct ulp_bde64 *bpl = NULL;
17877 struct ulp_bde64 bde;
17878 struct sli4_sge *sgl = NULL;
17879 struct lpfc_dmabuf *dmabuf;
17880 union lpfc_wqe *wqe;
17881 int numBdes = 0;
17882 int i = 0;
17883 uint32_t offset = 0; /* accumulated offset in the sg request list */
17884 int inbound = 0; /* number of sg reply entries inbound from firmware */
17885 uint32_t cmd;
17886
17887 if (!pwqeq || !sglq)
17888 return xritag;
17889
17890 sgl = (struct sli4_sge *)sglq->sgl;
17891 wqe = &pwqeq->wqe;
17892 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
17893
17894 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
17895 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
17896 return sglq->sli4_xritag;
17897 numBdes = pwqeq->rsvd2;
17898 if (numBdes) {
17899 /* The addrHigh and addrLow fields within the WQE
17900 * have not been byteswapped yet so there is no
17901 * need to swap them back.
17902 */
17903 if (pwqeq->context3)
17904 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
17905 else
17906 return xritag;
17907
17908 bpl = (struct ulp_bde64 *)dmabuf->virt;
17909 if (!bpl)
17910 return xritag;
17911
17912 for (i = 0; i < numBdes; i++) {
17913 /* Should already be byte swapped. */
17914 sgl->addr_hi = bpl->addrHigh;
17915 sgl->addr_lo = bpl->addrLow;
17916
17917 sgl->word2 = le32_to_cpu(sgl->word2);
17918 if ((i+1) == numBdes)
17919 bf_set(lpfc_sli4_sge_last, sgl, 1);
17920 else
17921 bf_set(lpfc_sli4_sge_last, sgl, 0);
17922 /* swap the size field back to the cpu so we
17923 * can assign it to the sgl.
17924 */
17925 bde.tus.w = le32_to_cpu(bpl->tus.w);
17926 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
17927 /* The offsets in the sgl need to be accumulated
17928 * separately for the request and reply lists.
17929 * The request is always first, the reply follows.
17930 */
17931 switch (cmd) {
17932 case CMD_GEN_REQUEST64_WQE:
17933 /* add up the reply sg entries */
17934 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
17935 inbound++;
17936 /* first inbound? reset the offset */
17937 if (inbound == 1)
17938 offset = 0;
17939 bf_set(lpfc_sli4_sge_offset, sgl, offset);
17940 bf_set(lpfc_sli4_sge_type, sgl,
17941 LPFC_SGE_TYPE_DATA);
17942 offset += bde.tus.f.bdeSize;
17943 break;
17944 case CMD_FCP_TRSP64_WQE:
17945 bf_set(lpfc_sli4_sge_offset, sgl, 0);
17946 bf_set(lpfc_sli4_sge_type, sgl,
17947 LPFC_SGE_TYPE_DATA);
17948 break;
17949 case CMD_FCP_TSEND64_WQE:
17950 case CMD_FCP_TRECEIVE64_WQE:
17951 bf_set(lpfc_sli4_sge_type, sgl,
17952 bpl->tus.f.bdeFlags);
17953 if (i < 3)
17954 offset = 0;
17955 else
17956 offset += bde.tus.f.bdeSize;
17957 bf_set(lpfc_sli4_sge_offset, sgl, offset);
17958 break;
17959 }
17960 sgl->word2 = cpu_to_le32(sgl->word2);
17961 bpl++;
17962 sgl++;
17963 }
17964 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
17965 /* The addrHigh and addrLow fields of the BDE have not
17966 * been byteswapped yet so they need to be swapped
17967 * before putting them in the sgl.
17968 */
17969 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
17970 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
17971 sgl->word2 = le32_to_cpu(sgl->word2);
17972 bf_set(lpfc_sli4_sge_last, sgl, 1);
17973 sgl->word2 = cpu_to_le32(sgl->word2);
17974 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
17975 }
17976 return sglq->sli4_xritag;
17977}
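/*
 * Hedged sketch of how this helper is consumed (it mirrors the NVME_LS
 * path in lpfc_sli4_issue_wqe() below): the caller first secures an ELS
 * sglq, copies its XRI into the iocbq, and only then performs the
 * bpl-to-sgl conversion, failing the WQE if no XRI comes back.
 *
 *	sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
 *	if (!sglq)
 *		return WQE_BUSY;
 *	pwqe->sli4_lxritag = sglq->sli4_lxritag;
 *	pwqe->sli4_xritag = sglq->sli4_xritag;
 *	if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI)
 *		return WQE_ERROR;
 */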
17978
17979/**
17980 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
17981 * @phba: Pointer to HBA context object.
17982 * @ring_number: Base sli ring number
17983 * @pwqe: Pointer to command WQE.
17984 **/
17985int
17986lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
17987 struct lpfc_iocbq *pwqe)
17988{
17989 union lpfc_wqe *wqe = &pwqe->wqe;
James Smartf358dd02017-02-12 13:52:34 -080017990 struct lpfc_nvmet_rcv_ctx *ctxp;
James Smart895427b2017-02-12 13:52:30 -080017991 struct lpfc_queue *wq;
17992 struct lpfc_sglq *sglq;
17993 struct lpfc_sli_ring *pring;
17994 unsigned long iflags;
17995
17996 /* NVME_LS and NVME_LS ABTS requests. */
17997 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
17998 pring = phba->sli4_hba.nvmels_wq->pring;
17999 spin_lock_irqsave(&pring->ring_lock, iflags);
18000 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
18001 if (!sglq) {
18002 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18003 return WQE_BUSY;
18004 }
18005 pwqe->sli4_lxritag = sglq->sli4_lxritag;
18006 pwqe->sli4_xritag = sglq->sli4_xritag;
18007 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
18008 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18009 return WQE_ERROR;
18010 }
18011 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
18012 pwqe->sli4_xritag);
18013 if (lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe)) {
18014 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18015 return WQE_ERROR;
18016 }
18017 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
18018 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18019 return 0;
18020 }
18021
18022 /* NVME_FCREQ and NVME_ABTS requests */
18023 if (pwqe->iocb_flag & LPFC_IO_NVME) {
18024 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
18025 pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
18026
18027 spin_lock_irqsave(&pring->ring_lock, iflags);
18028 wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
18029 bf_set(wqe_cqid, &wqe->generic.wqe_com,
18030 phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
18031 if (lpfc_sli4_wq_put(wq, wqe)) {
18032 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18033 return WQE_ERROR;
18034 }
18035 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
18036 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18037 return 0;
18038 }
18039
James Smartf358dd02017-02-12 13:52:34 -080018040 /* NVMET requests */
18041 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
18042 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
18043 pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
18044
18045 spin_lock_irqsave(&pring->ring_lock, iflags);
18046 ctxp = pwqe->context2;
18047 sglq = ctxp->rqb_buffer->sglq;
18048 if (pwqe->sli4_xritag == NO_XRI) {
18049 pwqe->sli4_lxritag = sglq->sli4_lxritag;
18050 pwqe->sli4_xritag = sglq->sli4_xritag;
18051 }
18052 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
18053 pwqe->sli4_xritag);
18054 wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
18055 bf_set(wqe_cqid, &wqe->generic.wqe_com,
18056 phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
18057 if (lpfc_sli4_wq_put(wq, wqe)) {
18058 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18059 return WQE_ERROR;
18060 }
18061 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
18062 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18063 return 0;
18064 }
James Smart895427b2017-02-12 13:52:30 -080018065 return WQE_ERROR;
18066}
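/*
 * Hedged usage sketch (illustrative only): an NVME initiator submission
 * path would mark the iocbq, pick a hardware WQ index, and hand the WQE
 * to this routine, treating WQE_BUSY as a retryable condition. The
 * ring_number argument and wq_idx value shown here are assumptions made
 * for the example.
 *
 *	pwqe->iocb_flag |= LPFC_IO_NVME;
 *	pwqe->hba_wqidx = wq_idx;
 *	rc = lpfc_sli4_issue_wqe(phba, ring_number, pwqe);
 *	if (rc == WQE_BUSY)
 *		... requeue and retry later ...
 *	else if (rc)
 *		... fail the I/O ...
 */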