/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_nvme.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* NVME initiator-based functions */

static struct lpfc_nvme_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);


/**
 * lpfc_nvme_create_queue - Create a qhandle for an NVME hardware queue
 * @pnvme_lport: Pointer to the driver's local port data
 * @qidx: A cpu index used to affinitize IO queues and MSIX vectors.
 * @qsize: Requested queue size (not used by this driver).
 * @handle: An opaque driver handle used in follow-up calls.
 *
 * Driver registers this routine to preallocate and initialize any
 * internal data structures to bind the @qidx to its internal IO queues.
 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 *
 * Return value :
 * 0 - Success
 * -EINVAL - Unsupported input value.
 * -ENOMEM - Could not alloc necessary memory
 **/
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx, u16 qsize,
		       void **handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nvme_qhandle *qhandle;
	char *str;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;
	qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
	if (qhandle == NULL)
		return -ENOMEM;

	qhandle->cpu_id = smp_processor_id();
	qhandle->qidx = qidx;
	/*
	 * NVME qidx == 0 is the admin queue, so both admin queue
	 * and first IO queue will use MSI-X vector and associated
	 * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
	 */
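	/*
	 * Illustrative mapping, assuming a hypothetical config with
	 * cfg_nvme_io_channel = 4:
	 *   qidx 0 (admin) -> index 0
	 *   qidx 1..4 (IO) -> index 0..3
	 *   qidx 5 (IO)    -> index 0 again (modulo wrap)
	 */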
	if (qidx) {
		str = "IO ";  /* IO queue */
		qhandle->index = ((qidx - 1) %
			vport->phba->cfg_nvme_io_channel);
	} else {
		str = "ADM";  /* Admin queue */
		qhandle->index = qidx;
	}

	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
			 "6073 Binding %s HdwQueue %d (cpu %d) to "
			 "io_channel %d qhandle %p\n", str,
			 qidx, qhandle->cpu_id, qhandle->index, qhandle);
	*handle = (void *)qhandle;
	return 0;
}

/**
 * lpfc_nvme_delete_queue - Delete a qhandle for an NVME hardware queue
 * @pnvme_lport: Pointer to the driver's local port data
 * @qidx: A cpu index used to affinitize IO queues and MSIX vectors.
 * @handle: An opaque driver handle from lpfc_nvme_create_queue
 *
 * Driver registers this routine to free any internal data structures
 * used to bind the @qidx to its internal IO queues.
 *
 * Return value :
 * None
 **/
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx,
		       void *handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			"6001 ENTER. lpfc_pnvme %p, qidx x%x qhandle %p\n",
			lport, qidx, handle);
	kfree(handle);
}

static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
	struct lpfc_nvme_lport *lport = localport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->lport_unreg_done);
}

/* lpfc_nvme_remoteport_delete
 *
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall.  NVME transport calls this function
 * when it has completed the unregistration of a previously
 * registered remoteport.
 *
 * Return value :
 * None
 */
void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct lpfc_nvme_rport *rport = remoteport->private;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;

	ndlp = rport->ndlp;
	if (!ndlp)
		goto rport_err;

	vport = ndlp->vport;
	if (!vport)
		goto rport_err;

	/* Remove this rport from the lport's list - memory is owned by the
	 * transport. Remove the ndlp reference for the NVME transport before
	 * calling state machine to remove the node.
	 */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			"6146 remoteport delete complete %p\n",
			remoteport);
	ndlp->nrport = NULL;
	lpfc_nlp_put(ndlp);

 rport_err:
	/* This call has to execute as long as the rport is valid.
	 * Release any threads waiting for the unreg to complete.
	 */
	complete(&rport->rport_unreg_done);
}

static void
lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_vport *vport = cmdwqe->vport;
	uint32_t status;
	struct nvmefc_ls_req *pnvme_lsreq;
	struct lpfc_dmabuf *buf_ptr;
	struct lpfc_nodelist *ndlp;

	atomic_inc(&vport->phba->fc4NvmeLsCmpls);

	pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6047 nvme cmpl Enter "
			 "Data %p DID %x Xri: %x status %x cmd:%p lsreq:%p "
			 "bmp:%p ndlp:%p\n",
			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
			 cmdwqe->sli4_xritag, status,
			 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);

	lpfc_nvmeio_data(phba, "NVME LS  CMPL: xri x%x stat x%x parm x%x\n",
			 cmdwqe->sli4_xritag, status, wcqe->parameter);

	if (cmdwqe->context3) {
		buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		cmdwqe->context3 = NULL;
	}
	if (pnvme_lsreq->done)
		pnvme_lsreq->done(pnvme_lsreq, status);
	else
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6046 nvme cmpl without done callback? "
				 "Data %p DID %x Xri: %x status %x\n",
				 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
				 cmdwqe->sli4_xritag, status);
	if (ndlp) {
		lpfc_nlp_put(ndlp);
		cmdwqe->context1 = NULL;
	}
	lpfc_sli_release_iocbq(phba, cmdwqe);
}

static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
		  struct lpfc_dmabuf *inp,
		  struct nvmefc_ls_req *pnvme_lsreq,
		  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_wcqe_complete *),
		  struct lpfc_nodelist *ndlp, uint32_t num_entry,
		  uint32_t tmo, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	union lpfc_wqe *wqe;
	struct lpfc_iocbq *genwqe;
	struct ulp_bde64 *bpl;
	struct ulp_bde64 bde;
	int i, rc, xmit_len, first_len;

	/* Allocate buffer for command WQE */
	genwqe = lpfc_sli_get_iocbq(phba);
	if (genwqe == NULL)
		return 1;

	wqe = &genwqe->wqe;
	memset(wqe, 0, sizeof(union lpfc_wqe));

	genwqe->context3 = (uint8_t *)bmp;
	genwqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Save for completion so we can release these resources */
	genwqe->context1 = lpfc_nlp_get(ndlp);
	genwqe->context2 = (uint8_t *)pnvme_lsreq;
	/* Fill in payload, bp points to frame payload */

	if (!tmo)
		/* FC spec states we need 3 * ratov for CT requests */
		tmo = (3 * phba->fc_ratov);

	/* For this command calculate the xmit length of the request bde. */
	xmit_len = 0;
	first_len = 0;
	bpl = (struct ulp_bde64 *)bmp->virt;
	for (i = 0; i < num_entry; i++) {
		bde.tus.w = bpl[i].tus.w;
		if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
			break;
		xmit_len += bde.tus.f.bdeSize;
		if (i == 0)
			first_len = xmit_len;
	}

	genwqe->rsvd2 = num_entry;
	genwqe->hba_wqidx = 0;

	/* Words 0 - 2 */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->generic.bde.tus.f.bdeSize = first_len;
	wqe->generic.bde.addrLow = bpl[0].addrLow;
	wqe->generic.bde.addrHigh = bpl[0].addrHigh;

	/* Word 3 */
	wqe->gen_req.request_payload_len = first_len;

	/* Word 4 */

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
	bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
	bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1));
	bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
	bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
	bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

	/* Word 8 */
	wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);


	/* Issue GEN REQ WQE for NPORT <did> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "6050 Issue GEN REQ WQE to NPORT x%x "
			 "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n",
			 ndlp->nlp_DID, genwqe->iotag,
			 vport->port_state,
			 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
	genwqe->wqe_cmpl = cmpl;
	genwqe->iocb_cmpl = NULL;
	genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
	genwqe->vport = vport;
	genwqe->retry = retry;

	lpfc_nvmeio_data(phba, "NVME LS  XMIT: xri x%x iotag x%x to x%06x\n",
			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe);
	if (rc == WQE_ERROR) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "6045 Issue GEN REQ WQE to NPORT x%x "
				 "Data: x%x x%x\n",
				 ndlp->nlp_DID, genwqe->iotag,
				 vport->port_state);
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}
	return 0;
}

/**
 * lpfc_nvme_ls_req - Issue a Link Service request
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the @pnvme_lsreq
 * @pnvme_lsreq: Pointer to the link service request to issue
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 * 0 - Success
 * TODO: What are the failure codes.
 **/
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
		 struct nvme_fc_remote_port *pnvme_rport,
		 struct nvmefc_ls_req *pnvme_lsreq)
{
	int ret = 0;
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp;
	uint16_t ntype, nstate;

	/* There are two dma bufs in the request; actually there is one and
	 * the second one is just the start address + cmd size.
	 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
	 * in a lpfc_dmabuf struct.  When freeing we just free the wrapper
	 * because the nvme layer owns the data bufs.
	 * We do not have to break these packets open, we don't care what is
	 * in them.  And we do not have to look at the response data, we only
	 * care that we got a response.  All of the caring is going to happen
	 * in the nvme-fc layer.
	 */

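	/* The BPL built below describes those two buffers (illustrative):
	 *   bpl[0] -> LS request payload  (rqstdma, rqstlen, BDE_64)
	 *   bpl[1] -> LS response payload (rspdma,  rsplen,  BDE_64I)
	 */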
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6051 DID x%06x not an active rport.\n",
				 pnvme_rport->port_id);
		return -ENODEV;
	}

	/* The remote node has to be a mapped nvme target or an
	 * unmapped nvme initiator or it's an error.
	 */
	ntype = ndlp->nlp_type;
	nstate = ndlp->nlp_state;
	if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
	    (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6088 DID x%06x not ready for "
				 "IO. State x%x, Type x%x\n",
				 pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type);
		return -ENODEV;
	}
	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6044 Could not alloc LS buf for DID %x\n",
				 pnvme_rport->port_id);
		return 2;
	}
	INIT_LIST_HEAD(&bmp->list);
	bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
	if (!bmp->virt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6042 Could not alloc mbuf for DID %x\n",
				 pnvme_rport->port_id);
		kfree(bmp);
		return 3;
	}
	bpl = (struct ulp_bde64 *)bmp->virt;
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
	bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/* Expand print to include key fields. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6149 ENTER. lport %p, rport %p lsreq %p rqstlen:%d "
			 "rsplen:%d %pad %pad\n",
			 pnvme_lport, pnvme_rport,
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	atomic_inc(&vport->phba->fc4NvmeLsRequests);

	/* Hardcode the wait to 30 seconds.  Connections are failing otherwise.
	 * This code allows it all to work.
	 */
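	/* Note on the arguments below (illustrative): num_entry 2 covers the
	 * request and response BDEs built above; tmo 30 is the hardcoded
	 * wait in seconds described in the preceding comment.
	 */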
	ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
				pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
				ndlp, 2, 30, 0);
	if (ret != WQE_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
				 "6052 EXIT. issue ls wqe failed lport %p, "
				 "rport %p lsreq %p Status %x DID %x\n",
				 pnvme_lport, pnvme_rport, pnvme_lsreq,
				 ret, ndlp->nlp_DID);
		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
		kfree(bmp);
		return ret;
	}

	/* Stub in routine and return 0 for now. */
	return ret;
}

/**
 * lpfc_nvme_ls_abort - Abort a prior Link Service request
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport that issued the @pnvme_lsreq
 * @pnvme_lsreq: Pointer to the link service request to abort
 *
 * Driver registers this routine to abort a link service request
 * previously issued to a remote nvme-aware port.
 *
 * Return value :
 * None
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
		   struct nvme_fc_remote_port *pnvme_rport,
		   struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	LIST_HEAD(abort_list);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *wqe, *next_wqe;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;
	phba = vport->phba;

	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6049 Could not find node for DID %x\n",
				 pnvme_rport->port_id);
		return;
	}

	/* Expand print to include key fields. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6040 ENTER. lport %p, rport %p lsreq %p rqstlen:%d "
			 "rsplen:%d %pad %pad\n",
			 pnvme_lport, pnvme_rport,
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	/*
	 * Lock the ELS ring txcmplq and build a local list of all ELS IOs
	 * that need an ABTS.  The IOs need to stay on the txcmplq so that
	 * the abort operation completes them successfully.
	 */
	pring = phba->sli4_hba.nvmels_wq->pring;
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
		/* Add to abort_list on NDLP match. */
		if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) {
			wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
			list_add_tail(&wqe->dlist, &abort_list);
		}
	}
	spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Abort the targeted IOs and remove them from the abort list. */
	list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
		spin_lock_irq(&phba->hbalock);
		list_del_init(&wqe->dlist);
		lpfc_sli_issue_abort_iotag(phba, pring, wqe);
		spin_unlock_irq(&phba->hbalock);
	}
}

/* Fix up the existing sgls for NVME IO. */
static void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
		       struct lpfc_nvme_buf *lpfc_ncmd,
		       struct nvmefc_fcp_req *nCmd)
{
	struct sli4_sge *sgl;
	union lpfc_wqe128 *wqe;
	uint32_t *wptr, *dptr;

	/*
	 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
	 * match NVME.  NVME sends 96 bytes.  Also, use the
	 * nvme command's command and response dma addresses
	 * rather than the virtual memory to ease the restore
	 * operation.
	 */
	sgl = lpfc_ncmd->nvme_sgl;
	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);

	sgl++;

	/* Setup the physical region for the FCP RSP */
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
	sgl->word2 = le32_to_cpu(sgl->word2);
	if (nCmd->sg_cnt)
		bf_set(lpfc_sli4_sge_last, sgl, 0);
	else
		bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(nCmd->rsplen);

	/*
	 * Get a local pointer to the built-in wqe and correct
	 * the cmd size to match NVME's 96 bytes and fix
	 * the dma address.
	 */

	/* 128 byte wqe support here */
	wqe = (union lpfc_wqe128 *)&lpfc_ncmd->cur_iocbq.wqe;

	/* Word 0-2 - NVME CMND IU (embedded payload) */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
	wqe->generic.bde.tus.f.bdeSize = 60;
	wqe->generic.bde.addrHigh = 0;
	wqe->generic.bde.addrLow = 64;	/* Word 16 */

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       (nCmd->rsplen + nCmd->cmdlen));

	/* Word 10 */
	bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/*
	 * Embed the payload in the last half of the WQE
	 * WQE words 16-30 get the NVME CMD IU payload
	 *
	 * WQE words 16-19 get payload Words 1-4
	 * WQE words 20-21 get payload Words 6-7
	 * WQE words 22-29 get payload Words 16-23
	 */
	wptr = &wqe->words[16];		/* WQE ptr */
	dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
	dptr++;				/* Skip Word 0 in payload */

	*wptr++ = *dptr++;		/* Word 1 */
	*wptr++ = *dptr++;		/* Word 2 */
	*wptr++ = *dptr++;		/* Word 3 */
	*wptr++ = *dptr++;		/* Word 4 */
	dptr++;				/* Skip Word 5 in payload */
	*wptr++ = *dptr++;		/* Word 6 */
	*wptr++ = *dptr++;		/* Word 7 */
	dptr += 8;			/* Skip Words 8-15 in payload */
	*wptr++ = *dptr++;		/* Word 16 */
	*wptr++ = *dptr++;		/* Word 17 */
	*wptr++ = *dptr++;		/* Word 18 */
	*wptr++ = *dptr++;		/* Word 19 */
	*wptr++ = *dptr++;		/* Word 20 */
	*wptr++ = *dptr++;		/* Word 21 */
	*wptr++ = *dptr++;		/* Word 22 */
	*wptr   = *dptr;		/* Word 23 */
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvme_ktime(struct lpfc_hba *phba,
		struct lpfc_nvme_buf *lpfc_ncmd)
{
	uint64_t seg1, seg2, seg3, seg4;

	if (!phba->ktime_on)
		return;
	if (!lpfc_ncmd->ts_last_cmd ||
	    !lpfc_ncmd->ts_cmd_start ||
	    !lpfc_ncmd->ts_cmd_wqput ||
	    !lpfc_ncmd->ts_isr_cmpl ||
	    !lpfc_ncmd->ts_data_nvme)
		return;
	if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
		return;
	if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
		return;
	if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
		return;
	if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
		return;
	/*
	 * Segment 1 - Time from last FCP command cmpl is handed
	 * off to NVME Layer to start of next command.
	 * Segment 2 - Time from Driver receives an IO cmd start
	 * from NVME Layer to WQ put is done on IO cmd.
	 * Segment 3 - Time from Driver WQ put is done on IO cmd
	 * to MSI-X ISR for IO cmpl.
	 * Segment 4 - Time from MSI-X ISR for IO cmpl to when
	 * cmpl is handled off to the NVME Layer.
	 */
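	/*
	 * Timeline (illustrative):
	 *
	 *  prior cmpl   cmd start     WQ put     ISR cmpl    NVME cmpl
	 *      |----seg1----|----seg2----|----seg3----|----seg4----|
	 */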
	seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
	if (seg1 > 5000000)  /* 5 ms - for sequential IOs */
		return;

	/* Calculate times relative to start of IO */
	seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
	seg3 = (lpfc_ncmd->ts_isr_cmpl -
		lpfc_ncmd->ts_cmd_start) - seg2;
	seg4 = (lpfc_ncmd->ts_data_nvme -
		lpfc_ncmd->ts_cmd_start) - seg2 - seg3;
	phba->ktime_data_samples++;
	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;
	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;
	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;
	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	lpfc_ncmd->ts_last_cmd = 0;
	lpfc_ncmd->ts_cmd_start = 0;
	lpfc_ncmd->ts_cmd_wqput = 0;
	lpfc_ncmd->ts_isr_cmpl = 0;
	lpfc_ncmd->ts_data_nvme = 0;
}
#endif

/**
 * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
 * @phba: Pointer to the driver's hba instance data
 * @pwqeIn: Pointer to the command WQE that completed
 * @wcqe: Pointer to the work queue completion entry
 *
 * Driver registers this routine as the IO completion handler.  It
 * translates the WQE completion status into an NVME completion and
 * hands the result back to the nvme_fc transport.
 *
 * Return value :
 * None
 **/
static void
lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvme_buf *lpfc_ncmd =
		(struct lpfc_nvme_buf *)pwqeIn->context1;
	struct lpfc_vport *vport = pwqeIn->vport;
	struct nvmefc_fcp_req *nCmd;
	struct nvme_fc_ersp_iu *ep;
	struct nvme_fc_cmd_iu *cp;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	unsigned long flags;
	uint32_t code;
	uint16_t cid, sqhd, data;
	uint32_t *ptr;

	/* Sanity check on return of outstanding command */
	if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6071 Completion pointers bad on wqe %p.\n",
				 wcqe);
		return;
	}
	atomic_inc(&phba->fc4NvmeIoCmpls);

	nCmd = lpfc_ncmd->nvmeCmd;
	rport = lpfc_ncmd->nrport;

	lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 bf_get(lpfc_wcqe_c_status, wcqe), wcqe->parameter);
	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6061 rport %p, DID x%06x node not ready.\n",
				 rport, rport->remoteport->port_id);

		ndlp = lpfc_findnode_did(vport, rport->remoteport->port_id);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6062 Ignoring NVME cmpl.  No ndlp\n");
			goto out_err;
		}
	}

	code = bf_get(lpfc_wcqe_c_code, wcqe);
	if (code == CQE_CODE_NVME_ERSP) {
		/* For this type of CQE, we need to rebuild the rsp */
		ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

		/*
		 * Get Command Id from cmd to plug into response.  This
		 * code is not needed in the next NVME Transport drop.
		 */
		cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
		cid = cp->sqe.common.command_id;

		/*
		 * RSN is in CQE word 2
		 * SQHD is in CQE Word 3 bits 15:0
		 * Cmd Specific info is in CQE Word 1
		 * and in CQE Word 0 bits 15:0
		 */
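		/* Mapping used in the rebuild below (illustrative):
		 *   wcqe->parameter                  -> ep->rsn
		 *   wcqe->total_data_placed + ersp0  -> ep->cqe.result
		 *   lpfc_wcqe_c_sqhead (Word 3 15:0) -> ep->cqe.sq_head
		 */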
		sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

		/* Now lets build the NVME ERSP IU */
		ep->iu_len = cpu_to_be16(8);
		ep->rsn = wcqe->parameter;
		ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
		ep->rsvd12 = 0;
		ptr = (uint32_t *)&ep->cqe.result.u64;
		*ptr++ = wcqe->total_data_placed;
		data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
		*ptr = (uint32_t)data;
		ep->cqe.sq_head = sqhd;
		ep->cqe.sq_id = nCmd->sqid;
		ep->cqe.command_id = cid;
		ep->cqe.status = 0;

		lpfc_ncmd->status = IOSTAT_SUCCESS;
		lpfc_ncmd->result = 0;
		nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
		nCmd->transferred_length = nCmd->payload_length;
	} else {
		lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) &
				     LPFC_IOCB_STATUS_MASK);
		lpfc_ncmd->result = wcqe->parameter;

		/* For NVME, the only failure path that results in an
		 * IO error is when the adapter rejects it.  All other
		 * conditions are a success case and resolved by the
		 * transport.
		 * IOSTAT_FCP_RSP_ERROR means:
		 * 1. Length of data received doesn't match total
		 *    transfer length in WQE
		 * 2. If the RSP payload does NOT match these cases:
		 *    a. RSP length 12/24 bytes and all zeros
		 *    b. NVME ERSP
		 */
		switch (lpfc_ncmd->status) {
		case IOSTAT_SUCCESS:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = 0;
			nCmd->status = 0;
			break;
		case IOSTAT_FCP_RSP_ERROR:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = wcqe->parameter;
			nCmd->status = 0;
			/* Sanity check */
			if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
				break;
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6081 NVME Completion Protocol Error: "
					 "xri %x status x%x result x%x "
					 "placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed);
			break;
		default:
out_err:
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6072 NVME Completion Error: xri %x "
					 "status x%x result x%x placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed);
			nCmd->transferred_length = 0;
			nCmd->rcv_rsplen = 0;
			nCmd->status = NVME_SC_FC_TRANSPORT_ERROR;
		}
	}

	/* pick up SLI4 exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe))
		lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
	else
		lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

	if (ndlp && NLP_CHK_NODE_ACT(ndlp))
		atomic_dec(&ndlp->cmd_pending);

	/* Update stats and complete the IO.  There is
	 * no need for dma unprep because the nvme_transport
	 * owns the dma address.
	 */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on) {
		lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
		lpfc_ncmd->ts_data_nvme = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
		lpfc_nvme_ktime(phba, lpfc_ncmd);
	}
	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
		if (lpfc_ncmd->cpu != smp_processor_id())
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6701 CPU Check cmpl: "
					 "cpu %d expect %d\n",
					 smp_processor_id(), lpfc_ncmd->cpu);
		if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++;
	}
#endif
	freqpriv = nCmd->private;
	freqpriv->nvme_buf = NULL;
	nCmd->done(nCmd);

	spin_lock_irqsave(&phba->hbalock, flags);
	lpfc_ncmd->nrport = NULL;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}


/**
 * lpfc_nvme_prep_io_cmd - Prepare an NVME-over-FCP IO WQE
 * @vport: Pointer to the driver's vport instance data
 * @lpfc_ncmd: Pointer to the driver's IO buffer for this command
 * @pnode: Pointer to the node (rport) receiving the IO
 *
 * This routine builds the non-DMA portions of the fcp WQE from data
 * in the @lpfc_ncmd and @pnode structures before the IO is issued.
 *
 * Return value :
 * 0 - Success
 * -EINVAL - The node pointer is not valid
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
		      struct lpfc_nvme_buf *lpfc_ncmd,
		      struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
	union lpfc_wqe128 *wqe = (union lpfc_wqe128 *)&pwqeq->wqe;
	uint32_t req_len;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return -EINVAL;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
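	/* Summary of the cases handled below (illustrative):
	 *   sg_cnt != 0, NVMEFC_FCP_WRITE -> CMD_FCP_IWRITE64_WQE
	 *   sg_cnt != 0, otherwise (read) -> CMD_FCP_IREAD64_WQE
	 *   sg_cnt == 0 (no data)         -> CMD_FCP_ICMND64_WQE
	 */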
	wqe->fcp_iwrite.initial_xfer_len = 0;
	if (nCmd->sg_cnt) {
		if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
			/* Word 5 */
			if ((phba->cfg_nvme_enable_fb) &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				req_len = lpfc_ncmd->nvmeCmd->payload_length;
				if (req_len < pnode->nvme_fb_size)
					wqe->fcp_iwrite.initial_xfer_len =
						req_len;
				else
					wqe->fcp_iwrite.initial_xfer_len =
						pnode->nvme_fb_size;
			}

			/* Word 7 */
			bf_set(wqe_cmnd, &wqe->generic.wqe_com,
			       CMD_FCP_IWRITE64_WQE);
			bf_set(wqe_pu, &wqe->generic.wqe_com,
			       PARM_READ_CHECK);

			/* Word 10 */
			bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
			bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com,
			       LPFC_WQE_IOD_WRITE);
			bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
			       LPFC_WQE_LENLOC_WORD4);
			if (phba->cfg_nvme_oas)
				bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);

			/* Word 11 */
			bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
			       NVME_WRITE_CMD);

			atomic_inc(&phba->fc4NvmeOutputRequests);
		} else {
			/* Word 7 */
			bf_set(wqe_cmnd, &wqe->generic.wqe_com,
			       CMD_FCP_IREAD64_WQE);
			bf_set(wqe_pu, &wqe->generic.wqe_com,
			       PARM_READ_CHECK);

			/* Word 10 */
			bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
			bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
			       LPFC_WQE_IOD_READ);
			bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
			       LPFC_WQE_LENLOC_WORD4);
			if (phba->cfg_nvme_oas)
				bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);

			/* Word 11 */
			bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
			       NVME_READ_CMD);

			atomic_inc(&phba->fc4NvmeInputRequests);
		}
	} else {
		/* Word 4 */
		wqe->fcp_icmd.rsrvd4 = 0;

		/* Word 7 */
		bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_FCP_ICMND64_WQE);
		bf_set(wqe_pu, &wqe->generic.wqe_com, 0);

		/* Word 10 */
		bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		if (phba->cfg_nvme_oas)
			bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);

		/* Word 11 */
		bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD);

		atomic_inc(&phba->fc4NvmeControlRequests);
	}
	/*
	 * Finish initializing those WQE fields that are independent
	 * of the nvme_cmnd request_buffer
	 */

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 7 */
	/* Preserve Class data in the ndlp. */
	bf_set(wqe_class, &wqe->generic.wqe_com,
	       (pnode->nlp_fcp_info & 0x0f));

	/* Word 8 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	pwqeq->vport = vport;
	return 0;
}


/**
 * lpfc_nvme_prep_io_dma - Prepare the DMA portion of an NVME-over-FCP IO
 * @vport: Pointer to the driver's vport instance data
 * @lpfc_ncmd: Pointer to the driver's IO buffer for this command
 *
 * This routine sets up the command and response SGEs and walks the
 * nvme request's scatter-gather list to format the data SGEs for the
 * fcp WQE.
 *
 * Return value :
 * 0 - Success
 * 1 - Error (too many segments or payload/sg_cnt mismatch)
 **/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
		      struct lpfc_nvme_buf *lpfc_ncmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	union lpfc_wqe128 *wqe = (union lpfc_wqe128 *)&lpfc_ncmd->cur_iocbq.wqe;
	struct sli4_sge *sgl = lpfc_ncmd->nvme_sgl;
	struct scatterlist *data_sg;
	struct sli4_sge *first_data_sgl;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg, i;

	/* Fix up the command and response DMA stuff. */
	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		/*
		 * Jump over the cmd and rsp SGEs.  The fix routine
		 * has already adjusted for this.
		 */
		sgl += 2;

		first_data_sgl = sgl;
		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
		if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6058 Too many sg segments from "
					"NVME Transport.  Max %d, "
					"nvmeIO sg_cnt %d\n",
					phba->cfg_nvme_seg_cnt,
					lpfc_ncmd->seg_cnt);
			lpfc_ncmd->seg_cnt = 0;
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single nvme command.  Just run through the seg_cnt and format
		 * the sge's.
		 */
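		/*
		 * Resulting SGL layout (illustrative, N data segments):
		 *   sgl[0] -> NVME CMD IU (set up in lpfc_nvme_adj_fcp_sgls)
		 *   sgl[1] -> NVME RSP IU (set up in lpfc_nvme_adj_fcp_sgls)
		 *   sgl[2]..sgl[N+1] -> data; the final data SGE carries
		 *   the LAST bit.
		 */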
		nseg = nCmd->sg_cnt;
		data_sg = nCmd->first_sgl;
		for (i = 0; i < nseg; i++) {
			if (data_sg == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6059 dptr err %d, nseg %d\n",
						i, nseg);
				lpfc_ncmd->seg_cnt = 0;
				return 1;
			}
			physaddr = data_sg->dma_address;
			dma_len = data_sg->length;
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);

			dma_offset += dma_len;
			data_sg = sg_next(data_sg);
			sgl++;
		}
	} else {
		/* For this clause to be valid, the payload_length
		 * and sg_cnt must be zero.
		 */
		if (nCmd->payload_length != 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6063 NVME DMA Prep Err: sg_cnt %d "
					"payload_length x%x\n",
					nCmd->sg_cnt, nCmd->payload_length);
			return 1;
		}
	}

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of WQE here
	 */
	wqe->fcp_iread.total_xfer_len = nCmd->payload_length;
	return 0;
}

/**
 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the @pnvme_fcreq
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its io request handler.  This
 * routine issues an fcp WQE with data from the @pnvme_fcreq
 * data structure to the rport indicated in @pnvme_rport.
 *
 * Return value :
 * 0 - Success
 * TODO: What are the failure codes.
 **/
static int
lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
			struct nvme_fc_remote_port *pnvme_rport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *pnvme_fcreq)
{
	int ret = 0;
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_buf *lpfc_ncmd;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_qhandle *lpfc_queue_info;
	struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint64_t start = 0;
#endif

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;
	phba = vport->phba;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on)
		start = ktime_get_ns();
#endif
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6053 rport %p, ndlp %p, DID x%06x "
				 "ndlp not ready.\n",
				 rport, ndlp, pnvme_rport->port_id);

		ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6066 Missing node for DID %x\n",
					 pnvme_rport->port_id);
			ret = -ENODEV;
			goto out_fail;
		}
	}

	/* The remote node has to be a mapped target or it's an error. */
	if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6036 rport %p, DID x%06x not ready for "
				 "IO. State x%x, Type x%x\n",
				 rport, pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type);
		ret = -ENODEV;
		goto out_fail;

	}

	/* The node is shared with FCP IO, make sure the IO pending count does
	 * not exceed the programmed depth.
	 */
	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
		ret = -EAGAIN;
		goto out_fail;
	}

	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp);
	if (lpfc_ncmd == NULL) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6065 driver's buffer pool is empty, "
				 "IO failed\n");
		ret = -ENOMEM;
		goto out_fail;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on) {
		lpfc_ncmd->ts_cmd_start = start;
		lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
	}
#endif

	/*
	 * Store the data needed by the driver to issue, abort, and complete
	 * an IO.
	 * Do not let the IO hang out forever.  There is no midlayer issuing
	 * an abort so inform the FW of the maximum IO pending time.
	 */
	freqpriv->nvme_buf = lpfc_ncmd;
	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
	lpfc_ncmd->nrport = rport;
	lpfc_ncmd->ndlp = ndlp;
	lpfc_ncmd->start_time = jiffies;

	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp);
	ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_nvme_buf;
	}

	atomic_inc(&ndlp->cmd_pending);

	/*
	 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
	 * This identifier was created in our hardware queue create callback
	 * routine.  The driver now is dependent on the IO queue steering from
	 * the transport.  We are trusting the upper NVME layers know which
	 * index to use and that they have affinitized a CPU to this hardware
	 * queue.  A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
	 */
	lpfc_ncmd->cur_iocbq.hba_wqidx = lpfc_queue_info->index;

	lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 lpfc_queue_info->index, ndlp->nlp_DID);

	ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
	if (ret) {
		atomic_dec(&ndlp->cmd_pending);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
				 "6113 FCP could not issue WQE err %x "
				 "sid: x%x did: x%x oxid: x%x\n",
				 ret, vport->fc_myDID, ndlp->nlp_DID,
				 lpfc_ncmd->cur_iocbq.sli4_xritag);
		ret = -EBUSY;
		goto out_free_nvme_buf;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on)
		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();

	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
		lpfc_ncmd->cpu = smp_processor_id();
		if (lpfc_ncmd->cpu != lpfc_queue_info->index) {
			/* Check for admin queue */
			if (lpfc_queue_info->qidx) {
				lpfc_printf_vlog(vport,
						 KERN_ERR, LOG_NVME_IOERR,
						"6702 CPU Check cmd: "
						"cpu %d wq %d\n",
						lpfc_ncmd->cpu,
						lpfc_queue_info->index);
			}
			lpfc_ncmd->cpu = lpfc_queue_info->index;
		}
		if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_xmt_io[lpfc_ncmd->cpu]++;
	}
#endif
	return 0;

 out_free_nvme_buf:
	if (lpfc_ncmd->nvmeCmd->sg_cnt) {
		if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
			atomic_dec(&phba->fc4NvmeOutputRequests);
		else
			atomic_dec(&phba->fc4NvmeInputRequests);
	} else
		atomic_dec(&phba->fc4NvmeControlRequests);
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
 out_fail:
	return ret;
}

/**
 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @abts_cmpl: Pointer to the abort WQE completion entry.
 *
 * This is the callback function for any NVME FCP IO that was aborted.
 *
 * Return value:
 * None
 **/
void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			   struct lpfc_wcqe_complete *abts_cmpl)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
			"6145 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"req_tag x%x, status x%x, hwstatus x%x\n",
			cmdiocb->iocb.un.acxri.abortContextTag,
			cmdiocb->iocb.un.acxri.abortIoTag,
			cmdiocb->iotag,
			bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
			bf_get(lpfc_wcqe_c_status, abts_cmpl),
			bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
	lpfc_sli_release_iocbq(phba, cmdiocb);
}

/**
 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the @pnvme_fcreq
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its nvme request io abort handler.  This
 * routine issues an fcp Abort WQE with data from the @pnvme_fcreq
 * data structure to the rport indicated in @pnvme_rport.  This routine
 * is executed asynchronously - once the target is validated as "MAPPED" and
 * ready for IO, the driver issues the abort request and returns.
 *
 * Return value:
 * None
 **/
static void
lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
		    struct nvme_fc_remote_port *pnvme_rport,
		    void *hw_queue_handle,
		    struct nvmefc_fcp_req *pnvme_fcreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_buf *lpfc_nbuf;
	struct lpfc_iocbq *abts_buf;
	struct lpfc_iocbq *nvmereq_wqe;
	struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
	union lpfc_wqe *abts_wqe;
	unsigned long flags;
	int ret_val;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	vport = lport->vport;
	phba = vport->phba;

	/* Announce entry to new IO submit field. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6002 Abort Request to rport DID x%06x "
			 "for nvme_fc_req %p\n",
			 pnvme_rport->port_id,
			 pnvme_fcreq);

	/* If the hba is getting reset, this flag is set.  It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6139 Driver in reset cleanup - flushing "
				 "NVME Req now.  hba_flag x%x\n",
				 phba->hba_flag);
		return;
	}

	lpfc_nbuf = freqpriv->nvme_buf;
	if (!lpfc_nbuf) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6140 NVME IO req has no matching lpfc nvme "
				 "io buffer.  Skipping abort req.\n");
		return;
	} else if (!lpfc_nbuf->nvmeCmd) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6141 lpfc NVME IO req has no nvme_fcreq "
				 "io buffer.  Skipping abort req.\n");
		return;
	}
	nvmereq_wqe = &lpfc_nbuf->cur_iocbq;

	/*
	 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
	 * state must match the nvme_fcreq passed by the nvme
	 * transport.  If they don't match, it is likely the driver
	 * has already completed the NVME IO and the nvme transport
	 * has not seen it yet.
	 */
	if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6143 NVME req mismatch: "
				 "lpfc_nbuf %p nvmeCmd %p, "
				 "pnvme_fcreq %p.  Skipping Abort xri x%x\n",
				 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		return;
	}

	/* Don't abort IOs no longer on the pending queue. */
	if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6142 NVME IO req %p not queued - skipping "
				 "abort req xri x%x\n",
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		return;
	}

	lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
			 nvmereq_wqe->sli4_xritag,
			 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);

	/* Outstanding abort is in progress */
	if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6144 Outstanding NVME I/O Abort Request "
				 "still pending on nvme_fcreq %p, "
				 "lpfc_ncmd %p xri x%x\n",
				 pnvme_fcreq, lpfc_nbuf,
				 nvmereq_wqe->sli4_xritag);
		return;
	}

	abts_buf = __lpfc_sli_get_iocbq(phba);
	if (!abts_buf) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6136 No available abort wqes.  Skipping "
				 "Abts req for nvme_fcreq %p xri x%x\n",
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		return;
	}

	/* Ready - mark outstanding as aborted by driver. */
	nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;

	/* Complete prepping the abort wqe and issue to the FW. */
	abts_wqe = &abts_buf->wqe;

	/* WQEs are reused.  Clear stale data and set key fields to
	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
	 */
	memset(abts_wqe, 0, sizeof(union lpfc_wqe));
	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

	/* word 7 */
	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
	bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
	       nvmereq_wqe->iocb.ulpClass);

	/* word 8 - tell the FW to abort the IO associated with this
	 * outstanding exchange ID.
	 */
	abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;

	/* word 9 - this is the iotag for the abts_wqe completion. */
	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
	       abts_buf->iotag);

	/* word 10 */
	bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, nvmereq_wqe->hba_wqidx);
	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	/* word 11 */
	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_buf->iocb_flag |= LPFC_IO_NVME;
	abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
	abts_buf->vport = vport;
	abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
	ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (ret_val == IOCB_ERROR) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6137 Failed abts issue_wqe with status x%x "
				 "for nvme_fcreq %p.\n",
				 ret_val, pnvme_fcreq);
		lpfc_sli_release_iocbq(phba, abts_buf);
		return;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6138 Transport Abort NVME Request Issued for "
			 "ox_id x%x on reqtag x%x\n",
			 nvmereq_wqe->sli4_xritag,
			 abts_buf->iotag);
}

 1595/* Declare and initialize an instance of the FC NVME template. */
1596static struct nvme_fc_port_template lpfc_nvme_template = {
1597 /* initiator-based functions */
1598 .localport_delete = lpfc_nvme_localport_delete,
1599 .remoteport_delete = lpfc_nvme_remoteport_delete,
1600 .create_queue = lpfc_nvme_create_queue,
1601 .delete_queue = lpfc_nvme_delete_queue,
1602 .ls_req = lpfc_nvme_ls_req,
1603 .fcp_io = lpfc_nvme_fcp_io_submit,
1604 .ls_abort = lpfc_nvme_ls_abort,
1605 .fcp_abort = lpfc_nvme_fcp_abort,
1606
1607 .max_hw_queues = 1,
1608 .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1609 .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1610 .dma_boundary = 0xFFFFFFFF,
1611
1612 /* Sizes of additional private data for data structures.
 1613 * Only the lsrqst size is unused at this time.
1614 */
1615 .local_priv_sz = sizeof(struct lpfc_nvme_lport),
1616 .remote_priv_sz = sizeof(struct lpfc_nvme_rport),
1617 .lsrqst_priv_sz = 0,
James Smartbbe30122017-04-21 17:49:08 -07001618 .fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
James Smart01649562017-02-12 13:52:32 -08001619};
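/*
 * Illustrative sketch (not additional driver code): the nvme_fc transport
 * consumes this template when the localport is registered, roughly
 *
 *	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
 *					 &vport->phba->pcidev->dev, &localport);
 *
 * as done in lpfc_nvme_create_localport() below, which also adjusts
 * max_sgl_segments and max_hw_queues before registering.
 */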
1620
1621/**
1622 * lpfc_sli4_post_nvme_sgl_block - post a block of nvme sgl list to firmware
1623 * @phba: pointer to lpfc hba data structure.
1624 * @nblist: pointer to nvme buffer list.
 1625 * @count: number of nvme buffers on the list.
1626 *
 1627 * This routine is invoked to post a block of @count nvme sgl pages from an
 1628 * NVME buffer list @nblist to the HBA using a non-embedded mailbox command.
1629 * No Lock is held.
1630 *
1631 **/
1632static int
1633lpfc_sli4_post_nvme_sgl_block(struct lpfc_hba *phba,
1634 struct list_head *nblist,
1635 int count)
1636{
1637 struct lpfc_nvme_buf *lpfc_ncmd;
1638 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
1639 struct sgl_page_pairs *sgl_pg_pairs;
1640 void *viraddr;
1641 LPFC_MBOXQ_t *mbox;
1642 uint32_t reqlen, alloclen, pg_pairs;
1643 uint32_t mbox_tmo;
1644 uint16_t xritag_start = 0;
1645 int rc = 0;
1646 uint32_t shdr_status, shdr_add_status;
1647 dma_addr_t pdma_phys_bpl1;
1648 union lpfc_sli4_cfg_shdr *shdr;
1649
1650 /* Calculate the requested length of the dma memory */
1651 reqlen = count * sizeof(struct sgl_page_pairs) +
1652 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
1653 if (reqlen > SLI4_PAGE_SIZE) {
1654 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1655 "6118 Block sgl registration required DMA "
1656 "size (%d) great than a page\n", reqlen);
1657 return -ENOMEM;
1658 }
1659 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1660 if (!mbox) {
1661 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1662 "6119 Failed to allocate mbox cmd memory\n");
1663 return -ENOMEM;
1664 }
1665
1666 /* Allocate DMA memory and set up the non-embedded mailbox command */
1667 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
1668 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
1669 LPFC_SLI4_MBX_NEMBED);
1670
1671 if (alloclen < reqlen) {
1672 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1673 "6120 Allocated DMA memory size (%d) is "
1674 "less than the requested DMA memory "
1675 "size (%d)\n", alloclen, reqlen);
1676 lpfc_sli4_mbox_cmd_free(phba, mbox);
1677 return -ENOMEM;
1678 }
1679
1680 /* Get the first SGE entry from the non-embedded DMA memory */
1681 viraddr = mbox->sge_array->addr[0];
1682
1683 /* Set up the SGL pages in the non-embedded DMA pages */
1684 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
1685 sgl_pg_pairs = &sgl->sgl_pg_pairs;
1686
1687 pg_pairs = 0;
1688 list_for_each_entry(lpfc_ncmd, nblist, list) {
1689 /* Set up the sge entry */
1690 sgl_pg_pairs->sgl_pg0_addr_lo =
1691 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
1692 sgl_pg_pairs->sgl_pg0_addr_hi =
1693 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
1694 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
1695 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
1696 SGL_PAGE_SIZE;
1697 else
1698 pdma_phys_bpl1 = 0;
1699 sgl_pg_pairs->sgl_pg1_addr_lo =
1700 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
1701 sgl_pg_pairs->sgl_pg1_addr_hi =
1702 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
1703 /* Keep the first xritag on the list */
1704 if (pg_pairs == 0)
1705 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
1706 sgl_pg_pairs++;
1707 pg_pairs++;
1708 }
1709 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
1710 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
1711 /* Perform endian conversion if necessary */
1712 sgl->word0 = cpu_to_le32(sgl->word0);
1713
1714 if (!phba->sli4_hba.intr_enable)
1715 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
1716 else {
1717 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
1718 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
1719 }
1720 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
1721 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1722 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1723 if (rc != MBX_TIMEOUT)
1724 lpfc_sli4_mbox_cmd_free(phba, mbox);
1725 if (shdr_status || shdr_add_status || rc) {
1726 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1727 "6125 POST_SGL_BLOCK mailbox command failed "
1728 "status x%x add_status x%x mbx status x%x\n",
1729 shdr_status, shdr_add_status, rc);
1730 rc = -ENXIO;
1731 }
1732 return rc;
1733}
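/*
 * Worked sizing example (symbolic; the struct sizes are not spelled out
 * here): the non-embedded mailbox payload built above must fit in one
 * SLI4 page, so the largest block this routine accepts is
 *
 *	count_max = (SLI4_PAGE_SIZE - sizeof(union lpfc_sli4_cfg_shdr)
 *		     - sizeof(uint32_t)) / sizeof(struct sgl_page_pairs)
 *
 * Callers keep blocks at or below LPFC_NEMBED_MBOX_SGL_CNT (see
 * lpfc_post_nvme_sgl_list() below), so in practice the -ENOMEM check
 * above is a sanity net.
 */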
1734
1735/**
1736 * lpfc_post_nvme_sgl_list - Post blocks of nvme buffer sgls from a list
1737 * @phba: pointer to lpfc hba data structure.
1738 * @post_nblist: pointer to the nvme buffer list.
1739 *
1740 * This routine walks a list of nvme buffers that was passed in. It attempts
 1741 * to construct blocks of nvme buffer sgls which contain contiguous xris and
 1742 * uses the non-embedded SGL block post mailbox commands to post to the port.
 1743 * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses the
 1744 * embedded SGL post mailbox command for posting. The @post_nblist passed in
 1745 * must be a local list, thus no lock is needed when manipulating the list.
1746 *
1747 * Returns: 0 = failure, non-zero number of successfully posted buffers.
1748 **/
1749static int
1750lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
1751 struct list_head *post_nblist, int sb_count)
1752{
1753 struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
1754 int status, sgl_size;
1755 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
1756 dma_addr_t pdma_phys_sgl1;
1757 int last_xritag = NO_XRI;
1758 int cur_xritag;
1759 LIST_HEAD(prep_nblist);
1760 LIST_HEAD(blck_nblist);
1761 LIST_HEAD(nvme_nblist);
1762
1763 /* sanity check */
1764 if (sb_count <= 0)
1765 return -EINVAL;
1766
1767 sgl_size = phba->cfg_sg_dma_buf_size;
1768
1769 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
1770 list_del_init(&lpfc_ncmd->list);
1771 block_cnt++;
1772 if ((last_xritag != NO_XRI) &&
1773 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
1774 /* a hole in xri block, form a sgl posting block */
1775 list_splice_init(&prep_nblist, &blck_nblist);
1776 post_cnt = block_cnt - 1;
1777 /* prepare list for next posting block */
1778 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
1779 block_cnt = 1;
1780 } else {
1781 /* prepare list for next posting block */
1782 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
1783 /* enough sgls for non-embed sgl mbox command */
1784 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
1785 list_splice_init(&prep_nblist, &blck_nblist);
1786 post_cnt = block_cnt;
1787 block_cnt = 0;
1788 }
1789 }
1790 num_posting++;
1791 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
1792
1793 /* end of repost sgl list condition for NVME buffers */
1794 if (num_posting == sb_count) {
1795 if (post_cnt == 0) {
1796 /* last sgl posting block */
1797 list_splice_init(&prep_nblist, &blck_nblist);
1798 post_cnt = block_cnt;
1799 } else if (block_cnt == 1) {
1800 /* last single sgl with non-contiguous xri */
1801 if (sgl_size > SGL_PAGE_SIZE)
1802 pdma_phys_sgl1 =
1803 lpfc_ncmd->dma_phys_sgl +
1804 SGL_PAGE_SIZE;
1805 else
1806 pdma_phys_sgl1 = 0;
1807 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
1808 status = lpfc_sli4_post_sgl(phba,
1809 lpfc_ncmd->dma_phys_sgl,
1810 pdma_phys_sgl1, cur_xritag);
1811 if (status) {
1812 /* failure, put on abort nvme list */
James Smart318083a2017-03-04 09:30:30 -08001813 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
James Smart01649562017-02-12 13:52:32 -08001814 } else {
1815 /* success, put on NVME buffer list */
James Smart318083a2017-03-04 09:30:30 -08001816 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
James Smart01649562017-02-12 13:52:32 -08001817 lpfc_ncmd->status = IOSTAT_SUCCESS;
1818 num_posted++;
1819 }
1820 /* success, put on NVME buffer sgl list */
1821 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
1822 }
1823 }
1824
1825 /* continue until a nembed page worth of sgls */
1826 if (post_cnt == 0)
1827 continue;
1828
1829 /* post block of NVME buffer list sgls */
1830 status = lpfc_sli4_post_nvme_sgl_block(phba, &blck_nblist,
1831 post_cnt);
1832
 1833 /* don't reset xritag due to hole in xri block */
1834 if (block_cnt == 0)
1835 last_xritag = NO_XRI;
1836
1837 /* reset NVME buffer post count for next round of posting */
1838 post_cnt = 0;
1839
1840 /* put posted NVME buffer-sgl posted on NVME buffer sgl list */
1841 while (!list_empty(&blck_nblist)) {
1842 list_remove_head(&blck_nblist, lpfc_ncmd,
1843 struct lpfc_nvme_buf, list);
1844 if (status) {
1845 /* failure, put on abort nvme list */
James Smart318083a2017-03-04 09:30:30 -08001846 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
James Smart01649562017-02-12 13:52:32 -08001847 } else {
1848 /* success, put on NVME buffer list */
James Smart318083a2017-03-04 09:30:30 -08001849 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
James Smart01649562017-02-12 13:52:32 -08001850 lpfc_ncmd->status = IOSTAT_SUCCESS;
1851 num_posted++;
1852 }
1853 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
1854 }
1855 }
1856 /* Push NVME buffers with sgl posted to the available list */
1857 while (!list_empty(&nvme_nblist)) {
1858 list_remove_head(&nvme_nblist, lpfc_ncmd,
1859 struct lpfc_nvme_buf, list);
1860 lpfc_release_nvme_buf(phba, lpfc_ncmd);
1861 }
1862 return num_posted;
1863}
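/*
 * Worked example of the blocking logic above (xri values are made up):
 * for buffers carrying xris 100, 101, 102, 200, 201, 300 the routine posts
 *
 *	{100, 101, 102}	- non-embedded block post, run ended by the hole at 200
 *	{200, 201}	- non-embedded block post, run ended by the hole at 300
 *	{300}		- trailing single buffer with a non-contiguous xri,
 *			  posted with the embedded lpfc_sli4_post_sgl() command
 *
 * A run is also flushed early whenever it reaches LPFC_NEMBED_MBOX_SGL_CNT
 * entries.
 */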
1864
1865/**
1866 * lpfc_repost_nvme_sgl_list - Repost all the allocated nvme buffer sgls
1867 * @phba: pointer to lpfc hba data structure.
1868 *
1869 * This routine walks the list of nvme buffers that have been allocated and
1870 * repost them to the port by using SGL block post. This is needed after a
1871 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
1872 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
1873 * to the lpfc_nvme_buf_list. If the repost fails, reject all nvme buffers.
1874 *
1875 * Returns: 0 = success, non-zero failure.
1876 **/
1877int
1878lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
1879{
1880 LIST_HEAD(post_nblist);
1881 int num_posted, rc = 0;
1882
 1883 /* get all NVME buffers that need to be reposted onto a local list */
1884 spin_lock_irq(&phba->nvme_buf_list_get_lock);
1885 spin_lock(&phba->nvme_buf_list_put_lock);
1886 list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
1887 list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
1888 spin_unlock(&phba->nvme_buf_list_put_lock);
1889 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
1890
1891 /* post the list of nvme buffer sgls to port if available */
1892 if (!list_empty(&post_nblist)) {
1893 num_posted = lpfc_post_nvme_sgl_list(phba, &post_nblist,
1894 phba->sli4_hba.nvme_xri_cnt);
1895 /* failed to post any nvme buffer, return error */
1896 if (num_posted == 0)
1897 rc = -EIO;
1898 }
1899 return rc;
1900}
1901
1902/**
 1903 * lpfc_new_nvme_buf - NVME buffer allocator for HBA with SLI4 IF spec
 1904 * @vport: The virtual port for which this call is being executed.
 1905 * @num_to_alloc: The requested number of buffers to allocate.
 1906 *
 1907 * This routine allocates nvme buffers for a device with SLI-4 interface spec.
 1908 * The nvme buffer contains all the necessary information needed to initiate
 1909 * an NVME I/O. After allocating up to @num_to_alloc NVME buffers and putting
 1910 * them on a list, it posts them to the port by using SGL block post.
1911 *
1912 * Return codes:
1913 * int - number of nvme buffers that were allocated and posted.
1914 * 0 = failure, less than num_to_alloc is a partial failure.
1915 **/
1916static int
1917lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
1918{
1919 struct lpfc_hba *phba = vport->phba;
1920 struct lpfc_nvme_buf *lpfc_ncmd;
1921 struct lpfc_iocbq *pwqeq;
1922 union lpfc_wqe128 *wqe;
1923 struct sli4_sge *sgl;
1924 dma_addr_t pdma_phys_sgl;
1925 uint16_t iotag, lxri = 0;
1926 int bcnt, num_posted, sgl_size;
1927 LIST_HEAD(prep_nblist);
1928 LIST_HEAD(post_nblist);
1929 LIST_HEAD(nvme_nblist);
1930
1931 sgl_size = phba->cfg_sg_dma_buf_size;
1932
1933 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
1934 lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL);
1935 if (!lpfc_ncmd)
1936 break;
1937 /*
1938 * Get memory from the pci pool to map the virt space to
1939 * pci bus space for an I/O. The DMA buffer includes the
1940 * number of SGE's necessary to support the sg_tablesize.
1941 */
1942 lpfc_ncmd->data = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool,
1943 GFP_KERNEL,
1944 &lpfc_ncmd->dma_handle);
1945 if (!lpfc_ncmd->data) {
1946 kfree(lpfc_ncmd);
1947 break;
1948 }
1949 memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size);
1950
1951 lxri = lpfc_sli4_next_xritag(phba);
1952 if (lxri == NO_XRI) {
1953 pci_pool_free(phba->lpfc_sg_dma_buf_pool,
1954 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
1955 kfree(lpfc_ncmd);
1956 break;
1957 }
1958 pwqeq = &(lpfc_ncmd->cur_iocbq);
1959 wqe = (union lpfc_wqe128 *)&pwqeq->wqe;
1960
1961 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
1962 iotag = lpfc_sli_next_iotag(phba, pwqeq);
1963 if (iotag == 0) {
1964 pci_pool_free(phba->lpfc_sg_dma_buf_pool,
1965 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
1966 kfree(lpfc_ncmd);
1967 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1968 "6121 Failed to allocated IOTAG for"
1969 " XRI:0x%x\n", lxri);
1970 lpfc_sli4_free_xri(phba, lxri);
1971 break;
1972 }
1973 pwqeq->sli4_lxritag = lxri;
1974 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
1975 pwqeq->iocb_flag |= LPFC_IO_NVME;
1976 pwqeq->context1 = lpfc_ncmd;
1977 pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
1978
1979 /* Initialize local short-hand pointers. */
1980 lpfc_ncmd->nvme_sgl = lpfc_ncmd->data;
1981 sgl = lpfc_ncmd->nvme_sgl;
1982 pdma_phys_sgl = lpfc_ncmd->dma_handle;
1983 lpfc_ncmd->dma_phys_sgl = pdma_phys_sgl;
1984
1985 /* Rsp SGE will be filled in when we rcv an IO
1986 * from the NVME Layer to be sent.
1987 * The cmd is going to be embedded so we need a SKIP SGE.
1988 */
1989 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1990 bf_set(lpfc_sli4_sge_last, sgl, 0);
1991 sgl->word2 = cpu_to_le32(sgl->word2);
1992 /* Fill in word 3 / sgl_len during cmd submission */
1993
1994 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
1995
1996 /* Word 7 */
1997 bf_set(wqe_erp, &wqe->generic.wqe_com, 0);
1998 /* NVME upper layers will time things out, if needed */
1999 bf_set(wqe_tmo, &wqe->generic.wqe_com, 0);
2000
2001 /* Word 10 */
2002 bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
2003 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
2004
2005 /* add the nvme buffer to a post list */
2006 list_add_tail(&lpfc_ncmd->list, &post_nblist);
2007 spin_lock_irq(&phba->nvme_buf_list_get_lock);
2008 phba->sli4_hba.nvme_xri_cnt++;
2009 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
2010 }
2011 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
2012 "6114 Allocate %d out of %d requested new NVME "
2013 "buffers\n", bcnt, num_to_alloc);
2014
2015 /* post the list of nvme buffer sgls to port if available */
2016 if (!list_empty(&post_nblist))
2017 num_posted = lpfc_post_nvme_sgl_list(phba,
2018 &post_nblist, bcnt);
2019 else
2020 num_posted = 0;
2021
2022 return num_posted;
2023}
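/*
 * Sketch of the per-IO SGL prepared above (inferred from this setup code,
 * not from the SLI-4 documentation):
 *
 *	SGE[0]	SKIP - the NVME command itself is embedded in the WQE
 *	SGE[1]	rsp  - filled in when the transport hands down an IO
 *	SGE[2+]	data - filled in at submit time from the request's SG list
 */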
2024
2025/**
2026 * lpfc_get_nvme_buf - Get a nvme buffer from lpfc_nvme_buf_list of the HBA
2027 * @phba: The HBA for which this call is being executed.
2028 *
 2029 * This routine removes an nvme buffer from the head of the @phba
 2030 * lpfc_nvme_buf_list and returns it to the caller.
2031 *
2032 * Return codes:
2033 * NULL - Error
2034 * Pointer to lpfc_nvme_buf - Success
2035 **/
2036static struct lpfc_nvme_buf *
2037lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
2038{
2039 struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
2040 unsigned long iflag = 0;
2041 int found = 0;
2042
2043 spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
2044 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
2045 &phba->lpfc_nvme_buf_list_get, list) {
2046 if (lpfc_test_rrq_active(phba, ndlp,
2047 lpfc_ncmd->cur_iocbq.sli4_lxritag))
2048 continue;
James Smartbbe30122017-04-21 17:49:08 -07002049 list_del_init(&lpfc_ncmd->list);
James Smart01649562017-02-12 13:52:32 -08002050 found = 1;
2051 break;
2052 }
2053 if (!found) {
2054 spin_lock(&phba->nvme_buf_list_put_lock);
2055 list_splice(&phba->lpfc_nvme_buf_list_put,
2056 &phba->lpfc_nvme_buf_list_get);
2057 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
2058 spin_unlock(&phba->nvme_buf_list_put_lock);
2059 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
2060 &phba->lpfc_nvme_buf_list_get, list) {
2061 if (lpfc_test_rrq_active(
2062 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
2063 continue;
James Smartbbe30122017-04-21 17:49:08 -07002064 list_del_init(&lpfc_ncmd->list);
James Smart01649562017-02-12 13:52:32 -08002065 found = 1;
2066 break;
2067 }
2068 }
2069 spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
2070 if (!found)
2071 return NULL;
2072 return lpfc_ncmd;
2073}
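/*
 * Illustrative caller pattern (sketch only; the error value shown is an
 * assumption, not taken from the submit path): every successful get is
 * paired with lpfc_release_nvme_buf() on completion or on a failed
 * submission, e.g.
 *
 *	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp);
 *	if (!lpfc_ncmd)
 *		return -EBUSY;		(assumed handling)
 *	...
 *	lpfc_release_nvme_buf(phba, lpfc_ncmd);
 */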
2074
2075/**
 2076 * lpfc_release_nvme_buf: Return an nvme buffer back to the hba nvme buf list.
2077 * @phba: The Hba for which this call is being executed.
2078 * @lpfc_ncmd: The nvme buffer which is being released.
2079 *
 2080 * This routine releases the @lpfc_ncmd nvme buffer by adding it to the tail
 2081 * of the @phba lpfc_nvme_buf_list. For SLI4, XRIs are tied to the nvme buffer
 2082 * and cannot be reused for at least RA_TOV amount of time if the IO was
 2083 * aborted.
2084 **/
2085static void
2086lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
2087{
2088 unsigned long iflag = 0;
2089
2090 lpfc_ncmd->nonsg_phys = 0;
James Smart318083a2017-03-04 09:30:30 -08002091 if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
James Smart86c67372017-04-21 16:05:04 -07002092 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2093 "6310 XB release deferred for "
2094 "ox_id x%x on reqtag x%x\n",
2095 lpfc_ncmd->cur_iocbq.sli4_xritag,
2096 lpfc_ncmd->cur_iocbq.iotag);
2097
James Smart01649562017-02-12 13:52:32 -08002098 spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
2099 iflag);
2100 lpfc_ncmd->nvmeCmd = NULL;
2101 list_add_tail(&lpfc_ncmd->list,
2102 &phba->sli4_hba.lpfc_abts_nvme_buf_list);
2103 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
2104 iflag);
2105 } else {
2106 lpfc_ncmd->nvmeCmd = NULL;
2107 lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME;
2108 spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
2109 list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put);
2110 spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
2111 }
2112}
2113
2114/**
2115 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
 2116 * @vport: the lpfc_vport instance requesting a localport.
2117 *
2118 * This routine is invoked to create an nvme localport instance to bind
2119 * to the nvme_fc_transport. It is called once during driver load
2120 * like lpfc_create_shost after all other services are initialized.
2121 * It requires a vport, vpi, and wwns at call time. Other localport
2122 * parameters are modified as the driver's FCID and the Fabric WWN
2123 * are established.
2124 *
2125 * Return codes
2126 * 0 - successful
2127 * -ENOMEM - no heap memory available
2128 * other values - from nvme registration upcall
2129 **/
2130int
2131lpfc_nvme_create_localport(struct lpfc_vport *vport)
2132{
James Smart166d7212017-03-04 09:30:33 -08002133 int ret = 0;
James Smart01649562017-02-12 13:52:32 -08002134 struct lpfc_hba *phba = vport->phba;
2135 struct nvme_fc_port_info nfcp_info;
2136 struct nvme_fc_local_port *localport;
2137 struct lpfc_nvme_lport *lport;
James Smart166d7212017-03-04 09:30:33 -08002138 int len;
James Smart01649562017-02-12 13:52:32 -08002139
2140 /* Initialize this localport instance. The vport wwn usage ensures
2141 * that NPIV is accounted for.
2142 */
2143 memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
2144 nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
2145 nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
2146 nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
2147
James Smart4d4c4a42017-04-21 16:05:01 -07002148 /* Limit to LPFC_MAX_NVME_SEG_CNT.
2149 * For now need + 1 to get around NVME transport logic.
2150 */
2151 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
2152 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_INIT,
2153 "6300 Reducing sg segment cnt to %d\n",
2154 LPFC_MAX_NVME_SEG_CNT);
2155 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
2156 } else {
2157 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
2158 }
2159 lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
James Smart01649562017-02-12 13:52:32 -08002160 lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
2161
2162 /* localport is allocated from the stack, but the registration
2163 * call allocates heap memory as well as the private area.
2164 */
James Smart7d708032017-03-08 14:36:01 -08002165#if (IS_ENABLED(CONFIG_NVME_FC))
James Smart01649562017-02-12 13:52:32 -08002166 ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
2167 &vport->phba->pcidev->dev, &localport);
James Smart166d7212017-03-04 09:30:33 -08002168#else
2169 ret = -ENOMEM;
2170#endif
James Smart01649562017-02-12 13:52:32 -08002171 if (!ret) {
2172 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
2173 "6005 Successfully registered local "
2174 "NVME port num %d, localP %p, private %p, "
2175 "sg_seg %d\n",
2176 localport->port_num, localport,
2177 localport->private,
2178 lpfc_nvme_template.max_sgl_segments);
2179
2180 /* Private is our lport size declared in the template. */
2181 lport = (struct lpfc_nvme_lport *)localport->private;
2182 vport->localport = localport;
2183 lport->vport = vport;
James Smart01649562017-02-12 13:52:32 -08002184 vport->nvmei_support = 1;
James Smart3ebd9b42017-03-04 09:30:29 -08002185 len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
2186 vport->phba->total_nvme_bufs += len;
James Smart01649562017-02-12 13:52:32 -08002187 }
2188
James Smart01649562017-02-12 13:52:32 -08002189 return ret;
2190}
2191
2192/**
2193 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
 2194 * @vport: pointer to the lpfc vport data structure.
2195 *
2196 * This routine is invoked to destroy all lports bound to the phba.
2197 * The lport memory was allocated by the nvme fc transport and is
2198 * released there. This routine ensures all rports bound to the
2199 * lport have been disconnected.
2200 *
2201 **/
2202void
2203lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2204{
James Smart7d708032017-03-08 14:36:01 -08002205#if (IS_ENABLED(CONFIG_NVME_FC))
James Smart01649562017-02-12 13:52:32 -08002206 struct nvme_fc_local_port *localport;
2207 struct lpfc_nvme_lport *lport;
James Smart01649562017-02-12 13:52:32 -08002208 int ret;
2209
2210 if (vport->nvmei_support == 0)
2211 return;
2212
2213 localport = vport->localport;
2214 vport->localport = NULL;
2215 lport = (struct lpfc_nvme_lport *)localport->private;
2216
2217 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2218 "6011 Destroying NVME localport %p\n",
2219 localport);
James Smart166d7212017-03-04 09:30:33 -08002220
James Smart01649562017-02-12 13:52:32 -08002221 /* lport's rport list is clear. Unregister
2222 * lport and release resources.
2223 */
2224 init_completion(&lport->lport_unreg_done);
2225 ret = nvme_fc_unregister_localport(localport);
2226 wait_for_completion_timeout(&lport->lport_unreg_done, 5);
2227
2228 /* Regardless of the unregister upcall response, clear
2229 * nvmei_support. All rports are unregistered and the
2230 * driver will clean up.
2231 */
2232 vport->nvmei_support = 0;
2233 if (ret == 0) {
2234 lpfc_printf_vlog(vport,
2235 KERN_INFO, LOG_NVME_DISC,
2236 "6009 Unregistered lport Success\n");
2237 } else {
2238 lpfc_printf_vlog(vport,
2239 KERN_INFO, LOG_NVME_DISC,
2240 "6010 Unregistered lport "
2241 "Failed, status x%x\n",
2242 ret);
2243 }
James Smart166d7212017-03-04 09:30:33 -08002244#endif
James Smart01649562017-02-12 13:52:32 -08002245}
2246
2247void
2248lpfc_nvme_update_localport(struct lpfc_vport *vport)
2249{
James Smart4410a672017-04-21 16:04:57 -07002250#if (IS_ENABLED(CONFIG_NVME_FC))
James Smart01649562017-02-12 13:52:32 -08002251 struct nvme_fc_local_port *localport;
2252 struct lpfc_nvme_lport *lport;
2253
2254 localport = vport->localport;
James Smart4410a672017-04-21 16:04:57 -07002255 if (!localport) {
2256 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2257 "6710 Update NVME fail. No localport\n");
2258 return;
2259 }
James Smart01649562017-02-12 13:52:32 -08002260 lport = (struct lpfc_nvme_lport *)localport->private;
James Smart4410a672017-04-21 16:04:57 -07002261 if (!lport) {
2262 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2263 "6171 Update NVME fail. localP %p, No lport\n",
2264 localport);
2265 return;
2266 }
James Smart01649562017-02-12 13:52:32 -08002267 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2268 "6012 Update NVME lport %p did x%x\n",
2269 localport, vport->fc_myDID);
2270
2271 localport->port_id = vport->fc_myDID;
2272 if (localport->port_id == 0)
2273 localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
2274 else
2275 localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
2276
2277 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2278 "6030 bound lport %p to DID x%06x\n",
2279 lport, localport->port_id);
James Smart4410a672017-04-21 16:04:57 -07002280#endif
James Smart01649562017-02-12 13:52:32 -08002281}
2282
2283int
2284lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2285{
James Smart7d708032017-03-08 14:36:01 -08002286#if (IS_ENABLED(CONFIG_NVME_FC))
James Smart01649562017-02-12 13:52:32 -08002287 int ret = 0;
2288 struct nvme_fc_local_port *localport;
2289 struct lpfc_nvme_lport *lport;
2290 struct lpfc_nvme_rport *rport;
2291 struct nvme_fc_remote_port *remote_port;
2292 struct nvme_fc_port_info rpinfo;
2293
2294 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
2295 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
2296 ndlp->nlp_DID, ndlp->nlp_type);
2297
2298 localport = vport->localport;
2299 lport = (struct lpfc_nvme_lport *)localport->private;
2300
James Smart7a06dcd2017-06-01 21:06:55 -07002301 /* NVME rports are not preserved across devloss.
2302 * Just register this instance. Note, rpinfo->dev_loss_tmo
2303 * is left 0 to indicate accept transport defaults. The
2304 * driver communicates port role capabilities consistent
2305 * with the PRLI response data.
2306 */
2307 memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
2308 rpinfo.port_id = ndlp->nlp_DID;
2309 if (ndlp->nlp_type & NLP_NVME_TARGET)
2310 rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
2311 if (ndlp->nlp_type & NLP_NVME_INITIATOR)
2312 rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
James Smart01649562017-02-12 13:52:32 -08002313
James Smart7a06dcd2017-06-01 21:06:55 -07002314 if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
2315 rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
2316
2317 rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
2318 rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
2319 ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
2320 if (!ret) {
2321 /* If the ndlp already has an nrport, this is just
2322 * a resume of the existing rport. Else this is a
2323 * new rport.
James Smart01649562017-02-12 13:52:32 -08002324 */
James Smart7a06dcd2017-06-01 21:06:55 -07002325 rport = remote_port->private;
2326 if (ndlp->nrport == rport) {
2327 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
2328 LOG_NVME_DISC,
2329 "6014 Rebinding lport to "
2330 "rport wwpn 0x%llx, "
2331 "Data: x%x x%x x%x x%06x\n",
2332 remote_port->port_name,
2333 remote_port->port_id,
2334 remote_port->port_role,
James Smart01649562017-02-12 13:52:32 -08002335 ndlp->nlp_type,
2336 ndlp->nlp_DID);
James Smart7a06dcd2017-06-01 21:06:55 -07002337 } else {
2338 /* New rport. */
James Smart01649562017-02-12 13:52:32 -08002339 rport->remoteport = remote_port;
2340 rport->lport = lport;
2341 rport->ndlp = lpfc_nlp_get(ndlp);
2342 if (!rport->ndlp)
2343 return -1;
2344 ndlp->nrport = rport;
James Smart01649562017-02-12 13:52:32 -08002345 lpfc_printf_vlog(vport, KERN_INFO,
2346 LOG_NVME_DISC | LOG_NODE,
James Smart7a06dcd2017-06-01 21:06:55 -07002347 "6022 Binding new rport to "
2348 "lport %p Rport WWNN 0x%llx, "
2349 "Rport WWPN 0x%llx DID "
2350 "x%06x Role x%x\n",
James Smart01649562017-02-12 13:52:32 -08002351 lport,
2352 rpinfo.node_name, rpinfo.port_name,
2353 rpinfo.port_id, rpinfo.port_role);
James Smart01649562017-02-12 13:52:32 -08002354 }
2355 } else {
James Smart7a06dcd2017-06-01 21:06:55 -07002356 lpfc_printf_vlog(vport, KERN_ERR,
2357 LOG_NVME_DISC | LOG_NODE,
2358 "6031 RemotePort Registration failed "
2359 "err: %d, DID x%06x\n",
2360 ret, ndlp->nlp_DID);
James Smart01649562017-02-12 13:52:32 -08002361 }
James Smart7a06dcd2017-06-01 21:06:55 -07002362
James Smart01649562017-02-12 13:52:32 -08002363 return ret;
James Smart166d7212017-03-04 09:30:33 -08002364#else
2365 return 0;
2366#endif
James Smart01649562017-02-12 13:52:32 -08002367}
2368
2369/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
2370 *
2371 * There is no notion of Devloss or rport recovery from the current
2372 * nvme_transport perspective. Loss of an rport just means IO cannot
 2373 * be sent and recovery is completely up to the initiator.
2374 * For now, the driver just unbinds the DID and port_role so that
2375 * no further IO can be issued. Changes are planned for later.
2376 *
 2377 * Notes - the ndlp reference count is not decremented here
 2378 * since there is no nvme_transport api for devloss. Node ref count
2379 * is only adjusted in driver unload.
2380 */
2381void
2382lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2383{
James Smart7d708032017-03-08 14:36:01 -08002384#if (IS_ENABLED(CONFIG_NVME_FC))
James Smart01649562017-02-12 13:52:32 -08002385 int ret;
2386 struct nvme_fc_local_port *localport;
2387 struct lpfc_nvme_lport *lport;
2388 struct lpfc_nvme_rport *rport;
2389 struct nvme_fc_remote_port *remoteport;
2390
2391 localport = vport->localport;
2392
 2393 /* This is a fundamental error. The localport is always
2394 * available until driver unload. Just exit.
2395 */
2396 if (!localport)
2397 return;
2398
2399 lport = (struct lpfc_nvme_lport *)localport->private;
2400 if (!lport)
2401 goto input_err;
2402
2403 rport = ndlp->nrport;
2404 if (!rport)
2405 goto input_err;
2406
2407 remoteport = rport->remoteport;
2408 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2409 "6033 Unreg nvme remoteport %p, portname x%llx, "
2410 "port_id x%06x, portstate x%x port type x%x\n",
2411 remoteport, remoteport->port_name,
2412 remoteport->port_id, remoteport->port_state,
2413 ndlp->nlp_type);
2414
2415 /* Sanity check ndlp type. Only call for NVME ports. Don't
2416 * clear any rport state until the transport calls back.
2417 */
2418 if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
2419 init_completion(&rport->rport_unreg_done);
James Smart7a06dcd2017-06-01 21:06:55 -07002420
2421 /* No concern about the role change on the nvme remoteport.
2422 * The transport will update it.
2423 */
James Smart01649562017-02-12 13:52:32 -08002424 ret = nvme_fc_unregister_remoteport(remoteport);
2425 if (ret != 0) {
2426 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2427 "6167 NVME unregister failed %d "
2428 "port_state x%x\n",
2429 ret, remoteport->port_state);
2430 }
2431
James Smart01649562017-02-12 13:52:32 -08002432 }
2433 return;
2434
2435 input_err:
James Smart166d7212017-03-04 09:30:33 -08002436#endif
James Smart01649562017-02-12 13:52:32 -08002437 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
James Smart2b7824d2017-04-21 16:04:59 -07002438 "6168 State error: lport %p, rport %p FCID x%06x\n",
James Smart01649562017-02-12 13:52:32 -08002439 vport->localport, ndlp->rport, ndlp->nlp_DID);
2440}
James Smart318083a2017-03-04 09:30:30 -08002441
2442/**
2443 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
2444 * @phba: pointer to lpfc hba data structure.
 2445 * @axri: pointer to the nvme xri abort wcqe structure.
 2446 *
 2447 * This routine is invoked by the worker thread to process a SLI4 fast-path
 2448 * NVME aborted xri.
2449 **/
2450void
2451lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
2452 struct sli4_wcqe_xri_aborted *axri)
2453{
2454 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
2455 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
2456 struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
2457 struct lpfc_nodelist *ndlp;
2458 unsigned long iflag = 0;
2459 int rrq_empty = 0;
2460
2461 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
2462 return;
2463 spin_lock_irqsave(&phba->hbalock, iflag);
2464 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
2465 list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
2466 &phba->sli4_hba.lpfc_abts_nvme_buf_list,
2467 list) {
2468 if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
James Smartbbe30122017-04-21 17:49:08 -07002469 list_del_init(&lpfc_ncmd->list);
James Smart318083a2017-03-04 09:30:30 -08002470 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
2471 lpfc_ncmd->status = IOSTAT_SUCCESS;
2472 spin_unlock(
2473 &phba->sli4_hba.abts_nvme_buf_list_lock);
2474
2475 rrq_empty = list_empty(&phba->active_rrq_list);
2476 spin_unlock_irqrestore(&phba->hbalock, iflag);
2477 ndlp = lpfc_ncmd->ndlp;
2478 if (ndlp) {
2479 lpfc_set_rrq_active(
2480 phba, ndlp,
2481 lpfc_ncmd->cur_iocbq.sli4_lxritag,
2482 rxid, 1);
2483 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
2484 }
James Smart86c67372017-04-21 16:05:04 -07002485
2486 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2487 "6311 XRI Aborted xri x%x tag x%x "
2488 "released\n",
2489 xri, lpfc_ncmd->cur_iocbq.iotag);
2490
James Smart318083a2017-03-04 09:30:30 -08002491 lpfc_release_nvme_buf(phba, lpfc_ncmd);
2492 if (rrq_empty)
2493 lpfc_worker_wake_up(phba);
2494 return;
2495 }
2496 }
2497 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
2498 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart86c67372017-04-21 16:05:04 -07002499
2500 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2501 "6312 XRI Aborted xri x%x not found\n", xri);
2502
James Smart318083a2017-03-04 09:30:30 -08002503}