1/*
2 * Copyright (c) 2016 Avago Technologies. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful.
9 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13 * See the GNU General Public License for more details, a copy of which
14 * can be found in the file COPYING included with this package
15 *
16 */
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18#include <linux/module.h>
19#include <linux/parser.h>
20#include <uapi/scsi/fc/fc_fs.h>
21#include <uapi/scsi/fc/fc_els.h>
22
23#include "nvme.h"
24#include "fabrics.h"
25#include <linux/nvme-fc-driver.h>
26#include <linux/nvme-fc.h>
27
28
29/* *************************** Data Structures/Defines ****************** */
30
31
32/*
33 * We handle AEN commands ourselves and don't even let the
34 * block layer know about them.
35 */
36#define NVME_FC_NR_AEN_COMMANDS 1
37#define NVME_FC_AQ_BLKMQ_DEPTH \
38 (NVMF_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
39#define AEN_CMDID_BASE (NVME_FC_AQ_BLKMQ_DEPTH + 1)
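/*
 * AEN ops are not taken from the blk-mq tag space: nvme_fc_init_aen_ops()
 * assigns them rqno values at or above AEN_CMDID_BASE, which is how the
 * completion path in nvme_fc_fcpio_done() tells an AEN completion apart
 * from a normal admin command.
 */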
40
41enum nvme_fc_queue_flags {
42 NVME_FC_Q_CONNECTED = (1 << 0),
43};
44
45#define NVMEFC_QUEUE_DELAY 3 /* ms units */
46
47struct nvme_fc_queue {
48 struct nvme_fc_ctrl *ctrl;
49 struct device *dev;
50 struct blk_mq_hw_ctx *hctx;
51 void *lldd_handle;
52 int queue_size;
53 size_t cmnd_capsule_len;
54 u32 qnum;
55 u32 rqcnt;
56 u32 seqno;
57
58 u64 connection_id;
59 atomic_t csn;
60
61 unsigned long flags;
62} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
63
64struct nvmefc_ls_req_op {
65 struct nvmefc_ls_req ls_req;
66
67 struct nvme_fc_rport *rport;
68 struct nvme_fc_queue *queue;
69 struct request *rq;
70
71 int ls_error;
72 struct completion ls_done;
73 struct list_head lsreq_list; /* rport->ls_req_list */
74 bool req_queued;
75};
76
77enum nvme_fcpop_state {
78 FCPOP_STATE_UNINIT = 0,
79 FCPOP_STATE_IDLE = 1,
80 FCPOP_STATE_ACTIVE = 2,
81 FCPOP_STATE_ABORTED = 3,
82};
83
84struct nvme_fc_fcp_op {
85 struct nvme_request nreq; /*
86 * nvme/host/core.c
87 * requires this to be
88 * the 1st element in the
89 * private structure
90 * associated with the
91 * request.
92 */
93 struct nvmefc_fcp_req fcp_req;
94
95 struct nvme_fc_ctrl *ctrl;
96 struct nvme_fc_queue *queue;
97 struct request *rq;
98
99 atomic_t state;
100 u32 rqno;
101 u32 nents;
102
103 struct nvme_fc_cmd_iu cmd_iu;
104 struct nvme_fc_ersp_iu rsp_iu;
105};
106
107struct nvme_fc_lport {
108 struct nvme_fc_local_port localport;
109
110 struct ida endp_cnt;
111 struct list_head port_list; /* nvme_fc_port_list */
112 struct list_head endp_list;
113 struct device *dev; /* physical device for dma */
114 struct nvme_fc_port_template *ops;
115 struct kref ref;
116} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
117
118struct nvme_fc_rport {
119 struct nvme_fc_remote_port remoteport;
120
121 struct list_head endp_list; /* for lport->endp_list */
122 struct list_head ctrl_list;
123 struct list_head ls_req_list;
124 struct device *dev; /* physical device for dma */
125 struct nvme_fc_lport *lport;
126 spinlock_t lock;
127 struct kref ref;
128} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
129
130enum nvme_fcctrl_state {
131 FCCTRL_INIT = 0,
132 FCCTRL_ACTIVE = 1,
133};
134
135struct nvme_fc_ctrl {
136 spinlock_t lock;
137 struct nvme_fc_queue *queues;
138 u32 queue_count;
139
140 struct device *dev;
141 struct nvme_fc_lport *lport;
142 struct nvme_fc_rport *rport;
143 u32 cnum;
144
145 u64 association_id;
146
147 u64 cap;
148
149 struct list_head ctrl_list; /* rport->ctrl_list */
150
151 struct blk_mq_tag_set admin_tag_set;
152 struct blk_mq_tag_set tag_set;
153
154 struct work_struct delete_work;
155 struct kref ref;
156 int state;
157
158 struct nvme_fc_fcp_op aen_ops[NVME_FC_NR_AEN_COMMANDS];
159
160 struct nvme_ctrl ctrl;
161};
162
163static inline struct nvme_fc_ctrl *
164to_fc_ctrl(struct nvme_ctrl *ctrl)
165{
166 return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
167}
168
169static inline struct nvme_fc_lport *
170localport_to_lport(struct nvme_fc_local_port *portptr)
171{
172 return container_of(portptr, struct nvme_fc_lport, localport);
173}
174
175static inline struct nvme_fc_rport *
176remoteport_to_rport(struct nvme_fc_remote_port *portptr)
177{
178 return container_of(portptr, struct nvme_fc_rport, remoteport);
179}
180
181static inline struct nvmefc_ls_req_op *
182ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
183{
184 return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
185}
186
187static inline struct nvme_fc_fcp_op *
188fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
189{
190 return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
191}
192
193
194
195/* *************************** Globals **************************** */
196
197
198static DEFINE_SPINLOCK(nvme_fc_lock);
199
200static LIST_HEAD(nvme_fc_lport_list);
201static DEFINE_IDA(nvme_fc_local_port_cnt);
202static DEFINE_IDA(nvme_fc_ctrl_cnt);
203
204static struct workqueue_struct *nvme_fc_wq;
205
206
207
208/* *********************** FC-NVME Port Management ************************ */
209
210static int __nvme_fc_del_ctrl(struct nvme_fc_ctrl *);
211static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
212 struct nvme_fc_queue *, unsigned int);
213
214
215/**
216 * nvme_fc_register_localport - transport entry point called by an
217 * LLDD to register the existence of a NVME
218 * host FC port.
219 * @pinfo: pointer to information about the port to be registered
220 * @template: LLDD entrypoints and operational parameters for the port
221 * @dev: physical hardware device node port corresponds to. Will be
222 * used for DMA mappings
223 * @portptr: pointer to a local port pointer. Upon success, the routine
224 * will allocate a nvme_fc_local_port structure and place its
225 * address in the local port pointer. Upon failure, local port
226 * pointer will be set to 0.
227 *
228 * Returns:
229 * a completion status. Must be 0 upon success; a negative errno
230 * (ex: -ENXIO) upon failure.
231 */
232int
233nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
234 struct nvme_fc_port_template *template,
235 struct device *dev,
236 struct nvme_fc_local_port **portptr)
237{
238 struct nvme_fc_lport *newrec;
239 unsigned long flags;
240 int ret, idx;
241
242 if (!template->localport_delete || !template->remoteport_delete ||
243 !template->ls_req || !template->fcp_io ||
244 !template->ls_abort || !template->fcp_abort ||
245 !template->max_hw_queues || !template->max_sgl_segments ||
246 !template->max_dif_sgl_segments || !template->dma_boundary) {
247 ret = -EINVAL;
248 goto out_reghost_failed;
249 }
250
251 newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
252 GFP_KERNEL);
253 if (!newrec) {
254 ret = -ENOMEM;
255 goto out_reghost_failed;
256 }
257
258 idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
259 if (idx < 0) {
260 ret = -ENOSPC;
261 goto out_fail_kfree;
262 }
263
264 if (!get_device(dev) && dev) {
265 ret = -ENODEV;
266 goto out_ida_put;
267 }
268
269 INIT_LIST_HEAD(&newrec->port_list);
270 INIT_LIST_HEAD(&newrec->endp_list);
271 kref_init(&newrec->ref);
272 newrec->ops = template;
273 newrec->dev = dev;
274 ida_init(&newrec->endp_cnt);
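 /* LLDD private data lives in the memory allocated right after the lport */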
275 newrec->localport.private = &newrec[1];
276 newrec->localport.node_name = pinfo->node_name;
277 newrec->localport.port_name = pinfo->port_name;
278 newrec->localport.port_role = pinfo->port_role;
279 newrec->localport.port_id = pinfo->port_id;
280 newrec->localport.port_state = FC_OBJSTATE_ONLINE;
281 newrec->localport.port_num = idx;
282
283 spin_lock_irqsave(&nvme_fc_lock, flags);
284 list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
285 spin_unlock_irqrestore(&nvme_fc_lock, flags);
286
287 if (dev)
288 dma_set_seg_boundary(dev, template->dma_boundary);
289
290 *portptr = &newrec->localport;
291 return 0;
292
293out_ida_put:
294 ida_simple_remove(&nvme_fc_local_port_cnt, idx);
295out_fail_kfree:
296 kfree(newrec);
297out_reghost_failed:
298 *portptr = NULL;
299
300 return ret;
301}
302EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
303
304static void
305nvme_fc_free_lport(struct kref *ref)
306{
307 struct nvme_fc_lport *lport =
308 container_of(ref, struct nvme_fc_lport, ref);
309 unsigned long flags;
310
311 WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
312 WARN_ON(!list_empty(&lport->endp_list));
313
314 /* remove from transport list */
315 spin_lock_irqsave(&nvme_fc_lock, flags);
316 list_del(&lport->port_list);
317 spin_unlock_irqrestore(&nvme_fc_lock, flags);
318
319 /* let the LLDD know we've finished tearing it down */
320 lport->ops->localport_delete(&lport->localport);
321
322 ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
323 ida_destroy(&lport->endp_cnt);
324
325 put_device(lport->dev);
326
327 kfree(lport);
328}
329
330static void
331nvme_fc_lport_put(struct nvme_fc_lport *lport)
332{
333 kref_put(&lport->ref, nvme_fc_free_lport);
334}
335
336static int
337nvme_fc_lport_get(struct nvme_fc_lport *lport)
338{
339 return kref_get_unless_zero(&lport->ref);
340}
341
342/**
343 * nvme_fc_unregister_localport - transport entry point called by an
344 * LLDD to deregister/remove a previously
345 * registered NVME host FC port.
346 * @localport: pointer to the (registered) local port that is to be
347 * deregistered.
348 *
349 * Returns:
350 * a completion status. Must be 0 upon success; a negative errno
351 * (ex: -ENXIO) upon failure.
352 */
353int
354nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
355{
356 struct nvme_fc_lport *lport = localport_to_lport(portptr);
357 unsigned long flags;
358
359 if (!portptr)
360 return -EINVAL;
361
362 spin_lock_irqsave(&nvme_fc_lock, flags);
363
364 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
365 spin_unlock_irqrestore(&nvme_fc_lock, flags);
366 return -EINVAL;
367 }
368 portptr->port_state = FC_OBJSTATE_DELETED;
369
370 spin_unlock_irqrestore(&nvme_fc_lock, flags);
371
372 nvme_fc_lport_put(lport);
373
374 return 0;
375}
376EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
377
378/**
379 * nvme_fc_register_remoteport - transport entry point called by an
380 * LLDD to register the existence of a NVME
381 * subsystem FC port on its fabric.
382 * @localport: pointer to the (registered) local port that the remote
383 * subsystem port is connected to.
384 * @pinfo: pointer to information about the port to be registered
385 * @portptr: pointer to a remote port pointer. Upon success, the routine
386 * will allocate a nvme_fc_remote_port structure and place its
387 * address in the remote port pointer. Upon failure, remote port
388 * pointer will be set to 0.
389 *
390 * Returns:
391 * a completion status. Must be 0 upon success; a negative errno
392 * (ex: -ENXIO) upon failure.
393 */
394int
395nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
396 struct nvme_fc_port_info *pinfo,
397 struct nvme_fc_remote_port **portptr)
398{
399 struct nvme_fc_lport *lport = localport_to_lport(localport);
400 struct nvme_fc_rport *newrec;
401 unsigned long flags;
402 int ret, idx;
403
404 newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
405 GFP_KERNEL);
406 if (!newrec) {
407 ret = -ENOMEM;
408 goto out_reghost_failed;
409 }
410
411 if (!nvme_fc_lport_get(lport)) {
412 ret = -ESHUTDOWN;
413 goto out_kfree_rport;
414 }
415
416 idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
417 if (idx < 0) {
418 ret = -ENOSPC;
419 goto out_lport_put;
420 }
421
422 INIT_LIST_HEAD(&newrec->endp_list);
423 INIT_LIST_HEAD(&newrec->ctrl_list);
424 INIT_LIST_HEAD(&newrec->ls_req_list);
425 kref_init(&newrec->ref);
426 spin_lock_init(&newrec->lock);
427 newrec->remoteport.localport = &lport->localport;
428 newrec->dev = lport->dev;
429 newrec->lport = lport;
430 newrec->remoteport.private = &newrec[1];
431 newrec->remoteport.port_role = pinfo->port_role;
432 newrec->remoteport.node_name = pinfo->node_name;
433 newrec->remoteport.port_name = pinfo->port_name;
434 newrec->remoteport.port_id = pinfo->port_id;
435 newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
436 newrec->remoteport.port_num = idx;
437
438 spin_lock_irqsave(&nvme_fc_lock, flags);
439 list_add_tail(&newrec->endp_list, &lport->endp_list);
440 spin_unlock_irqrestore(&nvme_fc_lock, flags);
441
442 *portptr = &newrec->remoteport;
443 return 0;
444
445out_lport_put:
446 nvme_fc_lport_put(lport);
447out_kfree_rport:
448 kfree(newrec);
449out_reghost_failed:
450 *portptr = NULL;
451 return ret;
452}
453EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
454
455static void
456nvme_fc_free_rport(struct kref *ref)
457{
458 struct nvme_fc_rport *rport =
459 container_of(ref, struct nvme_fc_rport, ref);
460 struct nvme_fc_lport *lport =
461 localport_to_lport(rport->remoteport.localport);
462 unsigned long flags;
463
464 WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
465 WARN_ON(!list_empty(&rport->ctrl_list));
466
467 /* remove from lport list */
468 spin_lock_irqsave(&nvme_fc_lock, flags);
469 list_del(&rport->endp_list);
470 spin_unlock_irqrestore(&nvme_fc_lock, flags);
471
472 /* let the LLDD know we've finished tearing it down */
473 lport->ops->remoteport_delete(&rport->remoteport);
474
475 ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
476
477 kfree(rport);
478
479 nvme_fc_lport_put(lport);
480}
481
482static void
483nvme_fc_rport_put(struct nvme_fc_rport *rport)
484{
485 kref_put(&rport->ref, nvme_fc_free_rport);
486}
487
488static int
489nvme_fc_rport_get(struct nvme_fc_rport *rport)
490{
491 return kref_get_unless_zero(&rport->ref);
492}
493
494/**
495 * nvme_fc_unregister_remoteport - transport entry point called by an
496 * LLDD to deregister/remove a previously
497 * registered NVME subsystem FC port.
498 * @remoteport: pointer to the (registered) remote port that is to be
499 * deregistered.
500 *
501 * Returns:
502 * a completion status. Must be 0 upon success; a negative errno
503 * (ex: -ENXIO) upon failure.
504 */
505int
506nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
507{
508 struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
509 struct nvme_fc_ctrl *ctrl;
510 unsigned long flags;
511
512 if (!portptr)
513 return -EINVAL;
514
515 spin_lock_irqsave(&rport->lock, flags);
516
517 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
518 spin_unlock_irqrestore(&rport->lock, flags);
519 return -EINVAL;
520 }
521 portptr->port_state = FC_OBJSTATE_DELETED;
522
523 /* tear down all associations to the remote port */
524 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
525 __nvme_fc_del_ctrl(ctrl);
526
527 spin_unlock_irqrestore(&rport->lock, flags);
528
529 nvme_fc_rport_put(rport);
530 return 0;
531}
532EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
533
534
535/* *********************** FC-NVME DMA Handling **************************** */
536
537/*
538 * The fcloop device passes in a NULL device pointer. Real LLDDs will
539 * pass in a valid device pointer. If NULL is passed to the dma mapping
540 * routines, depending on the platform, it may or may not succeed, and
541 * may crash.
542 *
543 * As such:
544 * Wrap all the dma routines and check the dev pointer.
545 *
546 * For simple mappings (those that return just a dma address), we'll
547 * noop them, returning a dma address of 0.
548 *
549 * On more complex mappings (dma_map_sg), a pseudo routine fills
550 * in the scatter list, setting all dma addresses to 0.
551 */
552
553static inline dma_addr_t
554fc_dma_map_single(struct device *dev, void *ptr, size_t size,
555 enum dma_data_direction dir)
556{
557 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
558}
559
560static inline int
561fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
562{
563 return dev ? dma_mapping_error(dev, dma_addr) : 0;
564}
565
566static inline void
567fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
568 enum dma_data_direction dir)
569{
570 if (dev)
571 dma_unmap_single(dev, addr, size, dir);
572}
573
574static inline void
575fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
576 enum dma_data_direction dir)
577{
578 if (dev)
579 dma_sync_single_for_cpu(dev, addr, size, dir);
580}
581
582static inline void
583fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
584 enum dma_data_direction dir)
585{
586 if (dev)
587 dma_sync_single_for_device(dev, addr, size, dir);
588}
589
590/* pseudo dma_map_sg call */
591static int
592fc_map_sg(struct scatterlist *sg, int nents)
593{
594 struct scatterlist *s;
595 int i;
596
597 WARN_ON(nents == 0 || sg[0].length == 0);
598
599 for_each_sg(sg, s, nents, i) {
600 s->dma_address = 0L;
601#ifdef CONFIG_NEED_SG_DMA_LENGTH
602 s->dma_length = s->length;
603#endif
604 }
605 return nents;
606}
607
608static inline int
609fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
610 enum dma_data_direction dir)
611{
612 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
613}
614
615static inline void
616fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
617 enum dma_data_direction dir)
618{
619 if (dev)
620 dma_unmap_sg(dev, sg, nents, dir);
621}
622
623
624/* *********************** FC-NVME LS Handling **************************** */
625
626static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
627static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
628
629
630static void
631__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
632{
633 struct nvme_fc_rport *rport = lsop->rport;
634 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
635 unsigned long flags;
636
637 spin_lock_irqsave(&rport->lock, flags);
638
639 if (!lsop->req_queued) {
640 spin_unlock_irqrestore(&rport->lock, flags);
641 return;
642 }
643
644 list_del(&lsop->lsreq_list);
645
646 lsop->req_queued = false;
647
648 spin_unlock_irqrestore(&rport->lock, flags);
649
650 fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
651 (lsreq->rqstlen + lsreq->rsplen),
652 DMA_BIDIRECTIONAL);
653
654 nvme_fc_rport_put(rport);
655}
656
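/*
 * Common LS transmit path: DMA-map the request/response buffers, link
 * the op onto rport->ls_req_list under rport->lock, then hand it to the
 * LLDD's ls_req() entrypoint. If the LLDD rejects it, the op is unlinked
 * and unmapped again before the error is returned.
 */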
657static int
658__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
659 struct nvmefc_ls_req_op *lsop,
660 void (*done)(struct nvmefc_ls_req *req, int status))
661{
662 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
663 unsigned long flags;
664 int ret = 0;
665
666 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
667 return -ECONNREFUSED;
668
669 if (!nvme_fc_rport_get(rport))
670 return -ESHUTDOWN;
671
672 lsreq->done = done;
673 lsop->rport = rport;
674 lsop->req_queued = false;
675 INIT_LIST_HEAD(&lsop->lsreq_list);
676 init_completion(&lsop->ls_done);
677
678 lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
679 lsreq->rqstlen + lsreq->rsplen,
680 DMA_BIDIRECTIONAL);
681 if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
682 ret = -EFAULT;
683 goto out_putrport;
684 }
685 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
686
687 spin_lock_irqsave(&rport->lock, flags);
688
689 list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);
690
691 lsop->req_queued = true;
692
693 spin_unlock_irqrestore(&rport->lock, flags);
694
695 ret = rport->lport->ops->ls_req(&rport->lport->localport,
696 &rport->remoteport, lsreq);
697 if (ret)
698 goto out_unlink;
699
700 return 0;
701
702out_unlink:
703 lsop->ls_error = ret;
704 spin_lock_irqsave(&rport->lock, flags);
705 lsop->req_queued = false;
706 list_del(&lsop->lsreq_list);
707 spin_unlock_irqrestore(&rport->lock, flags);
708 fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
709 (lsreq->rqstlen + lsreq->rsplen),
710 DMA_BIDIRECTIONAL);
711out_putrport:
712 nvme_fc_rport_put(rport);
713
714 return ret;
715}
716
717static void
718nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
719{
720 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
721
722 lsop->ls_error = status;
723 complete(&lsop->ls_done);
724}
725
726static int
727nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
728{
729 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
730 struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
731 int ret;
732
733 ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
734
735 if (!ret) {
736 /*
737 * No timeout and not interruptible, as we need the struct
738 * to exist until the LLDD calls us back. Thus mandate a
739 * wait until the LLDD calls back; the LLDD is responsible
740 * for the timeout action.
741 */
742 wait_for_completion(&lsop->ls_done);
743
744 __nvme_fc_finish_ls_req(lsop);
745
746 ret = lsop->ls_error;
747 }
748
749 if (ret)
750 return ret;
751
752 /* ACC or RJT payload ? */
753 if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
754 return -ENXIO;
755
756 return 0;
757}
758
759static int
760nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
761 struct nvmefc_ls_req_op *lsop,
762 void (*done)(struct nvmefc_ls_req *req, int status))
763{
764 /* don't wait for completion */
765
766 return __nvme_fc_send_ls_req(rport, lsop, done);
767}
768
769/* Validation Error indexes into the string table below */
770enum {
771 VERR_NO_ERROR = 0,
772 VERR_LSACC = 1,
773 VERR_LSDESC_RQST = 2,
774 VERR_LSDESC_RQST_LEN = 3,
775 VERR_ASSOC_ID = 4,
776 VERR_ASSOC_ID_LEN = 5,
777 VERR_CONN_ID = 6,
778 VERR_CONN_ID_LEN = 7,
779 VERR_CR_ASSOC = 8,
780 VERR_CR_ASSOC_ACC_LEN = 9,
781 VERR_CR_CONN = 10,
782 VERR_CR_CONN_ACC_LEN = 11,
783 VERR_DISCONN = 12,
784 VERR_DISCONN_ACC_LEN = 13,
785};
786
787static char *validation_errors[] = {
788 "OK",
789 "Not LS_ACC",
790 "Not LSDESC_RQST",
791 "Bad LSDESC_RQST Length",
792 "Not Association ID",
793 "Bad Association ID Length",
794 "Not Connection ID",
795 "Bad Connection ID Length",
796 "Not CR_ASSOC Rqst",
797 "Bad CR_ASSOC ACC Length",
798 "Not CR_CONN Rqst",
799 "Bad CR_CONN ACC Length",
800 "Not Disconnect Rqst",
801 "Bad Disconnect ACC Length",
802};
803
804static int
805nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
806 struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
807{
808 struct nvmefc_ls_req_op *lsop;
809 struct nvmefc_ls_req *lsreq;
810 struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
811 struct fcnvme_ls_cr_assoc_acc *assoc_acc;
812 int ret, fcret = 0;
813
814 lsop = kzalloc((sizeof(*lsop) +
815 ctrl->lport->ops->lsrqst_priv_sz +
816 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
817 if (!lsop) {
818 ret = -ENOMEM;
819 goto out_no_memory;
820 }
821 lsreq = &lsop->ls_req;
822
823 lsreq->private = (void *)&lsop[1];
824 assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
825 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
826 assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
827
828 assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
829 assoc_rqst->desc_list_len =
830 cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
831
832 assoc_rqst->assoc_cmd.desc_tag =
833 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
834 assoc_rqst->assoc_cmd.desc_len =
835 fcnvme_lsdesc_len(
836 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
837
838 assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
839 assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
840 /* Linux supports only Dynamic controllers */
841 assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
842 memcpy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id,
843 min_t(size_t, FCNVME_ASSOC_HOSTID_LEN, sizeof(uuid_be)));
844 strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
845 min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
846 strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
847 min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
848
849 lsop->queue = queue;
850 lsreq->rqstaddr = assoc_rqst;
851 lsreq->rqstlen = sizeof(*assoc_rqst);
852 lsreq->rspaddr = assoc_acc;
853 lsreq->rsplen = sizeof(*assoc_acc);
854 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
855
856 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
857 if (ret)
858 goto out_free_buffer;
859
860 /* process connect LS completion */
861
862 /* validate the ACC response */
863 if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
864 fcret = VERR_LSACC;
865 else if (assoc_acc->hdr.desc_list_len !=
866 fcnvme_lsdesc_len(
867 sizeof(struct fcnvme_ls_cr_assoc_acc)))
868 fcret = VERR_CR_ASSOC_ACC_LEN;
869 else if (assoc_acc->hdr.rqst.desc_tag !=
870 cpu_to_be32(FCNVME_LSDESC_RQST))
871 fcret = VERR_LSDESC_RQST;
872 else if (assoc_acc->hdr.rqst.desc_len !=
873 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
874 fcret = VERR_LSDESC_RQST_LEN;
875 else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
876 fcret = VERR_CR_ASSOC;
877 else if (assoc_acc->associd.desc_tag !=
878 cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
879 fcret = VERR_ASSOC_ID;
880 else if (assoc_acc->associd.desc_len !=
881 fcnvme_lsdesc_len(
882 sizeof(struct fcnvme_lsdesc_assoc_id)))
883 fcret = VERR_ASSOC_ID_LEN;
884 else if (assoc_acc->connectid.desc_tag !=
885 cpu_to_be32(FCNVME_LSDESC_CONN_ID))
886 fcret = VERR_CONN_ID;
887 else if (assoc_acc->connectid.desc_len !=
888 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
889 fcret = VERR_CONN_ID_LEN;
890
891 if (fcret) {
892 ret = -EBADF;
893 dev_err(ctrl->dev,
894 "q %d connect failed: %s\n",
895 queue->qnum, validation_errors[fcret]);
896 } else {
897 ctrl->association_id =
898 be64_to_cpu(assoc_acc->associd.association_id);
899 queue->connection_id =
900 be64_to_cpu(assoc_acc->connectid.connection_id);
901 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
902 }
903
904out_free_buffer:
905 kfree(lsop);
906out_no_memory:
907 if (ret)
908 dev_err(ctrl->dev,
909 "queue %d connect admin queue failed (%d).\n",
910 queue->qnum, ret);
911 return ret;
912}
913
914static int
915nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
916 u16 qsize, u16 ersp_ratio)
917{
918 struct nvmefc_ls_req_op *lsop;
919 struct nvmefc_ls_req *lsreq;
920 struct fcnvme_ls_cr_conn_rqst *conn_rqst;
921 struct fcnvme_ls_cr_conn_acc *conn_acc;
922 int ret, fcret = 0;
923
924 lsop = kzalloc((sizeof(*lsop) +
925 ctrl->lport->ops->lsrqst_priv_sz +
926 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
927 if (!lsop) {
928 ret = -ENOMEM;
929 goto out_no_memory;
930 }
931 lsreq = &lsop->ls_req;
932
933 lsreq->private = (void *)&lsop[1];
934 conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
935 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
936 conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
937
938 conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
939 conn_rqst->desc_list_len = cpu_to_be32(
940 sizeof(struct fcnvme_lsdesc_assoc_id) +
941 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
942
943 conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
944 conn_rqst->associd.desc_len =
945 fcnvme_lsdesc_len(
946 sizeof(struct fcnvme_lsdesc_assoc_id));
947 conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
948 conn_rqst->connect_cmd.desc_tag =
949 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
950 conn_rqst->connect_cmd.desc_len =
951 fcnvme_lsdesc_len(
952 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
953 conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
954 conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
955 conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);
956
957 lsop->queue = queue;
958 lsreq->rqstaddr = conn_rqst;
959 lsreq->rqstlen = sizeof(*conn_rqst);
960 lsreq->rspaddr = conn_acc;
961 lsreq->rsplen = sizeof(*conn_acc);
962 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
963
964 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
965 if (ret)
966 goto out_free_buffer;
967
968 /* process connect LS completion */
969
970 /* validate the ACC response */
971 if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
972 fcret = VERR_LSACC;
973 else if (conn_acc->hdr.desc_list_len !=
974 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
975 fcret = VERR_CR_CONN_ACC_LEN;
976 else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
977 fcret = VERR_LSDESC_RQST;
978 else if (conn_acc->hdr.rqst.desc_len !=
979 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
980 fcret = VERR_LSDESC_RQST_LEN;
981 else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
982 fcret = VERR_CR_CONN;
983 else if (conn_acc->connectid.desc_tag !=
984 cpu_to_be32(FCNVME_LSDESC_CONN_ID))
985 fcret = VERR_CONN_ID;
986 else if (conn_acc->connectid.desc_len !=
987 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
988 fcret = VERR_CONN_ID_LEN;
989
990 if (fcret) {
991 ret = -EBADF;
992 dev_err(ctrl->dev,
993 "q %d connect failed: %s\n",
994 queue->qnum, validation_errors[fcret]);
995 } else {
996 queue->connection_id =
997 be64_to_cpu(conn_acc->connectid.connection_id);
998 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
999 }
1000
1001out_free_buffer:
1002 kfree(lsop);
1003out_no_memory:
1004 if (ret)
1005 dev_err(ctrl->dev,
1006 "queue %d connect command failed (%d).\n",
1007 queue->qnum, ret);
1008 return ret;
1009}
1010
1011static void
1012nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
1013{
1014 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1015
1016 __nvme_fc_finish_ls_req(lsop);
1017
1018 /* fc-nvme initiator doesn't care about success or failure of cmd */
1019
1020 kfree(lsop);
1021}
1022
1023/*
1024 * This routine sends a FC-NVME LS to disconnect (aka terminate)
1025 * the FC-NVME Association. Terminating the association also
1026 * terminates the FC-NVME connections (per queue, both admin and io
1027 * queues) that are part of the association. E.g. things are torn
1028 * down, and the related FC-NVME Association ID and Connection IDs
1029 * become invalid.
1030 *
1031 * The behavior of the fc-nvme initiator is such that its
1032 * understanding of the association and connections will implicitly
1033 * be torn down. The action is implicit as it may be due to a loss of
1034 * connectivity with the fc-nvme target, so you may never get a
1035 * response even if you tried. As such, the action of this routine
1036 * is to asynchronously send the LS, ignore any results of the LS, and
1037 * continue on with terminating the association. If the fc-nvme target
1038 * is present and receives the LS, it too can tear down.
1039 */
1040static void
1041nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1042{
1043 struct fcnvme_ls_disconnect_rqst *discon_rqst;
1044 struct fcnvme_ls_disconnect_acc *discon_acc;
1045 struct nvmefc_ls_req_op *lsop;
1046 struct nvmefc_ls_req *lsreq;
1047 int ret;
1048
1049 lsop = kzalloc((sizeof(*lsop) +
1050 ctrl->lport->ops->lsrqst_priv_sz +
1051 sizeof(*discon_rqst) + sizeof(*discon_acc)),
1052 GFP_KERNEL);
1053 if (!lsop)
1054 /* couldn't send it... too bad */
1055 return;
1056
1057 lsreq = &lsop->ls_req;
1058
1059 lsreq->private = (void *)&lsop[1];
1060 discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
1061 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1062 discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];
1063
1064 discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
1065 discon_rqst->desc_list_len = cpu_to_be32(
1066 sizeof(struct fcnvme_lsdesc_assoc_id) +
1067 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1068
1069 discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1070 discon_rqst->associd.desc_len =
1071 fcnvme_lsdesc_len(
1072 sizeof(struct fcnvme_lsdesc_assoc_id));
1073
1074 discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1075
1076 discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
1077 FCNVME_LSDESC_DISCONN_CMD);
1078 discon_rqst->discon_cmd.desc_len =
1079 fcnvme_lsdesc_len(
1080 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1081 discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
1082 discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);
1083
1084 lsreq->rqstaddr = discon_rqst;
1085 lsreq->rqstlen = sizeof(*discon_rqst);
1086 lsreq->rspaddr = discon_acc;
1087 lsreq->rsplen = sizeof(*discon_acc);
1088 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1089
1090 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
1091 nvme_fc_disconnect_assoc_done);
1092 if (ret)
1093 kfree(lsop);
1094
1095 /* the only meaningful part of terminating the association */
1096 ctrl->association_id = 0;
1097}
1098
1099
1100/* *********************** NVME Ctrl Routines **************************** */
1101
1102
1103static int
1104nvme_fc_reinit_request(void *data, struct request *rq)
1105{
1106 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1107 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1108
1109 memset(cmdiu, 0, sizeof(*cmdiu));
1110 cmdiu->scsi_id = NVME_CMD_SCSI_ID;
1111 cmdiu->fc_id = NVME_CMD_FC_ID;
1112 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
1113 memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));
1114
1115 return 0;
1116}
1117
1118static void
1119__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
1120 struct nvme_fc_fcp_op *op)
1121{
1122 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
1123 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1124 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
1125 sizeof(op->cmd_iu), DMA_TO_DEVICE);
1126
1127 atomic_set(&op->state, FCPOP_STATE_UNINIT);
1128}
1129
1130static void
1131nvme_fc_exit_request(void *data, struct request *rq,
1132 unsigned int hctx_idx, unsigned int rq_idx)
1133{
1134 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1135
1136 return __nvme_fc_exit_request(data, op);
1137}
1138
1139static void
1140nvme_fc_exit_aen_ops(struct nvme_fc_ctrl *ctrl)
1141{
1142 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
1143 int i;
1144
1145 for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
1146 if (atomic_read(&aen_op->state) == FCPOP_STATE_UNINIT)
1147 continue;
1148 __nvme_fc_exit_request(ctrl, aen_op);
1149 nvme_fc_ctrl_put(ctrl);
1150 }
1151}
1152
1153void
1154nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1155{
1156 struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
1157 struct request *rq = op->rq;
1158 struct nvmefc_fcp_req *freq = &op->fcp_req;
1159 struct nvme_fc_ctrl *ctrl = op->ctrl;
1160 struct nvme_fc_queue *queue = op->queue;
1161 struct nvme_completion *cqe = &op->rsp_iu.cqe;
1162 __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
1163 union nvme_result result;
1164
1165 /*
1166 * WARNING:
1167 * The current linux implementation of a nvme controller
1168 * allocates a single tag set for all io queues and sizes
1169 * the io queues to fully hold all possible tags. Thus, the
1170 * implementation does not reference or care about the sqhd
1171 * value as it never needs to use the sqhd/sqtail pointers
1172 * for submission pacing.
1173 *
1174 * This affects the FC-NVME implementation in two ways:
1175 * 1) As the value doesn't matter, we don't need to waste
1176 * cycles extracting it from ERSPs and stamping it in the
1177 * cases where the transport fabricates CQEs on successful
1178 * completions.
1179 * 2) The FC-NVME implementation requires that delivery of
1180 * ERSP completions are to go back to the nvme layer in order
1181 * relative to the rsn, such that the sqhd value will always
1182 * be "in order" for the nvme layer. As the nvme layer in
1183 * linux doesn't care about sqhd, there's no need to return
1184 * them in order.
1185 *
1186 * Additionally:
1187 * As the core nvme layer in linux currently does not look at
1188 * every field in the cqe - in cases where the FC transport must
1189 * fabricate a CQE, the following fields will not be set as they
1190 * are not referenced:
1191 * cqe.sqid, cqe.sqhd, cqe.command_id
1192 */
1193
1194 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
1195 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1196
1197 if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
1198 status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
1199 else if (freq->status)
1200 status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
1201
1202 /*
1203 * For the linux implementation, if we have an unsuccessful
1204 * status, the blk-mq layer can typically be called with the
1205 * non-zero status and the content of the cqe isn't important.
1206 */
1207 if (status)
1208 goto done;
1209
1210 /*
1211 * command completed successfully relative to the wire
1212 * protocol. However, validate anything received and
1213 * extract the status and result from the cqe (create it
1214 * where necessary).
1215 */
1216
1217 switch (freq->rcv_rsplen) {
1218
1219 case 0:
1220 case NVME_FC_SIZEOF_ZEROS_RSP:
1221 /*
1222 * No response payload, or 12 bytes of payload (which
1223 * should all be zeros), is considered successful; the
1224 * transport fabricates the CQE with no payload.
1225 */
1226 if (freq->transferred_length !=
1227 be32_to_cpu(op->cmd_iu.data_len)) {
1228 status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
1229 goto done;
1230 }
1231 result.u64 = 0;
1232 break;
1233
1234 case sizeof(struct nvme_fc_ersp_iu):
1235 /*
1236 * The ERSP IU contains a full completion with CQE.
1237 * Validate ERSP IU and look at cqe.
1238 */
1239 if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
1240 (freq->rcv_rsplen / 4) ||
1241 be32_to_cpu(op->rsp_iu.xfrd_len) !=
1242 freq->transferred_length ||
1243 op->rsp_iu.status_code ||
1244 op->rqno != le16_to_cpu(cqe->command_id))) {
1245 status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
1246 goto done;
1247 }
1248 result = cqe->result;
1249 status = cqe->status;
1250 break;
1251
1252 default:
1253 status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
1254 goto done;
1255 }
1256
1257done:
1258 if (!queue->qnum && op->rqno >= AEN_CMDID_BASE) {
1259 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
1260 nvme_fc_ctrl_put(ctrl);
1261 return;
1262 }
1263
1264 nvme_end_request(rq, status, result);
1265}
1266
1267static int
1268__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
1269 struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
1270 struct request *rq, u32 rqno)
1271{
1272 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1273 int ret = 0;
1274
1275 memset(op, 0, sizeof(*op));
1276 op->fcp_req.cmdaddr = &op->cmd_iu;
1277 op->fcp_req.cmdlen = sizeof(op->cmd_iu);
1278 op->fcp_req.rspaddr = &op->rsp_iu;
1279 op->fcp_req.rsplen = sizeof(op->rsp_iu);
1280 op->fcp_req.done = nvme_fc_fcpio_done;
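 /*
 * The scatterlist and the LLDD private area are carved out of the
 * memory that follows the op: first_sgl starts at &op[1] and the
 * private pointer sits after SG_CHUNK_SIZE sgl entries.
 */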
1281 op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
1282 op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
1283 op->ctrl = ctrl;
1284 op->queue = queue;
1285 op->rq = rq;
1286 op->rqno = rqno;
1287
1288 cmdiu->scsi_id = NVME_CMD_SCSI_ID;
1289 cmdiu->fc_id = NVME_CMD_FC_ID;
1290 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
1291
1292 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
1293 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
1294 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
1295 dev_err(ctrl->dev,
1296 "FCP Op failed - cmdiu dma mapping failed.\n");
1297 ret = -EFAULT;
1298 goto out_on_error;
1299 }
1300
1301 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
1302 &op->rsp_iu, sizeof(op->rsp_iu),
1303 DMA_FROM_DEVICE);
1304 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
1305 dev_err(ctrl->dev,
1306 "FCP Op failed - rspiu dma mapping failed.\n");
1307 ret = -EFAULT;
1308 }
1309
1310 atomic_set(&op->state, FCPOP_STATE_IDLE);
1311out_on_error:
1312 return ret;
1313}
1314
1315static int
1316nvme_fc_init_request(void *data, struct request *rq,
1317 unsigned int hctx_idx, unsigned int rq_idx,
1318 unsigned int numa_node)
1319{
1320 struct nvme_fc_ctrl *ctrl = data;
1321 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1322 struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1];
1323
1324 return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
1325}
1326
1327static int
1328nvme_fc_init_admin_request(void *data, struct request *rq,
1329 unsigned int hctx_idx, unsigned int rq_idx,
1330 unsigned int numa_node)
1331{
1332 struct nvme_fc_ctrl *ctrl = data;
1333 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1334 struct nvme_fc_queue *queue = &ctrl->queues[0];
1335
1336 return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
1337}
1338
1339static int
1340nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
1341{
1342 struct nvme_fc_fcp_op *aen_op;
1343 struct nvme_fc_cmd_iu *cmdiu;
1344 struct nvme_command *sqe;
1345 int i, ret;
1346
1347 aen_op = ctrl->aen_ops;
1348 for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
1349 cmdiu = &aen_op->cmd_iu;
1350 sqe = &cmdiu->sqe;
1351 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
1352 aen_op, (struct request *)NULL,
1353 (AEN_CMDID_BASE + i));
1354 if (ret)
1355 return ret;
1356
1357 memset(sqe, 0, sizeof(*sqe));
1358 sqe->common.opcode = nvme_admin_async_event;
1359 sqe->common.command_id = AEN_CMDID_BASE + i;
1360 }
1361 return 0;
1362}
1363
1364
1365static inline void
1366__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
1367 unsigned int qidx)
1368{
1369 struct nvme_fc_queue *queue = &ctrl->queues[qidx];
1370
1371 hctx->driver_data = queue;
1372 queue->hctx = hctx;
1373}
1374
1375static int
1376nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1377 unsigned int hctx_idx)
1378{
1379 struct nvme_fc_ctrl *ctrl = data;
1380
1381 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
1382
1383 return 0;
1384}
1385
1386static int
1387nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1388 unsigned int hctx_idx)
1389{
1390 struct nvme_fc_ctrl *ctrl = data;
1391
1392 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
1393
1394 return 0;
1395}
1396
1397static void
1398nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size)
1399{
1400 struct nvme_fc_queue *queue;
1401
1402 queue = &ctrl->queues[idx];
1403 memset(queue, 0, sizeof(*queue));
1404 queue->ctrl = ctrl;
1405 queue->qnum = idx;
1406 atomic_set(&queue->csn, 1);
1407 queue->dev = ctrl->dev;
1408
1409 if (idx > 0)
1410 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
1411 else
1412 queue->cmnd_capsule_len = sizeof(struct nvme_command);
1413
1414 queue->queue_size = queue_size;
1415
1416 /*
1417 * Considered whether we should allocate buffers for all SQEs
1418 * and CQEs and dma map them - mapping their respective entries
1419 * into the request structures (kernel vm addr and dma address)
1420 * thus the driver could use the buffers/mappings directly.
1421 * It only makes sense if the LLDD would use them for its
1422 * messaging api. It's very unlikely most adapter api's would use
1423 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
1424 * structures were used instead.
1425 */
1426}
1427
1428/*
1429 * This routine terminates a queue at the transport level.
1430 * The transport has already ensured that all outstanding ios on
1431 * the queue have been terminated.
1432 * The transport will send a Disconnect LS request to terminate
1433 * the queue's connection. Termination of the admin queue will also
1434 * terminate the association at the target.
1435 */
1436static void
1437nvme_fc_free_queue(struct nvme_fc_queue *queue)
1438{
1439 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
1440 return;
1441
1442 /*
1443 * Current implementation never disconnects a single queue.
1444 * It always terminates a whole association. So there is never
1445 * a disconnect(queue) LS sent to the target.
1446 */
1447
1448 queue->connection_id = 0;
1449 clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1450}
1451
1452static void
1453__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
1454 struct nvme_fc_queue *queue, unsigned int qidx)
1455{
1456 if (ctrl->lport->ops->delete_queue)
1457 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
1458 queue->lldd_handle);
1459 queue->lldd_handle = NULL;
1460}
1461
1462static void
1463nvme_fc_destroy_admin_queue(struct nvme_fc_ctrl *ctrl)
1464{
1465 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
1466 blk_cleanup_queue(ctrl->ctrl.admin_q);
1467 blk_mq_free_tag_set(&ctrl->admin_tag_set);
1468 nvme_fc_free_queue(&ctrl->queues[0]);
1469}
1470
1471static void
1472nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
1473{
1474 int i;
1475
1476 for (i = 1; i < ctrl->queue_count; i++)
1477 nvme_fc_free_queue(&ctrl->queues[i]);
1478}
1479
1480static int
1481__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
1482 struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
1483{
1484 int ret = 0;
1485
1486 queue->lldd_handle = NULL;
1487 if (ctrl->lport->ops->create_queue)
1488 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
1489 qidx, qsize, &queue->lldd_handle);
1490
1491 return ret;
1492}
1493
1494static void
1495nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
1496{
1497 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->queue_count - 1];
1498 int i;
1499
1500 for (i = ctrl->queue_count - 1; i >= 1; i--, queue--)
1501 __nvme_fc_delete_hw_queue(ctrl, queue, i);
1502}
1503
1504static int
1505nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1506{
1507 struct nvme_fc_queue *queue = &ctrl->queues[1];
1508 int i, ret;
1509
1510 for (i = 1; i < ctrl->queue_count; i++, queue++) {
1511 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
1512 if (ret)
1513 goto delete_queues;
1514 }
1515
1516 return 0;
1517
1518delete_queues:
1519 for (; i >= 0; i--)
1520 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
1521 return ret;
1522}
1523
1524static int
1525nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1526{
1527 int i, ret = 0;
1528
1529 for (i = 1; i < ctrl->queue_count; i++) {
1530 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
1531 (qsize / 5));
1532 if (ret)
1533 break;
1534 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
1535 if (ret)
1536 break;
1537 }
1538
1539 return ret;
1540}
1541
1542static void
1543nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
1544{
1545 int i;
1546
1547 for (i = 1; i < ctrl->queue_count; i++)
1548 nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize);
1549}
1550
1551static void
1552nvme_fc_ctrl_free(struct kref *ref)
1553{
1554 struct nvme_fc_ctrl *ctrl =
1555 container_of(ref, struct nvme_fc_ctrl, ref);
1556 unsigned long flags;
1557
1558 if (ctrl->state != FCCTRL_INIT) {
1559 /* remove from rport list */
1560 spin_lock_irqsave(&ctrl->rport->lock, flags);
1561 list_del(&ctrl->ctrl_list);
1562 spin_unlock_irqrestore(&ctrl->rport->lock, flags);
1563 }
1564
1565 put_device(ctrl->dev);
1566 nvme_fc_rport_put(ctrl->rport);
1567
1568 kfree(ctrl->queues);
1569 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
1570 nvmf_free_options(ctrl->ctrl.opts);
1571 kfree(ctrl);
1572}
1573
1574static void
1575nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
1576{
1577 kref_put(&ctrl->ref, nvme_fc_ctrl_free);
1578}
1579
1580static int
1581nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
1582{
1583 return kref_get_unless_zero(&ctrl->ref);
1584}
1585
1586/*
1587 * All accesses from nvme core layer done - can now free the
1588 * controller. Called after last nvme_put_ctrl() call
1589 */
1590static void
1591nvme_fc_free_nvme_ctrl(struct nvme_ctrl *nctrl)
1592{
1593 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
1594
1595 WARN_ON(nctrl != &ctrl->ctrl);
1596
1597 /*
1598 * Tear down the association, which will generate link
1599 * traffic to terminate connections
1600 */
1601
1602 if (ctrl->state != FCCTRL_INIT) {
1603 /* send a Disconnect(association) LS to fc-nvme target */
1604 nvme_fc_xmt_disconnect_assoc(ctrl);
1605
1606 if (ctrl->ctrl.tagset) {
1607 blk_cleanup_queue(ctrl->ctrl.connect_q);
1608 blk_mq_free_tag_set(&ctrl->tag_set);
1609 nvme_fc_delete_hw_io_queues(ctrl);
1610 nvme_fc_free_io_queues(ctrl);
1611 }
1612
1613 nvme_fc_exit_aen_ops(ctrl);
1614
1615 nvme_fc_destroy_admin_queue(ctrl);
1616 }
1617
1618 nvme_fc_ctrl_put(ctrl);
1619}
1620
1621
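/*
 * Atomically claim the op for abort: only an op still in ACTIVE state
 * is handed to the LLDD's fcp_abort(); otherwise the previous state is
 * restored and -ECANCELED is returned.
 */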
1622static int
1623__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
1624{
1625 int state;
1626
1627 state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
1628 if (state != FCPOP_STATE_ACTIVE) {
1629 atomic_set(&op->state, state);
1630 return -ECANCELED; /* fail */
1631 }
1632
1633 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
1634 &ctrl->rport->remoteport,
1635 op->queue->lldd_handle,
1636 &op->fcp_req);
1637
1638 return 0;
1639}
1640
1641enum blk_eh_timer_return
1642nvme_fc_timeout(struct request *rq, bool reserved)
1643{
1644 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1645 struct nvme_fc_ctrl *ctrl = op->ctrl;
1646 int ret;
1647
1648 if (reserved)
1649 return BLK_EH_RESET_TIMER;
1650
1651 ret = __nvme_fc_abort_op(ctrl, op);
1652 if (ret)
1653 /* io wasn't active to abort; consider it done */
1654 return BLK_EH_HANDLED;
1655
1656 /*
1657 * TODO: force a controller reset
1658 * when that happens, queues will be torn down and outstanding
1659 * ios will be terminated, and the above abort, on a single io
1660 * will no longer be needed.
1661 */
1662
1663 return BLK_EH_HANDLED;
1664}
1665
1666static int
1667nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
1668 struct nvme_fc_fcp_op *op)
1669{
1670 struct nvmefc_fcp_req *freq = &op->fcp_req;
1671 enum dma_data_direction dir;
1672 int ret;
1673
1674 freq->sg_cnt = 0;
1675
1676 if (!blk_rq_payload_bytes(rq))
1677 return 0;
1678
1679 freq->sg_table.sgl = freq->first_sgl;
1680 ret = sg_alloc_table_chained(&freq->sg_table,
1681 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
1682 if (ret)
1683 return -ENOMEM;
1684
1685 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
1686 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
1687 dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
1688 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
1689 op->nents, dir);
1690 if (unlikely(freq->sg_cnt <= 0)) {
1691 sg_free_table_chained(&freq->sg_table, true);
1692 freq->sg_cnt = 0;
1693 return -EFAULT;
1694 }
1695
1696 /*
1697 * TODO: blk_integrity_rq(rq) for DIF
1698 */
1699 return 0;
1700}
1701
1702static void
1703nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
1704 struct nvme_fc_fcp_op *op)
1705{
1706 struct nvmefc_fcp_req *freq = &op->fcp_req;
1707
1708 if (!freq->sg_cnt)
1709 return;
1710
1711 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
1712 ((rq_data_dir(rq) == WRITE) ?
1713 DMA_TO_DEVICE : DMA_FROM_DEVICE));
1714
1715 nvme_cleanup_cmd(rq);
1716
1717 sg_free_table_chained(&freq->sg_table, true);
1718
1719 freq->sg_cnt = 0;
1720}
1721
1722/*
1723 * In FC, the queue is a logical thing. At transport connect, the target
1724 * creates its "queue" and returns a handle that is to be given to the
1725 * target whenever it posts something to the corresponding SQ. When an
1726 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
1727 * command contained within the SQE, an io, and assigns a FC exchange
1728 * to it. The SQE and the associated SQ handle are sent in the initial
1729 * CMD IU sents on the exchange. All transfers relative to the io occur
1730 * as part of the exchange. The CQE is the last thing for the io,
1731 * which is transferred (explicitly or implicitly) with the RSP IU
1732 * sent on the exchange. After the CQE is received, the FC exchange is
1733 * terminaed and the Exchange may be used on a different io.
1734 *
1735 * In the transport-to-LLDD api, the transport issues a new fcp io
1736 * request to the LLDD. The LLDD allocates an FC exchange resource
1737 * and transfers the command, then processes all steps needed to
1738 * complete the io. Upon completion, the transport's done routine
1739 * is called.
1740 *
1741 * So - while the operation is outstanding to the LLDD, there is a link
1742 * level FC exchange resource that is also outstanding. This must be
1743 * considered in all cleanup operations.
1744 */
1745static int
1746nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1747 struct nvme_fc_fcp_op *op, u32 data_len,
1748 enum nvmefc_fcp_datadir io_dir)
1749{
1750 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1751 struct nvme_command *sqe = &cmdiu->sqe;
1752 u32 csn;
1753 int ret;
1754
1755 if (!nvme_fc_ctrl_get(ctrl))
1756 return BLK_MQ_RQ_QUEUE_ERROR;
1757
1758 /* format the FC-NVME CMD IU and fcp_req */
1759 cmdiu->connection_id = cpu_to_be64(queue->connection_id);
1760 csn = atomic_inc_return(&queue->csn);
1761 cmdiu->csn = cpu_to_be32(csn);
1762 cmdiu->data_len = cpu_to_be32(data_len);
1763 switch (io_dir) {
1764 case NVMEFC_FCP_WRITE:
1765 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
1766 break;
1767 case NVMEFC_FCP_READ:
1768 cmdiu->flags = FCNVME_CMD_FLAGS_READ;
1769 break;
1770 case NVMEFC_FCP_NODATA:
1771 cmdiu->flags = 0;
1772 break;
1773 }
1774 op->fcp_req.payload_length = data_len;
1775 op->fcp_req.io_dir = io_dir;
1776 op->fcp_req.transferred_length = 0;
1777 op->fcp_req.rcv_rsplen = 0;
James Smart62eeacb2017-03-23 20:41:27 -07001778 op->fcp_req.status = NVME_SC_SUCCESS;
James Smarte3994412016-12-02 00:28:42 -08001779 op->fcp_req.sqid = cpu_to_le16(queue->qnum);
1780
1781 /*
1782 * validate per fabric rules, set fields mandated by fabric spec
1783 * as well as those by FC-NVME spec.
1784 */
1785 WARN_ON_ONCE(sqe->common.metadata);
1786 WARN_ON_ONCE(sqe->common.dptr.prp1);
1787 WARN_ON_ONCE(sqe->common.dptr.prp2);
1788 sqe->common.flags |= NVME_CMD_SGL_METABUF;
1789
1790 /*
1791 * format SQE DPTR field per FC-NVME rules
1792 * type=data block descr; subtype=offset;
1793 * offset is currently 0.
1794 */
1795 sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET;
1796 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
1797 sqe->rw.dptr.sgl.addr = 0;
1798
1799 /* odd that we set the command_id - should come from nvme-fabrics */
1800 WARN_ON_ONCE(sqe->common.command_id != cpu_to_le16(op->rqno));
1801
1802 if (op->rq) { /* skipped on aens */
1803 ret = nvme_fc_map_data(ctrl, op->rq, op);
1804 if (ret < 0) {
1805 dev_err(queue->ctrl->ctrl.device,
1806 "Failed to map data (%d)\n", ret);
1807 nvme_cleanup_cmd(op->rq);
1808 nvme_fc_ctrl_put(ctrl);
1809 return (ret == -ENOMEM || ret == -EAGAIN) ?
1810 BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
1811 }
1812 }
1813
1814 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
1815 sizeof(op->cmd_iu), DMA_TO_DEVICE);
1816
1817 atomic_set(&op->state, FCPOP_STATE_ACTIVE);
1818
1819 if (op->rq)
1820 blk_mq_start_request(op->rq);
1821
1822 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
1823 &ctrl->rport->remoteport,
1824 queue->lldd_handle, &op->fcp_req);
1825
1826 if (ret) {
1827 dev_err(ctrl->dev,
1828 "Send nvme command failed - lldd returned %d.\n", ret);
1829
1830 if (op->rq) { /* normal request */
1831 nvme_fc_unmap_data(ctrl, op->rq, op);
1832 nvme_cleanup_cmd(op->rq);
1833 }
1834 /* else - aen. no cleanup needed */
1835
1836 nvme_fc_ctrl_put(ctrl);
1837
1838 if (ret != -EBUSY)
1839 return BLK_MQ_RQ_QUEUE_ERROR;
1840
1841 if (op->rq) {
1842 blk_mq_stop_hw_queues(op->rq->q);
1843 blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
1844 }
1845 return BLK_MQ_RQ_QUEUE_BUSY;
1846 }
1847
1848 return BLK_MQ_RQ_QUEUE_OK;
1849}
1850
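/*
 * blk-mq ->queue_rq handler: build the SQE via nvme_setup_cmd(),
 * derive the transfer length and direction, and hand the io to
 * nvme_fc_start_fcp_op().
 */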
1851static int
1852nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
1853 const struct blk_mq_queue_data *bd)
1854{
1855 struct nvme_ns *ns = hctx->queue->queuedata;
1856 struct nvme_fc_queue *queue = hctx->driver_data;
1857 struct nvme_fc_ctrl *ctrl = queue->ctrl;
1858 struct request *rq = bd->rq;
1859 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1860 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1861 struct nvme_command *sqe = &cmdiu->sqe;
1862 enum nvmefc_fcp_datadir io_dir;
1863 u32 data_len;
1864 int ret;
1865
1866 ret = nvme_setup_cmd(ns, rq, sqe);
1867 if (ret)
1868 return ret;
1869
Christoph Hellwigb131c612017-01-13 12:29:12 +01001870 data_len = blk_rq_payload_bytes(rq);
James Smarte3994412016-12-02 00:28:42 -08001871 if (data_len)
1872 io_dir = ((rq_data_dir(rq) == WRITE) ?
1873 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
1874 else
1875 io_dir = NVMEFC_FCP_NODATA;
1876
1877 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
1878}
1879
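/* Return the blk-mq tags backing this queue (admin vs io tag set). */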
1880static struct blk_mq_tags *
1881nvme_fc_tagset(struct nvme_fc_queue *queue)
1882{
1883 if (queue->qnum == 0)
1884 return queue->ctrl->admin_tag_set.tags[queue->qnum];
1885
1886 return queue->ctrl->tag_set.tags[queue->qnum - 1];
1887}
1888
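/*
 * blk-mq ->poll handler: look up the request by tag and, if its fcp
 * op is still active, ask the LLDD to poll the hw queue. Returns
 * non-zero once the op has left the ACTIVE state.
 */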
1889static int
1890nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
1892{
1893 struct nvme_fc_queue *queue = hctx->driver_data;
1894 struct nvme_fc_ctrl *ctrl = queue->ctrl;
1895 struct request *req;
1896 struct nvme_fc_fcp_op *op;
1897
1898 req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
1899 if (!req) {
1900 dev_err(queue->ctrl->ctrl.device,
1901 "tag 0x%x on QNum %#x not found\n",
1902 tag, queue->qnum);
1903 return 0;
1904 }
1905
1906 op = blk_mq_rq_to_pdu(req);
1907
1908 if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
1909 (ctrl->lport->ops->poll_queue))
1910 ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
1911 queue->lldd_handle);
1912
1913 return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
1914}
1915
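/*
 * Issue an async event (AEN) command on the admin queue using the
 * dedicated aen_op for the given index; no block layer request is
 * associated with it.
 */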
1916static void
1917nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
1918{
1919 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
1920 struct nvme_fc_fcp_op *aen_op;
1921 int ret;
1922
1923	if (aer_idx >= NVME_FC_NR_AEN_COMMANDS)
1924 return;
1925
1926 aen_op = &ctrl->aen_ops[aer_idx];
1927
1928 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
1929 NVMEFC_FCP_NODATA);
1930 if (ret)
1931 dev_err(ctrl->ctrl.device,
1932 "failed async event work [%d]\n", aer_idx);
1933}
1934
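/*
 * blk-mq ->complete handler: mark the fcp op idle, clean up the nvme
 * command, unmap any data, complete the request, and drop the
 * controller reference taken when the op was started.
 */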
1935static void
1936nvme_fc_complete_rq(struct request *rq)
1937{
1938 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1939 struct nvme_fc_ctrl *ctrl = op->ctrl;
Christoph Hellwig77f02a72017-03-30 13:41:32 +02001940 int state;
James Smarte3994412016-12-02 00:28:42 -08001941
1942 state = atomic_xchg(&op->state, FCPOP_STATE_IDLE);
1943
1944 nvme_cleanup_cmd(rq);
James Smarte3994412016-12-02 00:28:42 -08001945 nvme_fc_unmap_data(ctrl, rq, op);
Christoph Hellwig77f02a72017-03-30 13:41:32 +02001946 nvme_complete_rq(rq);
James Smarte3994412016-12-02 00:28:42 -08001947 nvme_fc_ctrl_put(ctrl);
1948
James Smarte3994412016-12-02 00:28:42 -08001949}
1950
Eric Biggersf363b082017-03-30 13:39:16 -07001951static const struct blk_mq_ops nvme_fc_mq_ops = {
James Smarte3994412016-12-02 00:28:42 -08001952 .queue_rq = nvme_fc_queue_rq,
1953 .complete = nvme_fc_complete_rq,
1954 .init_request = nvme_fc_init_request,
1955 .exit_request = nvme_fc_exit_request,
1956 .reinit_request = nvme_fc_reinit_request,
1957 .init_hctx = nvme_fc_init_hctx,
1958 .poll = nvme_fc_poll,
1959 .timeout = nvme_fc_timeout,
1960};
1961
Eric Biggersf363b082017-03-30 13:39:16 -07001962static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
James Smarte3994412016-12-02 00:28:42 -08001963 .queue_rq = nvme_fc_queue_rq,
1964 .complete = nvme_fc_complete_rq,
1965 .init_request = nvme_fc_init_admin_request,
1966 .exit_request = nvme_fc_exit_request,
1967 .reinit_request = nvme_fc_reinit_request,
1968 .init_hctx = nvme_fc_init_admin_hctx,
1969 .timeout = nvme_fc_timeout,
1970};
1971
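/*
 * Create and connect the admin queue: FC-level connect via
 * nvme_fc_connect_admin_queue(), admin tag set and blk-mq queue
 * allocation, LLDD hw queue creation, fabrics Connect, CAP read,
 * controller enable, Identify, and keep-alive start.
 */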
1972static int
1973nvme_fc_configure_admin_queue(struct nvme_fc_ctrl *ctrl)
1974{
1975 u32 segs;
1976 int error;
1977
1978 nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH);
1979
1980 error = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
1981 NVME_FC_AQ_BLKMQ_DEPTH,
1982 (NVME_FC_AQ_BLKMQ_DEPTH / 4));
1983 if (error)
1984 return error;
1985
1986 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
1987 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
1988 ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH;
1989 ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
1990 ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
1991 ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
1992 (SG_CHUNK_SIZE *
1993 sizeof(struct scatterlist)) +
1994 ctrl->lport->ops->fcprqst_priv_sz;
1995 ctrl->admin_tag_set.driver_data = ctrl;
1996 ctrl->admin_tag_set.nr_hw_queues = 1;
1997 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
1998
1999 error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
2000 if (error)
2001 goto out_free_queue;
2002
2003 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
2004 if (IS_ERR(ctrl->ctrl.admin_q)) {
2005 error = PTR_ERR(ctrl->ctrl.admin_q);
2006 goto out_free_tagset;
2007 }
2008
2009 error = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
2010 NVME_FC_AQ_BLKMQ_DEPTH);
2011 if (error)
2012 goto out_cleanup_queue;
2013
2014 error = nvmf_connect_admin_queue(&ctrl->ctrl);
2015 if (error)
2016 goto out_delete_hw_queue;
2017
2018 error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
2019 if (error) {
2020 dev_err(ctrl->ctrl.device,
2021 "prop_get NVME_REG_CAP failed\n");
2022 goto out_delete_hw_queue;
2023 }
2024
2025 ctrl->ctrl.sqsize =
2026 min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
2027
2028 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
2029 if (error)
2030 goto out_delete_hw_queue;
2031
2032 segs = min_t(u32, NVME_FC_MAX_SEGMENTS,
2033 ctrl->lport->ops->max_sgl_segments);
2034 ctrl->ctrl.max_hw_sectors = (segs - 1) << (PAGE_SHIFT - 9);
2035
2036 error = nvme_init_identify(&ctrl->ctrl);
2037 if (error)
2038 goto out_delete_hw_queue;
2039
2040 nvme_start_keep_alive(&ctrl->ctrl);
2041
2042 return 0;
2043
2044out_delete_hw_queue:
2045 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2046out_cleanup_queue:
2047 blk_cleanup_queue(ctrl->ctrl.admin_q);
2048out_free_tagset:
2049 blk_mq_free_tag_set(&ctrl->admin_tag_set);
2050out_free_queue:
2051 nvme_fc_free_queue(&ctrl->queues[0]);
2052 return error;
2053}
2054
2055/*
2056 * This routine is used by the transport when it needs to find active
2057 * io on a queue that is to be terminated. The transport uses
2058 * blk_mq_tagset_busy_iter() to find the busy requests, which then
2059 * invokes this routine to kill them one by one.
2060 *
2061 * As FC allocates an FC exchange for each io, the transport must contact
2062 * the LLDD to terminate the exchange, thus releasing the FC exchange.
2063 * After terminating the exchange the LLDD will call the transport's
2064 * normal io done path for the request, but it will have an aborted
2065 * status. The done path will return the io request back to the block
2066 * layer with an error status.
2067 */
2068static void
2069nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2070{
2071 struct nvme_ctrl *nctrl = data;
2072 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2073 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2074	int status;
2075
2076 if (!blk_mq_request_started(req))
2077 return;
2078
2079 /* this performs an ABTS-LS on the FC exchange for the io */
2080 status = __nvme_fc_abort_op(ctrl, op);
2081	/*
2082	 * if __nvme_fc_abort_op failed, the io wasn't active to abort:
2083	 * consider it done and assume the completion path is already
2084	 * completing it in parallel.
2085	 */
2086	if (status)
2087		return;
2090}
2091
2092
2093/*
2094 * This routine stops operation of the controller. Admin and IO queues
2095 * are stopped, outstanding ios on them terminated, and the nvme ctrl
2096 * is shut down.
2097 */
2098static void
2099nvme_fc_shutdown_ctrl(struct nvme_fc_ctrl *ctrl)
2100{
2101 /*
2102 * If io queues are present, stop them and terminate all outstanding
2103	 * ios on them. As FC allocates an FC exchange for each io, the
2104	 * transport must contact the LLDD to terminate the exchange,
2105	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
2106	 * to tell us which ios are busy and invoke a transport routine
2107 * to kill them with the LLDD. After terminating the exchange
2108 * the LLDD will call the transport's normal io done path, but it
2109 * will have an aborted status. The done path will return the
2110 * io requests back to the block layer as part of normal completions
2111 * (but with error status).
2112 */
2113 if (ctrl->queue_count > 1) {
2114 nvme_stop_queues(&ctrl->ctrl);
2115 blk_mq_tagset_busy_iter(&ctrl->tag_set,
2116 nvme_fc_terminate_exchange, &ctrl->ctrl);
2117 }
2118
2119 if (ctrl->ctrl.state == NVME_CTRL_LIVE)
2120 nvme_shutdown_ctrl(&ctrl->ctrl);
2121
2122 /*
2123	 * Now clean up the admin queue. Same as above:
2124	 * use blk_mq_tagset_busy_iter() and the transport routine to
2125 * terminate the exchanges.
2126 */
2127 blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
2128 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2129 nvme_fc_terminate_exchange, &ctrl->ctrl);
2130}
2131
2132/*
2133 * Called to tear down an association.
2134 * May be called with the association fully or only partially in place.
2135 */
2136static void
2137__nvme_fc_remove_ctrl(struct nvme_fc_ctrl *ctrl)
2138{
2139 nvme_stop_keep_alive(&ctrl->ctrl);
2140
2141 /* stop and terminate ios on admin and io queues */
2142 nvme_fc_shutdown_ctrl(ctrl);
2143
2144 /*
2145 * tear down the controller
2146	 * This will cause the last reference on the nvme ctrl to
2147	 * expire, calling the transport's nvme_fc_free_nvme_ctrl() callback.
2148	 * From there, the transport will tear down its logical queues and
2149 * association.
2150 */
2151 nvme_uninit_ctrl(&ctrl->ctrl);
2152
2153 nvme_put_ctrl(&ctrl->ctrl);
2154}
2155
2156static void
2157nvme_fc_del_ctrl_work(struct work_struct *work)
2158{
2159 struct nvme_fc_ctrl *ctrl =
2160 container_of(work, struct nvme_fc_ctrl, delete_work);
2161
2162 __nvme_fc_remove_ctrl(ctrl);
2163}
2164
2165static int
2166__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
2167{
2168 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
2169 return -EBUSY;
2170
2171 if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
2172 return -EBUSY;
2173
2174 return 0;
2175}
2176
2177/*
2178 * Request from nvme core layer to delete the controller
2179 */
2180static int
2181nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
2182{
2183 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2184 struct nvme_fc_rport *rport = ctrl->rport;
2185 unsigned long flags;
2186 int ret;
2187
2188 spin_lock_irqsave(&rport->lock, flags);
2189 ret = __nvme_fc_del_ctrl(ctrl);
2190 spin_unlock_irqrestore(&rport->lock, flags);
2191 if (ret)
2192 return ret;
2193
2194 flush_work(&ctrl->delete_work);
2195
2196 return 0;
2197}
2198
2199static int
2200nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
2201{
2202 return -EIO;
2203}
2204
2205static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
2206 .name = "fc",
2207 .module = THIS_MODULE,
2208 .is_fabrics = true,
2209 .reg_read32 = nvmf_reg_read32,
2210 .reg_read64 = nvmf_reg_read64,
2211 .reg_write32 = nvmf_reg_write32,
2212 .reset_ctrl = nvme_fc_reset_nvme_ctrl,
2213 .free_ctrl = nvme_fc_free_nvme_ctrl,
2214 .submit_async_event = nvme_fc_submit_async_event,
2215 .delete_ctrl = nvme_fc_del_nvme_ctrl,
2216 .get_subsysnqn = nvmf_get_subsysnqn,
2217 .get_address = nvmf_get_address,
2218};
2219
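/*
 * Negotiate the io queue count with the controller, then allocate the
 * io tag set and connect_q, create the LLDD hw queues, and issue the
 * fabrics Connect on each io queue.
 */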
2220static int
2221nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2222{
2223 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2224 int ret;
2225
2226 ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
2227 if (ret) {
2228 dev_info(ctrl->ctrl.device,
2229 "set_queue_count failed: %d\n", ret);
2230 return ret;
2231 }
2232
2233 ctrl->queue_count = opts->nr_io_queues + 1;
2234 if (!opts->nr_io_queues)
2235 return 0;
2236
2237 dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
2238 opts->nr_io_queues);
2239
2240 nvme_fc_init_io_queues(ctrl);
2241
2242 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2243 ctrl->tag_set.ops = &nvme_fc_mq_ops;
2244 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2245 ctrl->tag_set.reserved_tags = 1; /* fabric connect */
2246 ctrl->tag_set.numa_node = NUMA_NO_NODE;
2247 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2248 ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
2249 (SG_CHUNK_SIZE *
2250 sizeof(struct scatterlist)) +
2251 ctrl->lport->ops->fcprqst_priv_sz;
2252 ctrl->tag_set.driver_data = ctrl;
2253 ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
2254 ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2255
2256 ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2257 if (ret)
2258 return ret;
2259
2260 ctrl->ctrl.tagset = &ctrl->tag_set;
2261
2262 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2263 if (IS_ERR(ctrl->ctrl.connect_q)) {
2264 ret = PTR_ERR(ctrl->ctrl.connect_q);
2265 goto out_free_tag_set;
2266 }
2267
2268 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2269 if (ret)
2270 goto out_cleanup_blk_queue;
2271
2272 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2273 if (ret)
2274 goto out_delete_hw_queues;
2275
2276 return 0;
2277
2278out_delete_hw_queues:
2279 nvme_fc_delete_hw_io_queues(ctrl);
2280out_cleanup_blk_queue:
2281 nvme_stop_keep_alive(&ctrl->ctrl);
2282 blk_cleanup_queue(ctrl->ctrl.connect_q);
2283out_free_tag_set:
2284 blk_mq_free_tag_set(&ctrl->tag_set);
2285 nvme_fc_free_io_queues(ctrl);
2286
2287 /* force put free routine to ignore io queues */
2288 ctrl->ctrl.tagset = NULL;
2289
2290 return ret;
2291}
2292
2293
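/*
 * Allocate and bring up an FC nvme controller on the given lport/rport
 * pair: initialize the nvme ctrl, configure the admin queue, sanity
 * check icdoff/maxcmd, set up AEN ops and (optionally) io queues, then
 * mark the controller LIVE and link it onto the rport's ctrl_list.
 */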
2294static struct nvme_ctrl *
2295__nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2296 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
2297{
2298 struct nvme_fc_ctrl *ctrl;
2299 unsigned long flags;
2300 int ret, idx;
2301 bool changed;
2302
2303 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2304 if (!ctrl) {
2305 ret = -ENOMEM;
2306 goto out_fail;
2307 }
2308
2309 idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
2310 if (idx < 0) {
2311 ret = -ENOSPC;
2312 goto out_free_ctrl;
2313 }
2314
2315 ctrl->ctrl.opts = opts;
2316 INIT_LIST_HEAD(&ctrl->ctrl_list);
James Smarte3994412016-12-02 00:28:42 -08002317 ctrl->lport = lport;
2318 ctrl->rport = rport;
2319 ctrl->dev = lport->dev;
2320 ctrl->state = FCCTRL_INIT;
2321 ctrl->cnum = idx;
2322
2323 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
2324 if (ret)
2325 goto out_free_ida;
2326
2327 get_device(ctrl->dev);
2328 kref_init(&ctrl->ref);
2329
2330 INIT_WORK(&ctrl->delete_work, nvme_fc_del_ctrl_work);
2331 spin_lock_init(&ctrl->lock);
2332
2333 /* io queue count */
2334 ctrl->queue_count = min_t(unsigned int,
2335 opts->nr_io_queues,
2336 lport->ops->max_hw_queues);
2337 opts->nr_io_queues = ctrl->queue_count; /* so opts has valid value */
2338 ctrl->queue_count++; /* +1 for admin queue */
2339
2340 ctrl->ctrl.sqsize = opts->queue_size - 1;
2341 ctrl->ctrl.kato = opts->kato;
2342
2343 ret = -ENOMEM;
2344 ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue),
2345 GFP_KERNEL);
2346 if (!ctrl->queues)
2347 goto out_uninit_ctrl;
2348
2349 ret = nvme_fc_configure_admin_queue(ctrl);
2350 if (ret)
2351 goto out_uninit_ctrl;
2352
2353 /* sanity checks */
2354
James Smarte3994412016-12-02 00:28:42 -08002355 /* FC-NVME does not have other data in the capsule */
2356 if (ctrl->ctrl.icdoff) {
2357 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
2358 ctrl->ctrl.icdoff);
2359 goto out_remove_admin_queue;
2360 }
2361
2362 /* FC-NVME supports normal SGL Data Block Descriptors */
2363
2364 if (opts->queue_size > ctrl->ctrl.maxcmd) {
2365 /* warn if maxcmd is lower than queue_size */
2366 dev_warn(ctrl->ctrl.device,
2367			"queue_size %zu > ctrl maxcmd %u, reducing "
2368			"queue_size to maxcmd\n",
2369 opts->queue_size, ctrl->ctrl.maxcmd);
2370 opts->queue_size = ctrl->ctrl.maxcmd;
2371 }
2372
2373 ret = nvme_fc_init_aen_ops(ctrl);
2374 if (ret)
2375 goto out_exit_aen_ops;
2376
2377 if (ctrl->queue_count > 1) {
2378 ret = nvme_fc_create_io_queues(ctrl);
2379 if (ret)
2380 goto out_exit_aen_ops;
2381 }
2382
2383 spin_lock_irqsave(&ctrl->lock, flags);
2384 ctrl->state = FCCTRL_ACTIVE;
2385 spin_unlock_irqrestore(&ctrl->lock, flags);
2386
2387 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
2388 WARN_ON_ONCE(!changed);
2389
2390 dev_info(ctrl->ctrl.device,
James Smartc7034892016-12-20 11:06:08 -08002391 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
2392 ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
James Smarte3994412016-12-02 00:28:42 -08002393
2394 kref_get(&ctrl->ctrl.kref);
2395
2396 spin_lock_irqsave(&rport->lock, flags);
2397 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
2398 spin_unlock_irqrestore(&rport->lock, flags);
2399
2400 if (opts->nr_io_queues) {
2401 nvme_queue_scan(&ctrl->ctrl);
2402 nvme_queue_async_events(&ctrl->ctrl);
2403 }
2404
2405 return &ctrl->ctrl;
2406
2407out_exit_aen_ops:
2408 nvme_fc_exit_aen_ops(ctrl);
2409out_remove_admin_queue:
2410 /* send a Disconnect(association) LS to fc-nvme target */
2411 nvme_fc_xmt_disconnect_assoc(ctrl);
2412 nvme_stop_keep_alive(&ctrl->ctrl);
2413 nvme_fc_destroy_admin_queue(ctrl);
2414out_uninit_ctrl:
2415 nvme_uninit_ctrl(&ctrl->ctrl);
2416 nvme_put_ctrl(&ctrl->ctrl);
2417 if (ret > 0)
2418 ret = -EIO;
2419	/* exit via here will follow the ctrl ref-count callbacks to free */
2420 return ERR_PTR(ret);
2421
2422out_free_ida:
2423 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
2424out_free_ctrl:
2425 kfree(ctrl);
2426out_fail:
2427 nvme_fc_rport_put(rport);
2428	/* exit via here doesn't follow the ctrl ref-count callbacks */
2429 return ERR_PTR(ret);
2430}
2431
2432enum {
2433 FCT_TRADDR_ERR = 0,
2434 FCT_TRADDR_WWNN = 1 << 0,
2435 FCT_TRADDR_WWPN = 1 << 1,
2436};
2437
2438struct nvmet_fc_traddr {
2439 u64 nn;
2440 u64 pn;
2441};
2442
2443static const match_table_t traddr_opt_tokens = {
2444 { FCT_TRADDR_WWNN, "nn-%s" },
2445 { FCT_TRADDR_WWPN, "pn-%s" },
2446 { FCT_TRADDR_ERR, NULL }
2447};
2448
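/*
 * Parse a transport address of the form "nn-<wwnn>:pn-<wwpn>" into a
 * node name/port name pair. The values are parsed as u64s, so hex with
 * a leading 0x works; e.g. (illustrative values)
 * "nn-0x20000090fa942779:pn-0x10000090fa942779".
 */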
2449static int
2450nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf)
2451{
2452 substring_t args[MAX_OPT_ARGS];
2453 char *options, *o, *p;
2454 int token, ret = 0;
2455 u64 token64;
2456
2457 options = o = kstrdup(buf, GFP_KERNEL);
2458 if (!options)
2459 return -ENOMEM;
2460
2461 while ((p = strsep(&o, ":\n")) != NULL) {
2462 if (!*p)
2463 continue;
2464
2465 token = match_token(p, traddr_opt_tokens, args);
2466 switch (token) {
2467 case FCT_TRADDR_WWNN:
2468 if (match_u64(args, &token64)) {
2469 ret = -EINVAL;
2470 goto out;
2471 }
2472 traddr->nn = token64;
2473 break;
2474 case FCT_TRADDR_WWPN:
2475 if (match_u64(args, &token64)) {
2476 ret = -EINVAL;
2477 goto out;
2478 }
2479 traddr->pn = token64;
2480 break;
2481 default:
2482 pr_warn("unknown traddr token or missing value '%s'\n",
2483 p);
2484 ret = -EINVAL;
2485 goto out;
2486 }
2487 }
2488
2489out:
2490 kfree(options);
2491 return ret;
2492}
2493
2494static struct nvme_ctrl *
2495nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
2496{
2497 struct nvme_fc_lport *lport;
2498 struct nvme_fc_rport *rport;
2499 struct nvmet_fc_traddr laddr = { 0L, 0L };
2500 struct nvmet_fc_traddr raddr = { 0L, 0L };
2501 unsigned long flags;
2502 int ret;
2503
2504 ret = nvme_fc_parse_address(&raddr, opts->traddr);
2505 if (ret || !raddr.nn || !raddr.pn)
2506 return ERR_PTR(-EINVAL);
2507
2508 ret = nvme_fc_parse_address(&laddr, opts->host_traddr);
2509 if (ret || !laddr.nn || !laddr.pn)
2510 return ERR_PTR(-EINVAL);
2511
2512 /* find the host and remote ports to connect together */
2513 spin_lock_irqsave(&nvme_fc_lock, flags);
2514 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
2515 if (lport->localport.node_name != laddr.nn ||
2516 lport->localport.port_name != laddr.pn)
2517 continue;
2518
2519 list_for_each_entry(rport, &lport->endp_list, endp_list) {
2520 if (rport->remoteport.node_name != raddr.nn ||
2521 rport->remoteport.port_name != raddr.pn)
2522 continue;
2523
2524			/* if we fail to get a reference, fall through and error out */
2525 if (!nvme_fc_rport_get(rport))
2526 break;
2527
2528 spin_unlock_irqrestore(&nvme_fc_lock, flags);
2529
2530 return __nvme_fc_create_ctrl(dev, opts, lport, rport);
2531 }
2532 }
2533 spin_unlock_irqrestore(&nvme_fc_lock, flags);
2534
2535 return ERR_PTR(-ENOENT);
2536}
2537
2538
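/*
 * Registration of the "fc" fabrics transport: traddr and host_traddr
 * are required, reconnect_delay is optional. As an illustrative
 * sketch (option spellings per nvme-cli, not verified here), a
 * connect could look like:
 *
 *   nvme connect --transport=fc \
 *	--traddr=nn-0x20000090fa942779:pn-0x10000090fa942779 \
 *	--host-traddr=nn-0x20000090fa927590:pn-0x10000090fa927590 \
 *	--nqn=<subsystem NQN>
 */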
2539static struct nvmf_transport_ops nvme_fc_transport = {
2540 .name = "fc",
2541 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
2542 .allowed_opts = NVMF_OPT_RECONNECT_DELAY,
2543 .create_ctrl = nvme_fc_create_ctrl,
2544};
2545
2546static int __init nvme_fc_init_module(void)
2547{
Sagi Grimbergc0e4a6f2017-03-19 14:16:05 +02002548 int ret;
2549
James Smarte3994412016-12-02 00:28:42 -08002550 nvme_fc_wq = create_workqueue("nvme_fc_wq");
2551 if (!nvme_fc_wq)
2552 return -ENOMEM;
2553
Sagi Grimbergc0e4a6f2017-03-19 14:16:05 +02002554 ret = nvmf_register_transport(&nvme_fc_transport);
2555 if (ret)
2556 goto err;
2557
2558 return 0;
2559err:
2560 destroy_workqueue(nvme_fc_wq);
2561 return ret;
James Smarte3994412016-12-02 00:28:42 -08002562}
2563
2564static void __exit nvme_fc_exit_module(void)
2565{
2566 /* sanity check - all lports should be removed */
2567 if (!list_empty(&nvme_fc_lport_list))
2568 pr_warn("%s: localport list not empty\n", __func__);
2569
2570 nvmf_unregister_transport(&nvme_fc_transport);
2571
2572 destroy_workqueue(nvme_fc_wq);
2573
2574 ida_destroy(&nvme_fc_local_port_cnt);
2575 ida_destroy(&nvme_fc_ctrl_cnt);
2576}
2577
2578module_init(nvme_fc_init_module);
2579module_exit(nvme_fc_exit_module);
2580
2581MODULE_LICENSE("GPL v2");