/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

/* *************************** Data Structures/Defines ****************** */


/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_FC_NR_AEN_COMMANDS	1
#define NVME_FC_AQ_BLKMQ_DEPTH	\
        (NVMF_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
#define AEN_CMDID_BASE		(NVME_FC_AQ_BLKMQ_DEPTH + 1)

enum nvme_fc_queue_flags {
        NVME_FC_Q_CONNECTED = (1 << 0),
};

#define NVMEFC_QUEUE_DELAY	3	/* ms units */

struct nvme_fc_queue {
        struct nvme_fc_ctrl	*ctrl;
        struct device		*dev;
        struct blk_mq_hw_ctx	*hctx;
        void			*lldd_handle;
        int			queue_size;
        size_t			cmnd_capsule_len;
        u32			qnum;
        u32			rqcnt;
        u32			seqno;

        u64			connection_id;
        atomic_t		csn;

        unsigned long		flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcop_flags {
        FCOP_FLAGS_TERMIO	= (1 << 0),
        FCOP_FLAGS_RELEASED	= (1 << 1),
        FCOP_FLAGS_COMPLETE	= (1 << 2),
};

struct nvmefc_ls_req_op {
        struct nvmefc_ls_req	ls_req;

        struct nvme_fc_rport	*rport;
        struct nvme_fc_queue	*queue;
        struct request		*rq;
        u32			flags;

        int			ls_error;
        struct completion	ls_done;
        struct list_head	lsreq_list;	/* rport->ls_req_list */
        bool			req_queued;
};

enum nvme_fcpop_state {
        FCPOP_STATE_UNINIT	= 0,
        FCPOP_STATE_IDLE	= 1,
        FCPOP_STATE_ACTIVE	= 2,
        FCPOP_STATE_ABORTED	= 3,
};

struct nvme_fc_fcp_op {
        struct nvme_request	nreq;		/*
                                                 * nvme/host/core.c
                                                 * requires this to be
                                                 * the 1st element in the
                                                 * private structure
                                                 * associated with the
                                                 * request.
                                                 */
        struct nvmefc_fcp_req	fcp_req;

        struct nvme_fc_ctrl	*ctrl;
        struct nvme_fc_queue	*queue;
        struct request		*rq;

        atomic_t		state;
        u32			rqno;
        u32			nents;

        struct nvme_fc_cmd_iu	cmd_iu;
        struct nvme_fc_ersp_iu	rsp_iu;
};

struct nvme_fc_lport {
        struct nvme_fc_local_port	localport;

        struct ida			endp_cnt;
        struct list_head		port_list;	/* nvme_fc_lport_list */
        struct list_head		endp_list;
        struct device			*dev;	/* physical device for dma */
        struct nvme_fc_port_template	*ops;
        struct kref			ref;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

struct nvme_fc_rport {
        struct nvme_fc_remote_port	remoteport;

        struct list_head		endp_list;	/* for lport->endp_list */
        struct list_head		ctrl_list;
        struct list_head		ls_req_list;
        struct device			*dev;	/* physical device for dma */
        struct nvme_fc_lport		*lport;
        spinlock_t			lock;
        struct kref			ref;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcctrl_state {
        FCCTRL_INIT		= 0,
        FCCTRL_ACTIVE		= 1,
};

struct nvme_fc_ctrl {
        spinlock_t		lock;
        struct nvme_fc_queue	*queues;
        u32			queue_count;

        struct device		*dev;
        struct nvme_fc_lport	*lport;
        struct nvme_fc_rport	*rport;
        u32			cnum;

        u64			association_id;

        u64			cap;

        struct list_head	ctrl_list;	/* rport->ctrl_list */

        struct blk_mq_tag_set	admin_tag_set;
        struct blk_mq_tag_set	tag_set;

        struct work_struct	delete_work;
        struct kref		ref;
        int			state;

        struct nvme_fc_fcp_op	aen_ops[NVME_FC_NR_AEN_COMMANDS];

        struct nvme_ctrl	ctrl;
};

static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
        return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
        return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
        return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
        return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
        return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}


/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

static struct workqueue_struct *nvme_fc_wq;


/* *********************** FC-NVME Port Management ************************ */

static int __nvme_fc_del_ctrl(struct nvme_fc_ctrl *);
static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
                        struct nvme_fc_queue *, unsigned int);

/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of an
 *                              NVME host FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvme_fc_local_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
                        struct nvme_fc_port_template *template,
                        struct device *dev,
                        struct nvme_fc_local_port **portptr)
{
        struct nvme_fc_lport *newrec;
        unsigned long flags;
        int ret, idx;

        if (!template->localport_delete || !template->remoteport_delete ||
            !template->ls_req || !template->fcp_io ||
            !template->ls_abort || !template->fcp_abort ||
            !template->max_hw_queues || !template->max_sgl_segments ||
            !template->max_dif_sgl_segments || !template->dma_boundary) {
                ret = -EINVAL;
                goto out_reghost_failed;
        }

        newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
                         GFP_KERNEL);
        if (!newrec) {
                ret = -ENOMEM;
                goto out_reghost_failed;
        }

        idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
        if (idx < 0) {
                ret = -ENOSPC;
                goto out_fail_kfree;
        }

        if (!get_device(dev) && dev) {
                ret = -ENODEV;
                goto out_ida_put;
        }

        INIT_LIST_HEAD(&newrec->port_list);
        INIT_LIST_HEAD(&newrec->endp_list);
        kref_init(&newrec->ref);
        newrec->ops = template;
        newrec->dev = dev;
        ida_init(&newrec->endp_cnt);
        newrec->localport.private = &newrec[1];
        newrec->localport.node_name = pinfo->node_name;
        newrec->localport.port_name = pinfo->port_name;
        newrec->localport.port_role = pinfo->port_role;
        newrec->localport.port_id = pinfo->port_id;
        newrec->localport.port_state = FC_OBJSTATE_ONLINE;
        newrec->localport.port_num = idx;

        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        if (dev)
                dma_set_seg_boundary(dev, template->dma_boundary);

        *portptr = &newrec->localport;
        return 0;

out_ida_put:
        ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
        kfree(newrec);
out_reghost_failed:
        *portptr = NULL;

        return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);

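/*
 * kref release callback for a local port: removes the lport from the
 * transport's port list, lets the LLDD know via localport_delete, and
 * releases the port number and the device reference taken at
 * registration time.
 */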
static void
nvme_fc_free_lport(struct kref *ref)
{
        struct nvme_fc_lport *lport =
                container_of(ref, struct nvme_fc_lport, ref);
        unsigned long flags;

        WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
        WARN_ON(!list_empty(&lport->endp_list));

        /* remove from transport list */
        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_del(&lport->port_list);
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        /* let the LLDD know we've finished tearing it down */
        lport->ops->localport_delete(&lport->localport);

        ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
        ida_destroy(&lport->endp_cnt);

        put_device(lport->dev);

        kfree(lport);
}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
        kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
        return kref_get_unless_zero(&lport->ref);
}

/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
        struct nvme_fc_lport *lport = localport_to_lport(portptr);
        unsigned long flags;

        if (!portptr)
                return -EINVAL;

        spin_lock_irqsave(&nvme_fc_lock, flags);

        if (portptr->port_state != FC_OBJSTATE_ONLINE) {
                spin_unlock_irqrestore(&nvme_fc_lock, flags);
                return -EINVAL;
        }
        portptr->port_state = FC_OBJSTATE_DELETED;

        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        nvme_fc_lport_put(lport);

        return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);

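/*
 * Example usage of the local port API above (a minimal, hypothetical
 * sketch -- the example_* callbacks and sizes are placeholders, not
 * part of this file):
 *
 *	static struct nvme_fc_port_template example_template = {
 *		.localport_delete	= example_localport_delete,
 *		.remoteport_delete	= example_remoteport_delete,
 *		.create_queue		= example_create_queue,
 *		.delete_queue		= example_delete_queue,
 *		.ls_req			= example_ls_req,
 *		.fcp_io			= example_fcp_io,
 *		.ls_abort		= example_ls_abort,
 *		.fcp_abort		= example_fcp_abort,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 64,
 *		.max_dif_sgl_segments	= 64,
 *		.dma_boundary		= 0xFFFFFFFF,
 *		.local_priv_sz		= sizeof(struct example_lport_priv),
 *		.remote_priv_sz		= sizeof(struct example_rport_priv),
 *		.lsrqst_priv_sz		= sizeof(struct example_ls_priv),
 *	};
 *
 *	struct nvme_fc_port_info pinfo = {
 *		.node_name = wwnn,
 *		.port_name = wwpn,
 *		.port_role = FC_PORT_ROLE_NVME_INITIATOR,
 *		.port_id   = port_did,
 *	};
 *	struct nvme_fc_local_port *localport;
 *
 *	ret = nvme_fc_register_localport(&pinfo, &example_template,
 *					 &pdev->dev, &localport);
 *	...
 *	nvme_fc_unregister_localport(localport);
 *
 * The ops and limits shown are the ones validated as mandatory in
 * nvme_fc_register_localport().
 */
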
/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                              LLDD to register the existence of an
 *                              NVME subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
                                struct nvme_fc_port_info *pinfo,
                                struct nvme_fc_remote_port **portptr)
{
        struct nvme_fc_lport *lport = localport_to_lport(localport);
        struct nvme_fc_rport *newrec;
        unsigned long flags;
        int ret, idx;

        newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
                         GFP_KERNEL);
        if (!newrec) {
                ret = -ENOMEM;
                goto out_reghost_failed;
        }

        if (!nvme_fc_lport_get(lport)) {
                ret = -ESHUTDOWN;
                goto out_kfree_rport;
        }

        idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
        if (idx < 0) {
                ret = -ENOSPC;
                goto out_lport_put;
        }

        INIT_LIST_HEAD(&newrec->endp_list);
        INIT_LIST_HEAD(&newrec->ctrl_list);
        INIT_LIST_HEAD(&newrec->ls_req_list);
        kref_init(&newrec->ref);
        spin_lock_init(&newrec->lock);
        newrec->remoteport.localport = &lport->localport;
        newrec->dev = lport->dev;
        newrec->lport = lport;
        newrec->remoteport.private = &newrec[1];
        newrec->remoteport.port_role = pinfo->port_role;
        newrec->remoteport.node_name = pinfo->node_name;
        newrec->remoteport.port_name = pinfo->port_name;
        newrec->remoteport.port_id = pinfo->port_id;
        newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
        newrec->remoteport.port_num = idx;

        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_add_tail(&newrec->endp_list, &lport->endp_list);
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        *portptr = &newrec->remoteport;
        return 0;

out_lport_put:
        nvme_fc_lport_put(lport);
out_kfree_rport:
        kfree(newrec);
out_reghost_failed:
        *portptr = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);

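/*
 * kref release callback for a remote port: removes the rport from its
 * lport's endpoint list, lets the LLDD know via remoteport_delete,
 * releases the port number, and drops the reference held on the lport
 * since registration.
 */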
static void
nvme_fc_free_rport(struct kref *ref)
{
        struct nvme_fc_rport *rport =
                container_of(ref, struct nvme_fc_rport, ref);
        struct nvme_fc_lport *lport =
                localport_to_lport(rport->remoteport.localport);
        unsigned long flags;

        WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
        WARN_ON(!list_empty(&rport->ctrl_list));

        /* remove from lport list */
        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_del(&rport->endp_list);
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        /* let the LLDD know we've finished tearing it down */
        lport->ops->remoteport_delete(&rport->remoteport);

        ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

        kfree(rport);

        nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
        kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
        return kref_get_unless_zero(&rport->ref);
}

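/*
 * Ask the LLDD to abort every LS request still pending on the rport.
 * The list scan restarts from the top after each ls_abort() callout,
 * as the rport lock must be dropped around the call into the LLDD.
 */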
static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
        struct nvmefc_ls_req_op *lsop;
        unsigned long flags;

restart:
        spin_lock_irqsave(&rport->lock, flags);

        list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
                if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
                        lsop->flags |= FCOP_FLAGS_TERMIO;
                        spin_unlock_irqrestore(&rport->lock, flags);
                        rport->lport->ops->ls_abort(&rport->lport->localport,
                                                &rport->remoteport,
                                                &lsop->ls_req);
                        goto restart;
                }
        }
        spin_unlock_irqrestore(&rport->lock, flags);

        return 0;
}

/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
        struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
        struct nvme_fc_ctrl *ctrl;
        unsigned long flags;

        if (!portptr)
                return -EINVAL;

        spin_lock_irqsave(&rport->lock, flags);

        if (portptr->port_state != FC_OBJSTATE_ONLINE) {
                spin_unlock_irqrestore(&rport->lock, flags);
                return -EINVAL;
        }
        portptr->port_state = FC_OBJSTATE_DELETED;

        /* tear down all associations to the remote port */
        list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
                __nvme_fc_del_ctrl(ctrl);

        spin_unlock_irqrestore(&rport->lock, flags);

        nvme_fc_abort_lsops(rport);

        nvme_fc_rport_put(rport);
        return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);

/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
                enum dma_data_direction dir)
{
        return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
        enum dma_data_direction dir)
{
        if (dev)
                dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir)
{
        if (dev)
                dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir)
{
        if (dev)
                dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
        struct scatterlist *s;
        int i;

        WARN_ON(nents == 0 || sg[0].length == 0);

        for_each_sg(sg, s, nents, i) {
                s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
                s->dma_length = s->length;
#endif
        }
        return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
{
        return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
{
        if (dev)
                dma_unmap_sg(dev, sg, nents, dir);
}


/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);

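/*
 * Common LS completion bookkeeping: unlink the op from the rport's
 * ls_req_list, unmap the request/response buffers, and drop the rport
 * reference taken when the request was sent.
 */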
static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
        struct nvme_fc_rport *rport = lsop->rport;
        struct nvmefc_ls_req *lsreq = &lsop->ls_req;
        unsigned long flags;

        spin_lock_irqsave(&rport->lock, flags);

        if (!lsop->req_queued) {
                spin_unlock_irqrestore(&rport->lock, flags);
                return;
        }

        list_del(&lsop->lsreq_list);

        lsop->req_queued = false;

        spin_unlock_irqrestore(&rport->lock, flags);

        fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
                                (lsreq->rqstlen + lsreq->rsplen),
                                DMA_BIDIRECTIONAL);

        nvme_fc_rport_put(rport);
}

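/*
 * Core LS send path: dma-maps the request/response buffers, links the
 * op onto the rport's ls_req_list, and hands it to the LLDD via the
 * ls_req template entry point. Completion is signalled through the
 * supplied done callback.
 */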
static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
                struct nvmefc_ls_req_op *lsop,
                void (*done)(struct nvmefc_ls_req *req, int status))
{
        struct nvmefc_ls_req *lsreq = &lsop->ls_req;
        unsigned long flags;
        int ret = 0;

        if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
                return -ECONNREFUSED;

        if (!nvme_fc_rport_get(rport))
                return -ESHUTDOWN;

        lsreq->done = done;
        lsop->rport = rport;
        lsop->req_queued = false;
        INIT_LIST_HEAD(&lsop->lsreq_list);
        init_completion(&lsop->ls_done);

        lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
                                  lsreq->rqstlen + lsreq->rsplen,
                                  DMA_BIDIRECTIONAL);
        if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
                ret = -EFAULT;
                goto out_putrport;
        }
        lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

        spin_lock_irqsave(&rport->lock, flags);

        list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

        lsop->req_queued = true;

        spin_unlock_irqrestore(&rport->lock, flags);

        ret = rport->lport->ops->ls_req(&rport->lport->localport,
                                        &rport->remoteport, lsreq);
        if (ret)
                goto out_unlink;

        return 0;

out_unlink:
        lsop->ls_error = ret;
        spin_lock_irqsave(&rport->lock, flags);
        lsop->req_queued = false;
        list_del(&lsop->lsreq_list);
        spin_unlock_irqrestore(&rport->lock, flags);
        fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
                                (lsreq->rqstlen + lsreq->rsplen),
                                DMA_BIDIRECTIONAL);
out_putrport:
        nvme_fc_rport_put(rport);

        return ret;
}

static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
        struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

        lsop->ls_error = status;
        complete(&lsop->ls_done);
}

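/*
 * Synchronous LS send: issue the request, then sleep until the LLDD
 * calls back. An LS_RJT response is mapped to -ENXIO.
 */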
static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
        struct nvmefc_ls_req *lsreq = &lsop->ls_req;
        struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
        int ret;

        ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);

        if (!ret) {
                /*
                 * No timeout/not interruptible as we need the struct
                 * to exist until the lldd calls us back. Thus mandate
                 * wait until driver calls back. lldd responsible for
                 * the timeout action
                 */
                wait_for_completion(&lsop->ls_done);

                __nvme_fc_finish_ls_req(lsop);

                ret = lsop->ls_error;
        }

        if (ret)
                return ret;

        /* ACC or RJT payload ? */
        if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
                return -ENXIO;

        return 0;
}

static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
                struct nvmefc_ls_req_op *lsop,
                void (*done)(struct nvmefc_ls_req *req, int status))
{
        /* don't wait for completion */

        return __nvme_fc_send_ls_req(rport, lsop, done);
}

/* Validation Error indexes into the string table below */
enum {
        VERR_NO_ERROR		= 0,
        VERR_LSACC		= 1,
        VERR_LSDESC_RQST	= 2,
        VERR_LSDESC_RQST_LEN	= 3,
        VERR_ASSOC_ID		= 4,
        VERR_ASSOC_ID_LEN	= 5,
        VERR_CONN_ID		= 6,
        VERR_CONN_ID_LEN	= 7,
        VERR_CR_ASSOC		= 8,
        VERR_CR_ASSOC_ACC_LEN	= 9,
        VERR_CR_CONN		= 10,
        VERR_CR_CONN_ACC_LEN	= 11,
        VERR_DISCONN		= 12,
        VERR_DISCONN_ACC_LEN	= 13,
};

static char *validation_errors[] = {
        "OK",
        "Not LS_ACC",
        "Not LSDESC_RQST",
        "Bad LSDESC_RQST Length",
        "Not Association ID",
        "Bad Association ID Length",
        "Not Connection ID",
        "Bad Connection ID Length",
        "Not CR_ASSOC Rqst",
        "Bad CR_ASSOC ACC Length",
        "Not CR_CONN Rqst",
        "Bad CR_CONN ACC Length",
        "Not Disconnect Rqst",
        "Bad Disconnect ACC Length",
};

static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
        struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
        struct nvmefc_ls_req_op *lsop;
        struct nvmefc_ls_req *lsreq;
        struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
        struct fcnvme_ls_cr_assoc_acc *assoc_acc;
        int ret, fcret = 0;

        lsop = kzalloc((sizeof(*lsop) +
                         ctrl->lport->ops->lsrqst_priv_sz +
                         sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
        if (!lsop) {
                ret = -ENOMEM;
                goto out_no_memory;
        }
        lsreq = &lsop->ls_req;

        lsreq->private = (void *)&lsop[1];
        assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
                        (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
        assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];

        assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
        assoc_rqst->desc_list_len =
                        cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

        assoc_rqst->assoc_cmd.desc_tag =
                        cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
        assoc_rqst->assoc_cmd.desc_len =
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

        assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
        assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
        /* Linux supports only Dynamic controllers */
        assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
        memcpy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id,
                min_t(size_t, FCNVME_ASSOC_HOSTID_LEN, sizeof(uuid_be)));
        strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
                min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
        strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
                min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

        lsop->queue = queue;
        lsreq->rqstaddr = assoc_rqst;
        lsreq->rqstlen = sizeof(*assoc_rqst);
        lsreq->rspaddr = assoc_acc;
        lsreq->rsplen = sizeof(*assoc_acc);
        lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

        ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
        if (ret)
                goto out_free_buffer;

        /* process connect LS completion */

        /* validate the ACC response */
        if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
                fcret = VERR_LSACC;
        else if (assoc_acc->hdr.desc_list_len !=
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_ls_cr_assoc_acc)))
                fcret = VERR_CR_ASSOC_ACC_LEN;
        else if (assoc_acc->hdr.rqst.desc_tag !=
                        cpu_to_be32(FCNVME_LSDESC_RQST))
                fcret = VERR_LSDESC_RQST;
        else if (assoc_acc->hdr.rqst.desc_len !=
                        fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
                fcret = VERR_LSDESC_RQST_LEN;
        else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
                fcret = VERR_CR_ASSOC;
        else if (assoc_acc->associd.desc_tag !=
                        cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
                fcret = VERR_ASSOC_ID;
        else if (assoc_acc->associd.desc_len !=
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_lsdesc_assoc_id)))
                fcret = VERR_ASSOC_ID_LEN;
        else if (assoc_acc->connectid.desc_tag !=
                        cpu_to_be32(FCNVME_LSDESC_CONN_ID))
                fcret = VERR_CONN_ID;
        else if (assoc_acc->connectid.desc_len !=
                        fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
                fcret = VERR_CONN_ID_LEN;

        if (fcret) {
                ret = -EBADF;
                dev_err(ctrl->dev,
                        "q %d connect failed: %s\n",
                        queue->qnum, validation_errors[fcret]);
        } else {
                ctrl->association_id =
                        be64_to_cpu(assoc_acc->associd.association_id);
                queue->connection_id =
                        be64_to_cpu(assoc_acc->connectid.connection_id);
                set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
        }

out_free_buffer:
        kfree(lsop);
out_no_memory:
        if (ret)
                dev_err(ctrl->dev,
                        "queue %d connect admin queue failed (%d).\n",
                        queue->qnum, ret);
        return ret;
}

static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
                        u16 qsize, u16 ersp_ratio)
{
        struct nvmefc_ls_req_op *lsop;
        struct nvmefc_ls_req *lsreq;
        struct fcnvme_ls_cr_conn_rqst *conn_rqst;
        struct fcnvme_ls_cr_conn_acc *conn_acc;
        int ret, fcret = 0;

        lsop = kzalloc((sizeof(*lsop) +
                         ctrl->lport->ops->lsrqst_priv_sz +
                         sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
        if (!lsop) {
                ret = -ENOMEM;
                goto out_no_memory;
        }
        lsreq = &lsop->ls_req;

        lsreq->private = (void *)&lsop[1];
        conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
                        (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
        conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];

        conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
        conn_rqst->desc_list_len = cpu_to_be32(
                                sizeof(struct fcnvme_lsdesc_assoc_id) +
                                sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

        conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
        conn_rqst->associd.desc_len =
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_lsdesc_assoc_id));
        conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
        conn_rqst->connect_cmd.desc_tag =
                        cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
        conn_rqst->connect_cmd.desc_len =
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
        conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
        conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
        conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);

        lsop->queue = queue;
        lsreq->rqstaddr = conn_rqst;
        lsreq->rqstlen = sizeof(*conn_rqst);
        lsreq->rspaddr = conn_acc;
        lsreq->rsplen = sizeof(*conn_acc);
        lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

        ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
        if (ret)
                goto out_free_buffer;

        /* process connect LS completion */

        /* validate the ACC response */
        if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
                fcret = VERR_LSACC;
        else if (conn_acc->hdr.desc_list_len !=
                        fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
                fcret = VERR_CR_CONN_ACC_LEN;
        else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
                fcret = VERR_LSDESC_RQST;
        else if (conn_acc->hdr.rqst.desc_len !=
                        fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
                fcret = VERR_LSDESC_RQST_LEN;
        else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
                fcret = VERR_CR_CONN;
        else if (conn_acc->connectid.desc_tag !=
                        cpu_to_be32(FCNVME_LSDESC_CONN_ID))
                fcret = VERR_CONN_ID;
        else if (conn_acc->connectid.desc_len !=
                        fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
                fcret = VERR_CONN_ID_LEN;

        if (fcret) {
                ret = -EBADF;
                dev_err(ctrl->dev,
                        "q %d connect failed: %s\n",
                        queue->qnum, validation_errors[fcret]);
        } else {
                queue->connection_id =
                        be64_to_cpu(conn_acc->connectid.connection_id);
                set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
        }

out_free_buffer:
        kfree(lsop);
out_no_memory:
        if (ret)
                dev_err(ctrl->dev,
                        "queue %d connect command failed (%d).\n",
                        queue->qnum, ret);
        return ret;
}

static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
        struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

        __nvme_fc_finish_ls_req(lsop);

        /* fc-nvme initiator doesn't care about success or failure of cmd */

        kfree(lsop);
}

/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association. Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. I.e., things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme initiator is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme target, so you may never get a
 * response even if you tried. As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme target
 * is present and receives the LS, it too can tear down.
 */
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
        struct fcnvme_ls_disconnect_rqst *discon_rqst;
        struct fcnvme_ls_disconnect_acc *discon_acc;
        struct nvmefc_ls_req_op *lsop;
        struct nvmefc_ls_req *lsreq;
        int ret;

        lsop = kzalloc((sizeof(*lsop) +
                        ctrl->lport->ops->lsrqst_priv_sz +
                        sizeof(*discon_rqst) + sizeof(*discon_acc)),
                        GFP_KERNEL);
        if (!lsop)
                /* couldn't send it... too bad */
                return;

        lsreq = &lsop->ls_req;

        lsreq->private = (void *)&lsop[1];
        discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
                        (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
        discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];

        discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
        discon_rqst->desc_list_len = cpu_to_be32(
                                sizeof(struct fcnvme_lsdesc_assoc_id) +
                                sizeof(struct fcnvme_lsdesc_disconn_cmd));

        discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
        discon_rqst->associd.desc_len =
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_lsdesc_assoc_id));

        discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);

        discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
                                FCNVME_LSDESC_DISCONN_CMD);
        discon_rqst->discon_cmd.desc_len =
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_lsdesc_disconn_cmd));
        discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
        discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);

        lsreq->rqstaddr = discon_rqst;
        lsreq->rqstlen = sizeof(*discon_rqst);
        lsreq->rspaddr = discon_acc;
        lsreq->rsplen = sizeof(*discon_acc);
        lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

        ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
                                nvme_fc_disconnect_assoc_done);
        if (ret)
                kfree(lsop);

        /* only meaningful part to terminating the association */
        ctrl->association_id = 0;
}

/* *********************** NVME Ctrl Routines **************************** */

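/*
 * Reinitialize the FC-NVME CMD IU and ERSP IU of a request being
 * reused, restoring the fixed header fields that a fresh op gets in
 * __nvme_fc_init_request().
 */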
static int
nvme_fc_reinit_request(void *data, struct request *rq)
{
        struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
        struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;

        memset(cmdiu, 0, sizeof(*cmdiu));
        cmdiu->scsi_id = NVME_CMD_SCSI_ID;
        cmdiu->fc_id = NVME_CMD_FC_ID;
        cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
        memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));

        return 0;
}

static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
                struct nvme_fc_fcp_op *op)
{
        fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
                                sizeof(op->rsp_iu), DMA_FROM_DEVICE);
        fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
                                sizeof(op->cmd_iu), DMA_TO_DEVICE);

        atomic_set(&op->state, FCPOP_STATE_UNINIT);
}

static void
nvme_fc_exit_request(void *data, struct request *rq,
                                unsigned int hctx_idx, unsigned int rq_idx)
{
        struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

        return __nvme_fc_exit_request(data, op);
}

static void
nvme_fc_exit_aen_ops(struct nvme_fc_ctrl *ctrl)
{
        struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
        int i;

        for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
                if (atomic_read(&aen_op->state) == FCPOP_STATE_UNINIT)
                        continue;
                __nvme_fc_exit_request(ctrl, aen_op);
                nvme_fc_ctrl_put(ctrl);
        }
}

void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
        struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
        struct request *rq = op->rq;
        struct nvmefc_fcp_req *freq = &op->fcp_req;
        struct nvme_fc_ctrl *ctrl = op->ctrl;
        struct nvme_fc_queue *queue = op->queue;
        struct nvme_completion *cqe = &op->rsp_iu.cqe;
        __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
        union nvme_result result;

        /*
         * WARNING:
         * The current linux implementation of a nvme controller
         * allocates a single tag set for all io queues and sizes
         * the io queues to fully hold all possible tags. Thus, the
         * implementation does not reference or care about the sqhd
         * value as it never needs to use the sqhd/sqtail pointers
         * for submission pacing.
         *
         * This affects the FC-NVME implementation in two ways:
         * 1) As the value doesn't matter, we don't need to waste
         *    cycles extracting it from ERSPs and stamping it in the
         *    cases where the transport fabricates CQEs on successful
         *    completions.
         * 2) The FC-NVME implementation requires that delivery of
         *    ERSP completions are to go back to the nvme layer in order
         *    relative to the rsn, such that the sqhd value will always
         *    be "in order" for the nvme layer. As the nvme layer in
         *    linux doesn't care about sqhd, there's no need to return
         *    them in order.
         *
         * Additionally:
         * As the core nvme layer in linux currently does not look at
         * every field in the cqe - in cases where the FC transport must
         * fabricate a CQE, the following fields will not be set as they
         * are not referenced:
         *      cqe.sqid, cqe.sqhd, cqe.command_id
         */

        fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
                                sizeof(op->rsp_iu), DMA_FROM_DEVICE);

        if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
                status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
        else if (freq->status)
                status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);

        /*
         * For the linux implementation, if we have an unsuccessful
         * status, the blk-mq layer can typically be called with the
         * non-zero status and the content of the cqe isn't important.
         */
        if (status)
                goto done;

        /*
         * command completed successfully relative to the wire
         * protocol. However, validate anything received and
         * extract the status and result from the cqe (create it
         * where necessary).
         */

        switch (freq->rcv_rsplen) {

        case 0:
        case NVME_FC_SIZEOF_ZEROS_RSP:
                /*
                 * No response payload or 12 bytes of payload (which
                 * should all be zeros) are considered successful, with
                 * no payload for the CQE filled in by the transport.
                 */
                if (freq->transferred_length !=
                        be32_to_cpu(op->cmd_iu.data_len)) {
                        status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
                        goto done;
                }
                result.u64 = 0;
                break;

        case sizeof(struct nvme_fc_ersp_iu):
                /*
                 * The ERSP IU contains a full completion with CQE.
                 * Validate ERSP IU and look at cqe.
                 */
                if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
                                        (freq->rcv_rsplen / 4) ||
                             be32_to_cpu(op->rsp_iu.xfrd_len) !=
                                        freq->transferred_length ||
                             op->rsp_iu.status_code ||
                             op->rqno != le16_to_cpu(cqe->command_id))) {
                        status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
                        goto done;
                }
                result = cqe->result;
                status = cqe->status;
                break;

        default:
                status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
                goto done;
        }

done:
        if (!queue->qnum && op->rqno >= AEN_CMDID_BASE) {
                nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
                nvme_fc_ctrl_put(ctrl);
                return;
        }

        nvme_end_request(rq, status, result);
}

static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
                struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
                struct request *rq, u32 rqno)
{
        struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
        int ret = 0;

        memset(op, 0, sizeof(*op));
        op->fcp_req.cmdaddr = &op->cmd_iu;
        op->fcp_req.cmdlen = sizeof(op->cmd_iu);
        op->fcp_req.rspaddr = &op->rsp_iu;
        op->fcp_req.rsplen = sizeof(op->rsp_iu);
        op->fcp_req.done = nvme_fc_fcpio_done;
        op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
        op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
        op->ctrl = ctrl;
        op->queue = queue;
        op->rq = rq;
        op->rqno = rqno;

        cmdiu->scsi_id = NVME_CMD_SCSI_ID;
        cmdiu->fc_id = NVME_CMD_FC_ID;
        cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));

        op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
                                &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
        if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
                dev_err(ctrl->dev,
                        "FCP Op failed - cmdiu dma mapping failed.\n");
                ret = -EFAULT;
                goto out_on_error;
        }

        op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
                                &op->rsp_iu, sizeof(op->rsp_iu),
                                DMA_FROM_DEVICE);
        if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
                dev_err(ctrl->dev,
                        "FCP Op failed - rspiu dma mapping failed.\n");
                ret = -EFAULT;
        }

        atomic_set(&op->state, FCPOP_STATE_IDLE);
out_on_error:
        return ret;
}

static int
nvme_fc_init_request(void *data, struct request *rq,
                                unsigned int hctx_idx, unsigned int rq_idx,
                                unsigned int numa_node)
{
        struct nvme_fc_ctrl *ctrl = data;
        struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
        struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1];

        return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}

static int
nvme_fc_init_admin_request(void *data, struct request *rq,
                                unsigned int hctx_idx, unsigned int rq_idx,
                                unsigned int numa_node)
{
        struct nvme_fc_ctrl *ctrl = data;
        struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
        struct nvme_fc_queue *queue = &ctrl->queues[0];

        return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}

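/*
 * Set up the reserved AEN ops on the admin queue. These ops have no
 * struct request behind them; their command ids start at
 * AEN_CMDID_BASE, just above the tags used by the blk-mq admin queue.
 */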
static int
nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
{
        struct nvme_fc_fcp_op *aen_op;
        struct nvme_fc_cmd_iu *cmdiu;
        struct nvme_command *sqe;
        int i, ret;

        aen_op = ctrl->aen_ops;
        for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
                cmdiu = &aen_op->cmd_iu;
                sqe = &cmdiu->sqe;
                ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
                                aen_op, (struct request *)NULL,
                                (AEN_CMDID_BASE + i));
                if (ret)
                        return ret;

                memset(sqe, 0, sizeof(*sqe));
                sqe->common.opcode = nvme_admin_async_event;
                sqe->common.command_id = AEN_CMDID_BASE + i;
        }
        return 0;
}

static inline void
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
                unsigned int qidx)
{
        struct nvme_fc_queue *queue = &ctrl->queues[qidx];

        hctx->driver_data = queue;
        queue->hctx = hctx;
}

static int
nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                unsigned int hctx_idx)
{
        struct nvme_fc_ctrl *ctrl = data;

        __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);

        return 0;
}

static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                unsigned int hctx_idx)
{
        struct nvme_fc_ctrl *ctrl = data;

        __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);

        return 0;
}

static void
nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size)
{
        struct nvme_fc_queue *queue;

        queue = &ctrl->queues[idx];
        memset(queue, 0, sizeof(*queue));
        queue->ctrl = ctrl;
        queue->qnum = idx;
        atomic_set(&queue->csn, 1);
        queue->dev = ctrl->dev;

        if (idx > 0)
                queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
        else
                queue->cmnd_capsule_len = sizeof(struct nvme_command);

        queue->queue_size = queue_size;

        /*
         * Considered whether we should allocate buffers for all SQEs
         * and CQEs and dma map them - mapping their respective entries
         * into the request structures (kernel vm addr and dma address)
         * thus the driver could use the buffers/mappings directly.
         * It only makes sense if the LLDD would use them for its
         * messaging api. It's very unlikely most adapter api's would use
         * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
         * structures were used instead.
         */
}

/*
 * This routine terminates a queue at the transport level.
 * The transport has already ensured that all outstanding ios on
 * the queue have been terminated.
 * The transport will send a Disconnect LS request to terminate
 * the queue's connection. Termination of the admin queue will also
 * terminate the association at the target.
 */
static void
nvme_fc_free_queue(struct nvme_fc_queue *queue)
{
        if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
                return;

        /*
         * Current implementation never disconnects a single queue.
         * It always terminates a whole association. So there is never
         * a disconnect(queue) LS sent to the target.
         */

        queue->connection_id = 0;
        clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
}

static void
__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
        struct nvme_fc_queue *queue, unsigned int qidx)
{
        if (ctrl->lport->ops->delete_queue)
                ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
                                queue->lldd_handle);
        queue->lldd_handle = NULL;
}

static void
nvme_fc_destroy_admin_queue(struct nvme_fc_ctrl *ctrl)
{
        __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
        nvme_fc_free_queue(&ctrl->queues[0]);
}

static void
nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
{
        int i;

        for (i = 1; i < ctrl->queue_count; i++)
                nvme_fc_free_queue(&ctrl->queues[i]);
}

static int
__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
        struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
{
        int ret = 0;

        queue->lldd_handle = NULL;
        if (ctrl->lport->ops->create_queue)
                ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
                                qidx, qsize, &queue->lldd_handle);

        return ret;
}

static void
nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
{
        struct nvme_fc_queue *queue = &ctrl->queues[ctrl->queue_count - 1];
        int i;

        for (i = ctrl->queue_count - 1; i >= 1; i--, queue--)
                __nvme_fc_delete_hw_queue(ctrl, queue, i);
}

static int
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
        struct nvme_fc_queue *queue = &ctrl->queues[1];
        int i, ret;

        for (i = 1; i < ctrl->queue_count; i++, queue++) {
                ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
                if (ret)
                        goto delete_queues;
        }

        return 0;

delete_queues:
        for (; i >= 0; i--)
                __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
        return ret;
}

static int
nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
        int i, ret = 0;

        for (i = 1; i < ctrl->queue_count; i++) {
                ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
                                        (qsize / 5));
                if (ret)
                        break;
                ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                if (ret)
                        break;
        }

        return ret;
}

static void
nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
{
        int i;

        for (i = 1; i < ctrl->queue_count; i++)
                nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize);
}

static void
nvme_fc_ctrl_free(struct kref *ref)
{
        struct nvme_fc_ctrl *ctrl =
                container_of(ref, struct nvme_fc_ctrl, ref);
        unsigned long flags;

        if (ctrl->state != FCCTRL_INIT) {
                /* remove from rport list */
                spin_lock_irqsave(&ctrl->rport->lock, flags);
                list_del(&ctrl->ctrl_list);
                spin_unlock_irqrestore(&ctrl->rport->lock, flags);
        }

        put_device(ctrl->dev);
        nvme_fc_rport_put(ctrl->rport);

        kfree(ctrl->queues);
        ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
        nvmf_free_options(ctrl->ctrl.opts);
        kfree(ctrl);
}

static void
nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
{
        kref_put(&ctrl->ref, nvme_fc_ctrl_free);
}

static int
nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
{
        return kref_get_unless_zero(&ctrl->ref);
}

/*
 * All accesses from nvme core layer done - can now free the
 * controller. Called after last nvme_put_ctrl() call
 */
static void
nvme_fc_free_nvme_ctrl(struct nvme_ctrl *nctrl)
{
        struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

        WARN_ON(nctrl != &ctrl->ctrl);

        /*
         * Tear down the association, which will generate link
         * traffic to terminate connections
         */

        if (ctrl->state != FCCTRL_INIT) {
                /* send a Disconnect(association) LS to fc-nvme target */
                nvme_fc_xmt_disconnect_assoc(ctrl);

                if (ctrl->ctrl.tagset) {
                        blk_cleanup_queue(ctrl->ctrl.connect_q);
                        blk_mq_free_tag_set(&ctrl->tag_set);
                        nvme_fc_delete_hw_io_queues(ctrl);
                        nvme_fc_free_io_queues(ctrl);
                }

                nvme_fc_exit_aen_ops(ctrl);

                nvme_fc_destroy_admin_queue(ctrl);
        }

        nvme_fc_ctrl_put(ctrl);
}

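/*
 * Move an op to FCPOP_STATE_ABORTED and ask the LLDD to abort the
 * underlying FC exchange. Returns -ECANCELED (and restores the prior
 * state) if the op was not active.
 */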
static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
        int state;

        state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
        if (state != FCPOP_STATE_ACTIVE) {
                atomic_set(&op->state, state);
                return -ECANCELED; /* fail */
        }

        ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
                                        &ctrl->rport->remoteport,
                                        op->queue->lldd_handle,
                                        &op->fcp_req);

        return 0;
}

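/*
 * blk-mq timeout handler: abort the timed-out io at the LLDD and
 * complete it. Reserved (AEN) requests just have their timer reset.
 */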
enum blk_eh_timer_return
nvme_fc_timeout(struct request *rq, bool reserved)
{
        struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
        struct nvme_fc_ctrl *ctrl = op->ctrl;
        int ret;

        if (reserved)
                return BLK_EH_RESET_TIMER;

        ret = __nvme_fc_abort_op(ctrl, op);
        if (ret)
                /* io wasn't active to abort, consider it done */
                return BLK_EH_HANDLED;

        /*
         * TODO: force a controller reset
         *   when that happens, queues will be torn down and outstanding
         *   ios will be terminated, and the above abort, on a single io
         *   will no longer be needed.
         */

        return BLK_EH_HANDLED;
}

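/*
 * Build the scatterlist for a request and dma-map it for the LLDD.
 * Requests without a payload leave sg_cnt at 0.
 */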
static int
nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
                struct nvme_fc_fcp_op *op)
{
        struct nvmefc_fcp_req *freq = &op->fcp_req;
        enum dma_data_direction dir;
        int ret;

        freq->sg_cnt = 0;

        if (!blk_rq_payload_bytes(rq))
                return 0;

        freq->sg_table.sgl = freq->first_sgl;
        ret = sg_alloc_table_chained(&freq->sg_table,
                        blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
        if (ret)
                return -ENOMEM;

        op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
        WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
        dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
        freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
                                op->nents, dir);
        if (unlikely(freq->sg_cnt <= 0)) {
                sg_free_table_chained(&freq->sg_table, true);
                freq->sg_cnt = 0;
                return -EFAULT;
        }

        /*
         * TODO: blk_integrity_rq(rq)  for DIF
         */
        return 0;
}

static void
nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
                struct nvme_fc_fcp_op *op)
{
        struct nvmefc_fcp_req *freq = &op->fcp_req;

        if (!freq->sg_cnt)
                return;

        fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
                        ((rq_data_dir(rq) == WRITE) ?
                                DMA_TO_DEVICE : DMA_FROM_DEVICE));

        nvme_cleanup_cmd(rq);

        sg_free_table_chained(&freq->sg_table, true);

        freq->sg_cnt = 0;
}

1755/*
1756 * In FC, the queue is a logical thing. At transport connect, the target
1757 * creates its "queue" and returns a handle that must be presented
1758 * back to the target whenever anything is posted to that SQ. When an
1759 * SQE is sent on an SQ, FC effectively considers the SQE, or rather
1760 * the command contained within the SQE, an io, and assigns an FC
1761 * exchange to it. The SQE and the associated SQ handle are sent in
1762 * the initial CMD IU sent on the exchange. All transfers relative to
1763 * the io occur as part of the exchange. The CQE is the last thing
1764 * for the io, and is transferred (explicitly or implicitly) with the
1765 * RSP IU sent on the exchange. After the CQE is received, the FC
1766 * exchange is terminated and the exchange may be reused for a
1767 * different io.
1768 *
1769 * The transport-to-LLDD API has the transport request a new fcp io
1770 * from the LLDD. The LLDD then allocates an FC exchange resource
1771 * and transfers the command, then processes all steps needed to
1772 * complete the io. Upon completion, the transport done routine is called.
1773 *
1774 * So - while the operation is outstanding to the LLDD, there is a link
1775 * level FC exchange resource that is also outstanding. This must be
1776 * considered in all cleanup operations.
1777 */
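/*
 * For illustration only -- a minimal, non-compiled sketch of the LLDD
 * side of the fcp_io contract described above. The example_* names and
 * exchange helpers are hypothetical; only the ops signature and the
 * nvmefc_fcp_req fields mirror what this transport actually uses (see
 * the fcp_io call in nvme_fc_start_fcp_op() below).
 */
#if 0
static int
example_lldd_fcp_io(struct nvme_fc_local_port *localport,
		struct nvme_fc_remote_port *remoteport,
		void *hw_queue_handle, struct nvmefc_fcp_req *fcpreq)
{
	struct example_exchange *xchg;

	/* 1) allocate a link-level FC exchange resource for this io */
	xchg = example_alloc_exchange(hw_queue_handle);
	if (!xchg)
		return -EBUSY;	/* transport will delay and requeue */

	/* 2) transmit the CMD IU (fcpreq->cmdaddr / fcpreq->cmdlen) */
	/* 3) move data per fcpreq->io_dir using fcpreq->sg_table */
	example_start_exchange(xchg, fcpreq);

	/*
	 * 4) later, from the RSP IU (CQE) completion path:
	 *	fcpreq->status = 0;
	 *	fcpreq->done(fcpreq);
	 * which releases the exchange for reuse by another io.
	 */
	return 0;
}
#endif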
1778static int
1779nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1780 struct nvme_fc_fcp_op *op, u32 data_len,
1781 enum nvmefc_fcp_datadir io_dir)
1782{
1783 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1784 struct nvme_command *sqe = &cmdiu->sqe;
1785 u32 csn;
1786 int ret;
1787
1788 if (!nvme_fc_ctrl_get(ctrl))
1789 return BLK_MQ_RQ_QUEUE_ERROR;
1790
1791 /* format the FC-NVME CMD IU and fcp_req */
1792 cmdiu->connection_id = cpu_to_be64(queue->connection_id);
1793 csn = atomic_inc_return(&queue->csn);
1794 cmdiu->csn = cpu_to_be32(csn);
1795 cmdiu->data_len = cpu_to_be32(data_len);
1796 switch (io_dir) {
1797 case NVMEFC_FCP_WRITE:
1798 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
1799 break;
1800 case NVMEFC_FCP_READ:
1801 cmdiu->flags = FCNVME_CMD_FLAGS_READ;
1802 break;
1803 case NVMEFC_FCP_NODATA:
1804 cmdiu->flags = 0;
1805 break;
1806 }
1807 op->fcp_req.payload_length = data_len;
1808 op->fcp_req.io_dir = io_dir;
1809 op->fcp_req.transferred_length = 0;
1810 op->fcp_req.rcv_rsplen = 0;
1811	op->fcp_req.status = NVME_SC_SUCCESS;
1812	op->fcp_req.sqid = cpu_to_le16(queue->qnum);
1813
1814 /*
1815 * validate per fabric rules, set fields mandated by fabric spec
1816 * as well as those by FC-NVME spec.
1817 */
1818 WARN_ON_ONCE(sqe->common.metadata);
1819 WARN_ON_ONCE(sqe->common.dptr.prp1);
1820 WARN_ON_ONCE(sqe->common.dptr.prp2);
1821 sqe->common.flags |= NVME_CMD_SGL_METABUF;
1822
1823 /*
1824 * format SQE DPTR field per FC-NVME rules
1825 * type=data block descr; subtype=offset;
1826 * offset is currently 0.
1827 */
1828 sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET;
1829 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
1830 sqe->rw.dptr.sgl.addr = 0;
1831
1832 /* odd that we set the command_id - should come from nvme-fabrics */
1833 WARN_ON_ONCE(sqe->common.command_id != cpu_to_le16(op->rqno));
1834
1835 if (op->rq) { /* skipped on aens */
1836 ret = nvme_fc_map_data(ctrl, op->rq, op);
1837 if (ret < 0) {
1838 dev_err(queue->ctrl->ctrl.device,
1839 "Failed to map data (%d)\n", ret);
1840 nvme_cleanup_cmd(op->rq);
1841 nvme_fc_ctrl_put(ctrl);
1842 return (ret == -ENOMEM || ret == -EAGAIN) ?
1843 BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
1844 }
1845 }
1846
1847 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
1848 sizeof(op->cmd_iu), DMA_TO_DEVICE);
1849
1850 atomic_set(&op->state, FCPOP_STATE_ACTIVE);
1851
1852 if (op->rq)
1853 blk_mq_start_request(op->rq);
1854
1855 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
1856 &ctrl->rport->remoteport,
1857 queue->lldd_handle, &op->fcp_req);
1858
1859 if (ret) {
1860 dev_err(ctrl->dev,
1861 "Send nvme command failed - lldd returned %d.\n", ret);
1862
1863 if (op->rq) { /* normal request */
1864 nvme_fc_unmap_data(ctrl, op->rq, op);
1865 nvme_cleanup_cmd(op->rq);
1866 }
1867 /* else - aen. no cleanup needed */
1868
1869 nvme_fc_ctrl_put(ctrl);
1870
1871 if (ret != -EBUSY)
1872 return BLK_MQ_RQ_QUEUE_ERROR;
1873
1874 if (op->rq) {
1875 blk_mq_stop_hw_queues(op->rq->q);
1876 blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
1877 }
1878 return BLK_MQ_RQ_QUEUE_BUSY;
1879 }
1880
1881 return BLK_MQ_RQ_QUEUE_OK;
1882}
1883
1884static int
1885nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
1886 const struct blk_mq_queue_data *bd)
1887{
1888 struct nvme_ns *ns = hctx->queue->queuedata;
1889 struct nvme_fc_queue *queue = hctx->driver_data;
1890 struct nvme_fc_ctrl *ctrl = queue->ctrl;
1891 struct request *rq = bd->rq;
1892 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1893 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1894 struct nvme_command *sqe = &cmdiu->sqe;
1895 enum nvmefc_fcp_datadir io_dir;
1896 u32 data_len;
1897 int ret;
1898
1899 ret = nvme_setup_cmd(ns, rq, sqe);
1900 if (ret)
1901 return ret;
1902
1903	data_len = blk_rq_payload_bytes(rq);
1904	if (data_len)
1905 io_dir = ((rq_data_dir(rq) == WRITE) ?
1906 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
1907 else
1908 io_dir = NVMEFC_FCP_NODATA;
1909
1910 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
1911}
1912
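/*
 * Map a queue to the blk-mq tag set owning its requests: qnum 0 is
 * the admin queue (single tags entry in admin_tag_set), while io
 * queue N uses entry N - 1 of the io tag set.
 */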
1913static struct blk_mq_tags *
1914nvme_fc_tagset(struct nvme_fc_queue *queue)
1915{
1916 if (queue->qnum == 0)
1917 return queue->ctrl->admin_tag_set.tags[queue->qnum];
1918
1919 return queue->ctrl->tag_set.tags[queue->qnum - 1];
1920}
1921
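/*
 * blk-mq poll entry: nudge the LLDD to reap completions on this hw
 * queue, then report whether the op behind @tag has left the ACTIVE
 * state (i.e. has been completed or aborted).
 */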
1922static int
1923nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
1924{
1926 struct nvme_fc_queue *queue = hctx->driver_data;
1927 struct nvme_fc_ctrl *ctrl = queue->ctrl;
1928 struct request *req;
1929 struct nvme_fc_fcp_op *op;
1930
1931 req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
1932 if (!req) {
1933 dev_err(queue->ctrl->ctrl.device,
1934 "tag 0x%x on QNum %#x not found\n",
1935 tag, queue->qnum);
1936 return 0;
1937 }
1938
1939 op = blk_mq_rq_to_pdu(req);
1940
1941 if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
1942 (ctrl->lport->ops->poll_queue))
1943 ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
1944 queue->lldd_handle);
1945
1946 return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
1947}
1948
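/*
 * AEN ops ride the admin queue's reserved tags: they carry no data
 * (NVMEFC_FCP_NODATA) and have no block-layer request attached, so
 * the rq-specific paths in nvme_fc_start_fcp_op() are skipped.
 */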
1949static void
1950nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
1951{
1952 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
1953 struct nvme_fc_fcp_op *aen_op;
1954 int ret;
1955
1956	if (aer_idx >= NVME_FC_NR_AEN_COMMANDS)
1957 return;
1958
1959 aen_op = &ctrl->aen_ops[aer_idx];
1960
1961 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
1962 NVMEFC_FCP_NODATA);
1963 if (ret)
1964 dev_err(ctrl->ctrl.device,
1965 "failed async event work [%d]\n", aer_idx);
1966}
1967
1968static void
1969nvme_fc_complete_rq(struct request *rq)
1970{
1971 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1972 struct nvme_fc_ctrl *ctrl = op->ctrl;
1973	int state;
1974
1975 state = atomic_xchg(&op->state, FCPOP_STATE_IDLE);
1976
1977 nvme_cleanup_cmd(rq);
1978	nvme_fc_unmap_data(ctrl, rq, op);
1979	nvme_complete_rq(rq);
1980	nvme_fc_ctrl_put(ctrl);
1981
1982}
1983
1984static const struct blk_mq_ops nvme_fc_mq_ops = {
1985	.queue_rq	= nvme_fc_queue_rq,
1986 .complete = nvme_fc_complete_rq,
1987 .init_request = nvme_fc_init_request,
1988 .exit_request = nvme_fc_exit_request,
1989 .reinit_request = nvme_fc_reinit_request,
1990 .init_hctx = nvme_fc_init_hctx,
1991 .poll = nvme_fc_poll,
1992 .timeout = nvme_fc_timeout,
1993};
1994
1995static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
1996	.queue_rq	= nvme_fc_queue_rq,
1997 .complete = nvme_fc_complete_rq,
1998 .init_request = nvme_fc_init_admin_request,
1999 .exit_request = nvme_fc_exit_request,
2000 .reinit_request = nvme_fc_reinit_request,
2001 .init_hctx = nvme_fc_init_admin_hctx,
2002 .timeout = nvme_fc_timeout,
2003};
2004
2005static int
2006nvme_fc_configure_admin_queue(struct nvme_fc_ctrl *ctrl)
2007{
2008 u32 segs;
2009 int error;
2010
2011 nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH);
2012
2013 error = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
2014 NVME_FC_AQ_BLKMQ_DEPTH,
2015 (NVME_FC_AQ_BLKMQ_DEPTH / 4));
2016 if (error)
2017 return error;
2018
2019 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
2020 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
2021 ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH;
2022 ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
2023 ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
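	/*
	 * Per-request PDU layout behind each tag: the fcp op itself,
	 * an inline first-chunk scatterlist (SG_CHUNK_SIZE entries),
	 * then the LLDD's private area of fcprqst_priv_sz bytes.
	 */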
2024 ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
2025 (SG_CHUNK_SIZE *
2026 sizeof(struct scatterlist)) +
2027 ctrl->lport->ops->fcprqst_priv_sz;
2028 ctrl->admin_tag_set.driver_data = ctrl;
2029 ctrl->admin_tag_set.nr_hw_queues = 1;
2030 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
2031
2032 error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
2033 if (error)
2034 goto out_free_queue;
2035
2036 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
2037 if (IS_ERR(ctrl->ctrl.admin_q)) {
2038 error = PTR_ERR(ctrl->ctrl.admin_q);
2039 goto out_free_tagset;
2040 }
2041
2042 error = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
2043 NVME_FC_AQ_BLKMQ_DEPTH);
2044 if (error)
2045 goto out_cleanup_queue;
2046
2047 error = nvmf_connect_admin_queue(&ctrl->ctrl);
2048 if (error)
2049 goto out_delete_hw_queue;
2050
2051 error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
2052 if (error) {
2053 dev_err(ctrl->ctrl.device,
2054 "prop_get NVME_REG_CAP failed\n");
2055 goto out_delete_hw_queue;
2056 }
2057
2058 ctrl->ctrl.sqsize =
2059 min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
2060
2061 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
2062 if (error)
2063 goto out_delete_hw_queue;
2064
2065 segs = min_t(u32, NVME_FC_MAX_SEGMENTS,
2066 ctrl->lport->ops->max_sgl_segments);
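	/*
	 * Convert the SGL segment limit into a transfer-size cap:
	 * (segs - 1) pages of data, expressed in 512-byte sectors
	 * (hence the PAGE_SHIFT - 9 conversion).
	 */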
2067 ctrl->ctrl.max_hw_sectors = (segs - 1) << (PAGE_SHIFT - 9);
2068
2069 error = nvme_init_identify(&ctrl->ctrl);
2070 if (error)
2071 goto out_delete_hw_queue;
2072
2073 nvme_start_keep_alive(&ctrl->ctrl);
2074
2075 return 0;
2076
2077out_delete_hw_queue:
2078 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2079out_cleanup_queue:
2080 blk_cleanup_queue(ctrl->ctrl.admin_q);
2081out_free_tagset:
2082 blk_mq_free_tag_set(&ctrl->admin_tag_set);
2083out_free_queue:
2084 nvme_fc_free_queue(&ctrl->queues[0]);
2085 return error;
2086}
2087
2088/*
2089 * This routine is used by the transport when it needs to find active
2090 * io on a queue that is to be terminated. The transport uses
2091 * blk_mq_tagset_busy_iter() to walk the busy requests, invoking
2092 * this routine on each to kill them one by one.
2093 *
2094 * As FC allocates FC exchange for each io, the transport must contact
2095 * the LLDD to terminate the exchange, thus releasing the FC exchange.
2096 * After terminating the exchange the LLDD will call the transport's
2097 * normal io done path for the request, but it will have an aborted
2098 * status. The done path will return the io request back to the block
2099 * layer with an error status.
2100 */
2101static void
2102nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2103{
2104 struct nvme_ctrl *nctrl = data;
2105 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2106 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2107	int status;
2108
2109 if (!blk_mq_request_started(req))
2110 return;
2111
2112 /* this performs an ABTS-LS on the FC exchange for the io */
2113 status = __nvme_fc_abort_op(ctrl, op);
2114	/*
2115	 * If __nvme_fc_abort_op failed, the io wasn't active to abort:
2116	 * consider it done. Assume the completion path is already
2117	 * completing the io in parallel, so there is nothing further
2118	 * to do here.
2119	 */
2120	if (status)
2121		return;
2123}
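/*
 * Pairing sketch (mirrored by nvme_fc_shutdown_ctrl() below): the
 * transport walks every started request in a tag set and aborts its
 * FC exchange:
 *
 *	blk_mq_tagset_busy_iter(&ctrl->tag_set,
 *			nvme_fc_terminate_exchange, &ctrl->ctrl);
 */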
2124
2125
2126/*
2127 * This routine stops operation of the controller. Admin and IO queues
2128 * are stopped, outstanding ios on them terminated, and the nvme ctrl
2129 * is shutdown.
2130 */
2131static void
2132nvme_fc_shutdown_ctrl(struct nvme_fc_ctrl *ctrl)
2133{
2134 /*
2135 * If io queues are present, stop them and terminate all outstanding
2136 * ios on them. As FC allocates FC exchange for each io, the
2137 * transport must contact the LLDD to terminate the exchange,
2138 * thus releasing the FC exchange. We use blk_mq_tagset_busy_itr()
2139 * to tell us what io's are busy and invoke a transport routine
2140 * to kill them with the LLDD. After terminating the exchange
2141 * the LLDD will call the transport's normal io done path, but it
2142 * will have an aborted status. The done path will return the
2143 * io requests back to the block layer as part of normal completions
2144 * (but with error status).
2145 */
2146 if (ctrl->queue_count > 1) {
2147 nvme_stop_queues(&ctrl->ctrl);
2148 blk_mq_tagset_busy_iter(&ctrl->tag_set,
2149 nvme_fc_terminate_exchange, &ctrl->ctrl);
2150 }
2151
2152 if (ctrl->ctrl.state == NVME_CTRL_LIVE)
2153 nvme_shutdown_ctrl(&ctrl->ctrl);
2154
2155 /*
2156 * now clean up the admin queue. Same thing as above.
2157 * use blk_mq_tagset_busy_iter() and the transport routine to
2158 * terminate the exchanges.
2159 */
2160 blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
2161 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2162 nvme_fc_terminate_exchange, &ctrl->ctrl);
2163}
2164
2165/*
2166 * Called to teardown an association.
2167 * May be called with association fully in place or partially in place.
2168 */
2169static void
2170__nvme_fc_remove_ctrl(struct nvme_fc_ctrl *ctrl)
2171{
2172 nvme_stop_keep_alive(&ctrl->ctrl);
2173
2174 /* stop and terminate ios on admin and io queues */
2175 nvme_fc_shutdown_ctrl(ctrl);
2176
2177 /*
2178 * tear down the controller
2179 * This will result in the last reference on the nvme ctrl to
2180 * expire, calling the transport nvme_fc_free_nvme_ctrl() callback.
2181 * From there, the transport will tear down its logical queues and
2182 * association.
2183 */
2184 nvme_uninit_ctrl(&ctrl->ctrl);
2185
2186 nvme_put_ctrl(&ctrl->ctrl);
2187}
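/*
 * Teardown order, in short: stop keep-alive, shut down the queues and
 * terminate their ios, then nvme_uninit_ctrl() + nvme_put_ctrl() drop
 * the last reference so nvme_fc_free_nvme_ctrl() runs and the logical
 * queues and association are freed.
 */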
2188
2189static void
2190nvme_fc_del_ctrl_work(struct work_struct *work)
2191{
2192 struct nvme_fc_ctrl *ctrl =
2193 container_of(work, struct nvme_fc_ctrl, delete_work);
2194
2195 __nvme_fc_remove_ctrl(ctrl);
2196}
2197
2198static int
2199__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
2200{
2201 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
2202 return -EBUSY;
2203
2204 if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
2205 return -EBUSY;
2206
2207 return 0;
2208}
2209
2210/*
2211 * Request from nvme core layer to delete the controller
2212 */
2213static int
2214nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
2215{
2216 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2217 struct nvme_fc_rport *rport = ctrl->rport;
2218 unsigned long flags;
2219 int ret;
2220
2221 spin_lock_irqsave(&rport->lock, flags);
2222 ret = __nvme_fc_del_ctrl(ctrl);
2223 spin_unlock_irqrestore(&rport->lock, flags);
2224 if (ret)
2225 return ret;
2226
2227 flush_work(&ctrl->delete_work);
2228
2229 return 0;
2230}
2231
2232static int
2233nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
2234{
2235 return -EIO;
2236}
2237
2238static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
2239 .name = "fc",
2240 .module = THIS_MODULE,
2241 .is_fabrics = true,
2242 .reg_read32 = nvmf_reg_read32,
2243 .reg_read64 = nvmf_reg_read64,
2244 .reg_write32 = nvmf_reg_write32,
2245 .reset_ctrl = nvme_fc_reset_nvme_ctrl,
2246 .free_ctrl = nvme_fc_free_nvme_ctrl,
2247 .submit_async_event = nvme_fc_submit_async_event,
2248 .delete_ctrl = nvme_fc_del_nvme_ctrl,
2249 .get_subsysnqn = nvmf_get_subsysnqn,
2250 .get_address = nvmf_get_address,
2251};
2252
2253static int
2254nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2255{
2256 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2257 int ret;
2258
2259 ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
2260 if (ret) {
2261 dev_info(ctrl->ctrl.device,
2262 "set_queue_count failed: %d\n", ret);
2263 return ret;
2264 }
2265
2266 ctrl->queue_count = opts->nr_io_queues + 1;
2267 if (!opts->nr_io_queues)
2268 return 0;
2269
2270 dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
2271 opts->nr_io_queues);
2272
2273 nvme_fc_init_io_queues(ctrl);
2274
2275 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2276 ctrl->tag_set.ops = &nvme_fc_mq_ops;
2277 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2278 ctrl->tag_set.reserved_tags = 1; /* fabric connect */
2279 ctrl->tag_set.numa_node = NUMA_NO_NODE;
2280 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2281 ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
2282 (SG_CHUNK_SIZE *
2283 sizeof(struct scatterlist)) +
2284 ctrl->lport->ops->fcprqst_priv_sz;
2285 ctrl->tag_set.driver_data = ctrl;
2286 ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
2287 ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2288
2289 ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2290 if (ret)
2291 return ret;
2292
2293 ctrl->ctrl.tagset = &ctrl->tag_set;
2294
2295 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2296 if (IS_ERR(ctrl->ctrl.connect_q)) {
2297 ret = PTR_ERR(ctrl->ctrl.connect_q);
2298 goto out_free_tag_set;
2299 }
2300
2301 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2302 if (ret)
2303 goto out_cleanup_blk_queue;
2304
2305 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2306 if (ret)
2307 goto out_delete_hw_queues;
2308
2309 return 0;
2310
2311out_delete_hw_queues:
2312 nvme_fc_delete_hw_io_queues(ctrl);
2313out_cleanup_blk_queue:
2314 nvme_stop_keep_alive(&ctrl->ctrl);
2315 blk_cleanup_queue(ctrl->ctrl.connect_q);
2316out_free_tag_set:
2317 blk_mq_free_tag_set(&ctrl->tag_set);
2318 nvme_fc_free_io_queues(ctrl);
2319
2320 /* force put free routine to ignore io queues */
2321 ctrl->ctrl.tagset = NULL;
2322
2323 return ret;
2324}
2325
2326
2327static struct nvme_ctrl *
2328__nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2329 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
2330{
2331 struct nvme_fc_ctrl *ctrl;
2332 unsigned long flags;
2333 int ret, idx;
2334 bool changed;
2335
2336 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2337 if (!ctrl) {
2338 ret = -ENOMEM;
2339 goto out_fail;
2340 }
2341
2342 idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
2343 if (idx < 0) {
2344 ret = -ENOSPC;
2345 goto out_free_ctrl;
2346 }
2347
2348 ctrl->ctrl.opts = opts;
2349 INIT_LIST_HEAD(&ctrl->ctrl_list);
2350	ctrl->lport = lport;
2351 ctrl->rport = rport;
2352 ctrl->dev = lport->dev;
2353 ctrl->state = FCCTRL_INIT;
2354 ctrl->cnum = idx;
2355
2356 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
2357 if (ret)
2358 goto out_free_ida;
2359
2360 get_device(ctrl->dev);
2361 kref_init(&ctrl->ref);
2362
2363 INIT_WORK(&ctrl->delete_work, nvme_fc_del_ctrl_work);
2364 spin_lock_init(&ctrl->lock);
2365
2366 /* io queue count */
2367 ctrl->queue_count = min_t(unsigned int,
2368 opts->nr_io_queues,
2369 lport->ops->max_hw_queues);
2370 opts->nr_io_queues = ctrl->queue_count; /* so opts has valid value */
2371 ctrl->queue_count++; /* +1 for admin queue */
2372
2373 ctrl->ctrl.sqsize = opts->queue_size - 1;
2374 ctrl->ctrl.kato = opts->kato;
2375
2376 ret = -ENOMEM;
2377 ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue),
2378 GFP_KERNEL);
2379 if (!ctrl->queues)
2380 goto out_uninit_ctrl;
2381
2382 ret = nvme_fc_configure_admin_queue(ctrl);
2383 if (ret)
2384 goto out_uninit_ctrl;
2385
2386 /* sanity checks */
2387
2388	/* FC-NVME does not have other data in the capsule */
2389 if (ctrl->ctrl.icdoff) {
2390 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
2391 ctrl->ctrl.icdoff);
2392 goto out_remove_admin_queue;
2393 }
2394
2395 /* FC-NVME supports normal SGL Data Block Descriptors */
2396
2397 if (opts->queue_size > ctrl->ctrl.maxcmd) {
2398 /* warn if maxcmd is lower than queue_size */
2399 dev_warn(ctrl->ctrl.device,
2400 "queue_size %zu > ctrl maxcmd %u, reducing "
2401			"to maxcmd\n",
2402 opts->queue_size, ctrl->ctrl.maxcmd);
2403 opts->queue_size = ctrl->ctrl.maxcmd;
2404 }
2405
2406 ret = nvme_fc_init_aen_ops(ctrl);
2407 if (ret)
2408 goto out_exit_aen_ops;
2409
2410 if (ctrl->queue_count > 1) {
2411 ret = nvme_fc_create_io_queues(ctrl);
2412 if (ret)
2413 goto out_exit_aen_ops;
2414 }
2415
2416 spin_lock_irqsave(&ctrl->lock, flags);
2417 ctrl->state = FCCTRL_ACTIVE;
2418 spin_unlock_irqrestore(&ctrl->lock, flags);
2419
2420 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
2421 WARN_ON_ONCE(!changed);
2422
2423 dev_info(ctrl->ctrl.device,
2424		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
2425 ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
2426
2427 kref_get(&ctrl->ctrl.kref);
2428
2429 spin_lock_irqsave(&rport->lock, flags);
2430 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
2431 spin_unlock_irqrestore(&rport->lock, flags);
2432
2433 if (opts->nr_io_queues) {
2434 nvme_queue_scan(&ctrl->ctrl);
2435 nvme_queue_async_events(&ctrl->ctrl);
2436 }
2437
2438 return &ctrl->ctrl;
2439
2440out_exit_aen_ops:
2441 nvme_fc_exit_aen_ops(ctrl);
2442out_remove_admin_queue:
2443 /* send a Disconnect(association) LS to fc-nvme target */
2444 nvme_fc_xmt_disconnect_assoc(ctrl);
2445 nvme_stop_keep_alive(&ctrl->ctrl);
2446 nvme_fc_destroy_admin_queue(ctrl);
2447out_uninit_ctrl:
2448 nvme_uninit_ctrl(&ctrl->ctrl);
2449 nvme_put_ctrl(&ctrl->ctrl);
2450 if (ret > 0)
2451 ret = -EIO;
2452	/* exit via here will follow the ctrl refcount callbacks to free */
2453 return ERR_PTR(ret);
2454
2455out_free_ida:
2456 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
2457out_free_ctrl:
2458 kfree(ctrl);
2459out_fail:
2460 nvme_fc_rport_put(rport);
2461	/* exit via here does not go through the ctrl refcount callbacks */
2462 return ERR_PTR(ret);
2463}
2464
2465enum {
2466 FCT_TRADDR_ERR = 0,
2467 FCT_TRADDR_WWNN = 1 << 0,
2468 FCT_TRADDR_WWPN = 1 << 1,
2469};
2470
2471struct nvmet_fc_traddr {
2472 u64 nn;
2473 u64 pn;
2474};
2475
2476static const match_table_t traddr_opt_tokens = {
2477 { FCT_TRADDR_WWNN, "nn-%s" },
2478 { FCT_TRADDR_WWPN, "pn-%s" },
2479 { FCT_TRADDR_ERR, NULL }
2480};
2481
2482static int
2483nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf)
2484{
2485 substring_t args[MAX_OPT_ARGS];
2486 char *options, *o, *p;
2487 int token, ret = 0;
2488 u64 token64;
2489
2490 options = o = kstrdup(buf, GFP_KERNEL);
2491 if (!options)
2492 return -ENOMEM;
2493
2494 while ((p = strsep(&o, ":\n")) != NULL) {
2495 if (!*p)
2496 continue;
2497
2498 token = match_token(p, traddr_opt_tokens, args);
2499 switch (token) {
2500 case FCT_TRADDR_WWNN:
2501 if (match_u64(args, &token64)) {
2502 ret = -EINVAL;
2503 goto out;
2504 }
2505 traddr->nn = token64;
2506 break;
2507 case FCT_TRADDR_WWPN:
2508 if (match_u64(args, &token64)) {
2509 ret = -EINVAL;
2510 goto out;
2511 }
2512 traddr->pn = token64;
2513 break;
2514 default:
2515 pr_warn("unknown traddr token or missing value '%s'\n",
2516 p);
2517 ret = -EINVAL;
2518 goto out;
2519 }
2520 }
2521
2522out:
2523 kfree(options);
2524 return ret;
2525}
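/*
 * The accepted address syntax is a colon-separated pair of 64-bit
 * node/port names, e.g. (the WWN values here are made-up
 * placeholders):
 *
 *	nn-0x20000090fac7ca5a:pn-0x10000090fac7ca5a
 *
 * match_u64() parses each value with kstrtoull(), and the 0x form is
 * how FC WWNs are conventionally written.
 */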
2526
2527static struct nvme_ctrl *
2528nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
2529{
2530 struct nvme_fc_lport *lport;
2531 struct nvme_fc_rport *rport;
2532 struct nvmet_fc_traddr laddr = { 0L, 0L };
2533 struct nvmet_fc_traddr raddr = { 0L, 0L };
2534 unsigned long flags;
2535 int ret;
2536
2537 ret = nvme_fc_parse_address(&raddr, opts->traddr);
2538 if (ret || !raddr.nn || !raddr.pn)
2539 return ERR_PTR(-EINVAL);
2540
2541 ret = nvme_fc_parse_address(&laddr, opts->host_traddr);
2542 if (ret || !laddr.nn || !laddr.pn)
2543 return ERR_PTR(-EINVAL);
2544
2545 /* find the host and remote ports to connect together */
2546 spin_lock_irqsave(&nvme_fc_lock, flags);
2547 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
2548 if (lport->localport.node_name != laddr.nn ||
2549 lport->localport.port_name != laddr.pn)
2550 continue;
2551
2552 list_for_each_entry(rport, &lport->endp_list, endp_list) {
2553 if (rport->remoteport.node_name != raddr.nn ||
2554 rport->remoteport.port_name != raddr.pn)
2555 continue;
2556
2557			/* if we fail to get a reference, fall through; we will return an error */
2558 if (!nvme_fc_rport_get(rport))
2559 break;
2560
2561 spin_unlock_irqrestore(&nvme_fc_lock, flags);
2562
2563 return __nvme_fc_create_ctrl(dev, opts, lport, rport);
2564 }
2565 }
2566 spin_unlock_irqrestore(&nvme_fc_lock, flags);
2567
2568 return ERR_PTR(-ENOENT);
2569}
2570
2571
2572static struct nvmf_transport_ops nvme_fc_transport = {
2573 .name = "fc",
2574 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
2575 .allowed_opts = NVMF_OPT_RECONNECT_DELAY,
2576 .create_ctrl = nvme_fc_create_ctrl,
2577};
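/*
 * From userspace this transport is reached through nvme-cli, roughly
 * as below (the WWNs are placeholders; see nvme_fc_parse_address()
 * above for the syntax):
 *
 *	nvme connect -t fc -n <subsysnqn>
 *		-a nn-0x20000090fac7ca5a:pn-0x10000090fac7ca5a
 *		-w nn-0x20000090fac7ca5b:pn-0x10000090fac7ca5b
 */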
2578
2579static int __init nvme_fc_init_module(void)
2580{
2581	int ret;
2582
2583	nvme_fc_wq = create_workqueue("nvme_fc_wq");
2584 if (!nvme_fc_wq)
2585 return -ENOMEM;
2586
2587	ret = nvmf_register_transport(&nvme_fc_transport);
2588 if (ret)
2589 goto err;
2590
2591 return 0;
2592err:
2593 destroy_workqueue(nvme_fc_wq);
2594 return ret;
2595}
2596
2597static void __exit nvme_fc_exit_module(void)
2598{
2599 /* sanity check - all lports should be removed */
2600 if (!list_empty(&nvme_fc_lport_list))
2601 pr_warn("%s: localport list not empty\n", __func__);
2602
2603 nvmf_unregister_transport(&nvme_fc_transport);
2604
2605 destroy_workqueue(nvme_fc_wq);
2606
2607 ida_destroy(&nvme_fc_local_port_cnt);
2608 ida_destroy(&nvme_fc_ctrl_cnt);
2609}
2610
2611module_init(nvme_fc_init_module);
2612module_exit(nvme_fc_exit_module);
2613
2614MODULE_LICENSE("GPL v2");