/*
 * Copyright (c) 2016 Avago Technologies. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


/* *************************** Data Structures/Defines ****************** */


/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_FC_NR_AEN_COMMANDS	1
#define NVME_FC_AQ_BLKMQ_DEPTH	\
	(NVMF_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
#define AEN_CMDID_BASE		(NVME_FC_AQ_BLKMQ_DEPTH + 1)
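
/*
 * Resulting tag-space layout, assuming NVMF_AQ_DEPTH is 32 (its value
 * at the time of writing): the admin blk-mq queue exposes 31 tags
 * (command_ids 0..30) and the single AEN command is given command_id
 * 32 (AEN_CMDID_BASE), safely outside the blk-mq tag range.
 */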

enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = (1 << 0),
};

#define NVMEFC_QUEUE_DELAY	3		/* ms units */

#define NVME_FC_MAX_CONNECT_ATTEMPTS	1

struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	int			queue_size;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;
	u32			seqno;

	u64			connection_id;
	atomic_t		csn;

	unsigned long		flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_RELEASED	= (1 << 1),
	FCOP_FLAGS_COMPLETE	= (1 << 2),
	FCOP_FLAGS_AEN		= (1 << 3),
};

struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	struct request		*rq;
	u32			flags;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;	/* rport->ls_req_list */
	bool			req_queued;
};

enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};

struct nvme_fc_fcp_op {
	struct nvme_request	nreq;		/*
						 * nvme/host/core.c
						 * requires this to be
						 * the 1st element in the
						 * private structure
						 * associated with the
						 * request.
						 */
	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			flags;
	u32			rqno;
	u32			nents;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};
119
120struct nvme_fc_lport {
121 struct nvme_fc_local_port localport;
122
123 struct ida endp_cnt;
124 struct list_head port_list; /* nvme_fc_port_list */
125 struct list_head endp_list;
126 struct device *dev; /* physical device for dma */
127 struct nvme_fc_port_template *ops;
128 struct kref ref;
129} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
130

struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list;	/* for lport->endp_list */
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcctrl_flags {
	FCCTRL_TERMIO		= (1 << 0),
};

struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;
	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			queue_count;
	u32			cnum;

	u64			association_id;

	u64			cap;

	struct list_head	ctrl_list;	/* rport->ctrl_list */

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct work_struct	delete_work;
	struct work_struct	reset_work;
	struct delayed_work	connect_work;
	int			reconnect_delay;
	int			connect_attempts;

	struct kref		ref;
	u32			flags;
	u32			iocnt;

	struct nvme_fc_fcp_op	aen_ops[NVME_FC_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};

static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}



/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

static struct workqueue_struct *nvme_fc_wq;



/* *********************** FC-NVME Port Management ************************ */

static int __nvme_fc_del_ctrl(struct nvme_fc_ctrl *);
static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);

/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              host FC port.
 * @pinfo:    pointer to information about the port to be registered
 * @template: LLDD entrypoints and operational parameters for the port
 * @dev:      physical hardware device node port corresponds to. Will be
 *            used for DMA mappings
 * @portptr:  pointer to a local port pointer. Upon success, the routine
 *            will allocate a nvme_fc_local_port structure and place its
 *            address in the local port pointer. Upon failure, the local
 *            port pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	newrec->localport.private = &newrec[1];
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
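
/*
 * Example LLDD-side call sequence (an illustrative sketch only; the
 * pinfo/lldd_template/pdev/lport names below are hypothetical and not
 * part of this API):
 *
 *	struct nvme_fc_local_port *lport;
 *	int err;
 *
 *	err = nvme_fc_register_localport(&pinfo, &lldd_template,
 *					 &pdev->dev, &lport);
 *	if (err)
 *		return err;
 *	// lport->private now points at local_priv_sz bytes reserved for
 *	// the LLDD, allocated immediately after the local port struct.
 */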

static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	/* remove from transport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	/* let the LLDD know we've finished tearing it down */
	lport->ops->localport_delete(&lport->localport);

	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}

/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                                LLDD to deregister/remove a previously
 *                                registered NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);

/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                               LLDD to register the existence of a NVME
 *                               subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, the remote
 *             port pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_kfree_rport;
	}

	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_lport_put;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	kref_init(&newrec->ref);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	newrec->dev = lport->dev;
	newrec->lport = lport;
	newrec->remoteport.private = &newrec[1];
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	*portptr = &newrec->remoteport;
	return 0;

out_lport_put:
	nvme_fc_lport_put(lport);
out_kfree_rport:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	/* remove from lport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	/* let the LLDD know we've finished tearing it down */
	lport->ops->remoteport_delete(&rport->remoteport);

	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}
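
/*
 * Walk the rport's outstanding LS request list and ask the LLDD to
 * abort each request not already marked for termination. The lock is
 * dropped around each ls_abort() call, so the scan restarts from the
 * top of the list after every abort.
 */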
static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}

/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                                 LLDD to deregister/remove a previously
 *                                 registered NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	/* tear down all associations to the remote port */
	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
		__nvme_fc_del_ctrl(ctrl);

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	nvme_fc_rport_put(rport);
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);

/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * On simple mappings (which return just a dma address), we'll noop
 * them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}

/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
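
/*
 * LS request lifecycle: __nvme_fc_send_ls_req() takes an rport
 * reference, DMA-maps the request and response buffers as one
 * bidirectional region, links the op onto rport->ls_req_list and hands
 * it to the LLDD; __nvme_fc_finish_ls_req() undoes all of that once
 * the LLDD has called back (the send path unwinds itself on failure).
 */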
static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvme_fc_rport_put(rport);
}

static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&rport->lock, flags);

	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}
static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}

static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);

	if (!ret) {
		/*
		 * No timeout/not interruptible as we need the struct
		 * to exist until the LLDD calls us back. Thus mandate
		 * waiting until the driver calls back. The LLDD is
		 * responsible for any timeout action.
		 */
		wait_for_completion(&lsop->ls_done);

		__nvme_fc_finish_ls_req(lsop);

		ret = lsop->ls_error;
	}

	if (ret)
		return ret;

	/* ACC or RJT payload ? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;

	return 0;
}

static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvme_fc_send_ls_req(rport, lsop, done);
}

/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_LSACC		= 1,
	VERR_LSDESC_RQST	= 2,
	VERR_LSDESC_RQST_LEN	= 3,
	VERR_ASSOC_ID		= 4,
	VERR_ASSOC_ID_LEN	= 5,
	VERR_CONN_ID		= 6,
	VERR_CONN_ID_LEN	= 7,
	VERR_CR_ASSOC		= 8,
	VERR_CR_ASSOC_ACC_LEN	= 9,
	VERR_CR_CONN		= 10,
	VERR_CR_CONN_ACC_LEN	= 11,
	VERR_DISCONN		= 12,
	VERR_DISCONN_ACC_LEN	= 13,
};

static char *validation_errors[] = {
	"OK",
	"Not LS_ACC",
	"Not LSDESC_RQST",
	"Bad LSDESC_RQST Length",
	"Not Association ID",
	"Bad Association ID Length",
	"Not Connection ID",
	"Bad Connection ID Length",
	"Not CR_ASSOC Rqst",
	"Bad CR_ASSOC ACC Length",
	"Not CR_CONN Rqst",
	"Bad CR_CONN ACC Length",
	"Not Disconnect Rqst",
	"Bad Disconnect ACC Length",
};
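
/*
 * Send a FC-NVME Create Association LS to the target to establish the
 * association and its admin-queue connection, then validate the
 * returned LS_ACC payload field by field (see validation_errors[]
 * above for the possible verdicts).
 */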
static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];

	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
	assoc_rqst->desc_list_len =
			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
	assoc_rqst->assoc_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
	/* Linux supports only Dynamic controllers */
	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
	memcpy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id,
		min_t(size_t, FCNVME_ASSOC_HOSTID_LEN, sizeof(uuid_be)));
	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

	lsop->queue = queue;
	lsreq->rqstaddr = assoc_rqst;
	lsreq->rqstlen = sizeof(*assoc_rqst);
	lsreq->rspaddr = assoc_acc;
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (assoc_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)))
		fcret = VERR_CR_ASSOC_ACC_LEN;
	else if (assoc_acc->hdr.rqst.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (assoc_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
		fcret = VERR_CR_ASSOC;
	else if (assoc_acc->associd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		fcret = VERR_ASSOC_ID;
	else if (assoc_acc->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		fcret = VERR_ASSOC_ID_LEN;
	else if (assoc_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (assoc_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		ctrl->association_id =
			be64_to_cpu(assoc_acc->associd.association_id);
		queue->connection_id =
			be64_to_cpu(assoc_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect admin queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}
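
/*
 * Send a FC-NVME Create Connection LS to attach an I/O queue to the
 * already-established association, validating the LS_ACC the same way
 * as the admin-queue connect above.
 */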
static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
	struct fcnvme_ls_cr_conn_acc *conn_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];

	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
	conn_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	conn_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
	conn_rqst->connect_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
	conn_rqst->connect_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);

	lsop->queue = queue;
	lsreq->rqstaddr = conn_rqst;
	lsreq->rqstlen = sizeof(*conn_rqst);
	lsreq->rspaddr = conn_acc;
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (conn_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
		fcret = VERR_CR_CONN_ACC_LEN;
	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (conn_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
		fcret = VERR_CR_CONN;
	else if (conn_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (conn_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		queue->connection_id =
			be64_to_cpu(conn_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect command failed (%d).\n",
			queue->qnum, ret);
	return ret;
}
static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	__nvme_fc_finish_ls_req(lsop);

	/* fc-nvme initiator doesn't care about success or failure of cmd */

	kfree(lsop);
}

/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association. Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme initiator is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme target, so you may never get a
 * response even if you tried. As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme target
 * is present and receives the LS, it too can tear down.
 */
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
	struct fcnvme_ls_disconnect_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*discon_rqst) + sizeof(*discon_acc)),
			GFP_KERNEL);
	if (!lsop)
		/* couldn't send it... too bad */
		return;

	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];

	discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
	discon_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_disconn_cmd));

	discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	discon_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));

	discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);

	discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
				FCNVME_LSDESC_DISCONN_CMD);
	discon_rqst->discon_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd));
	discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
	discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);

	lsreq->rqstaddr = discon_rqst;
	lsreq->rqstlen = sizeof(*discon_rqst);
	lsreq->rspaddr = discon_acc;
	lsreq->rsplen = sizeof(*discon_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
				nvme_fc_disconnect_assoc_done);
	if (ret)
		kfree(lsop);

	/* only meaningful part to terminating the association */
	ctrl->association_id = 0;
}
1141
1142
1143/* *********************** NVME Ctrl Routines **************************** */
1144
James Smart78a7ac22017-04-23 08:30:07 -07001145static void __nvme_fc_final_op_cleanup(struct request *rq);
James Smarte3994412016-12-02 00:28:42 -08001146

static int
nvme_fc_reinit_request(void *data, struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;

	memset(cmdiu, 0, sizeof(*cmdiu));
	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
	memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));

	return 0;
}

static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_UNINIT);
}

static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

	return __nvme_fc_exit_request(set->driver_data, op);
}
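
/*
 * Atomically move an op from ACTIVE to ABORTED and ask the LLDD to
 * abort the outstanding FCP operation. Returns -ECANCELED (after
 * restoring the prior state) if the op was no longer active.
 */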
static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
	int state;

	state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
	if (state != FCPOP_STATE_ACTIVE) {
		atomic_set(&op->state, state);
		return -ECANCELED;
	}

	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					op->queue->lldd_handle,
					&op->fcp_req);

	return 0;
}

static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
	unsigned long flags;
	int i, ret;

	for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
		if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
			continue;

		spin_lock_irqsave(&ctrl->lock, flags);
		if (ctrl->flags & FCCTRL_TERMIO) {
			ctrl->iocnt++;
			aen_op->flags |= FCOP_FLAGS_TERMIO;
		}
		spin_unlock_irqrestore(&ctrl->lock, flags);

		ret = __nvme_fc_abort_op(ctrl, aen_op);
		if (ret) {
			/*
			 * if __nvme_fc_abort_op failed the io wasn't
			 * active. Thus this call path is running in
			 * parallel to the io complete. Treat as non-error.
			 */

			/* back out the flags/counters */
			spin_lock_irqsave(&ctrl->lock, flags);
			if (ctrl->flags & FCCTRL_TERMIO)
				ctrl->iocnt--;
			aen_op->flags &= ~FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&ctrl->lock, flags);
			return;
		}
	}
}
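
/*
 * Completion/teardown rendezvous: drop any TERMIO accounting and
 * decide whether this completion path (rather than the block layer
 * release path) is responsible for the final request cleanup. Returns
 * true when the caller must run __nvme_fc_final_op_cleanup().
 */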
static inline int
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	unsigned long flags;
	bool complete_rq = false;

	spin_lock_irqsave(&ctrl->lock, flags);
	if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
		if (ctrl->flags & FCCTRL_TERMIO)
			ctrl->iocnt--;
	}
	if (op->flags & FCOP_FLAGS_RELEASED)
		complete_rq = true;
	else
		op->flags |= FCOP_FLAGS_COMPLETE;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return complete_rq;
}

static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	struct nvme_fc_queue *queue = op->queue;
	struct nvme_completion *cqe = &op->rsp_iu.cqe;
	struct nvme_command *sqe = &op->cmd_iu.sqe;
	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
	union nvme_result result;
	bool complete_rq;

	/*
	 * WARNING:
	 * The current linux implementation of a nvme controller
	 * allocates a single tag set for all io queues and sizes
	 * the io queues to fully hold all possible tags. Thus, the
	 * implementation does not reference or care about the sqhd
	 * value as it never needs to use the sqhd/sqtail pointers
	 * for submission pacing.
	 *
	 * This affects the FC-NVME implementation in two ways:
	 * 1) As the value doesn't matter, we don't need to waste
	 *    cycles extracting it from ERSPs and stamping it in the
	 *    cases where the transport fabricates CQEs on successful
	 *    completions.
	 * 2) The FC-NVME implementation requires that delivery of
	 *    ERSP completions are to go back to the nvme layer in order
	 *    relative to the rsn, such that the sqhd value will always
	 *    be "in order" for the nvme layer. As the nvme layer in
	 *    linux doesn't care about sqhd, there's no need to return
	 *    them in order.
	 *
	 * Additionally:
	 * As the core nvme layer in linux currently does not look at
	 * every field in the cqe - in cases where the FC transport must
	 * fabricate a CQE, the following fields will not be set as they
	 * are not referenced:
	 *      cqe.sqid, cqe.sqhd, cqe.command_id
	 */

	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);

	if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
		status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
	else if (freq->status)
		status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);

	/*
	 * For the linux implementation, if we have an unsuccessful
	 * status, the blk-mq layer can typically be called with the
	 * non-zero status and the content of the cqe isn't important.
	 */
	if (status)
		goto done;

	/*
	 * command completed successfully relative to the wire
	 * protocol. However, validate anything received and
	 * extract the status and result from the cqe (create it
	 * where necessary).
	 */

	switch (freq->rcv_rsplen) {

	case 0:
	case NVME_FC_SIZEOF_ZEROS_RSP:
		/*
		 * No response payload or 12 bytes of payload (which
		 * should all be zeros) are considered successful and
		 * no payload in the CQE by the transport.
		 */
		if (freq->transferred_length !=
			be32_to_cpu(op->cmd_iu.data_len)) {
			status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
			goto done;
		}
		result.u64 = 0;
		break;

	case sizeof(struct nvme_fc_ersp_iu):
		/*
		 * The ERSP IU contains a full completion with CQE.
		 * Validate ERSP IU and look at cqe.
		 */
		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
					(freq->rcv_rsplen / 4) ||
			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
					freq->transferred_length ||
			     op->rsp_iu.status_code ||
			     sqe->common.command_id != cqe->command_id)) {
			status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
			goto done;
		}
		result = cqe->result;
		status = cqe->status;
		break;

	default:
		status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
		goto done;
	}

done:
	if (op->flags & FCOP_FLAGS_AEN) {
		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
		complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
		atomic_set(&op->state, FCPOP_STATE_IDLE);
		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
		nvme_fc_ctrl_put(ctrl);
		return;
	}

	complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
	if (!complete_rq) {
		if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
			/* shift keeps the status aligned with the other
			 * (phase-bit-style) status assignments above
			 */
			status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
			if (blk_queue_dying(rq->q))
				status |= cpu_to_le16(NVME_SC_DNR << 1);
		}
		nvme_end_request(rq, status, result);
	} else
		__nvme_fc_final_op_cleanup(rq);
}
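
/*
 * One-time init of an FCP op: wire up the command/response IUs, the
 * scatterlist and LLDD private area laid out after the op, and
 * DMA-map the two IUs.
 */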
static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
		struct request *rq, u32 rqno)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	int ret = 0;

	memset(op, 0, sizeof(*op));
	op->fcp_req.cmdaddr = &op->cmd_iu;
	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
	op->fcp_req.rspaddr = &op->rsp_iu;
	op->fcp_req.rsplen = sizeof(op->rsp_iu);
	op->fcp_req.done = nvme_fc_fcpio_done;
	op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
	op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
	op->ctrl = ctrl;
	op->queue = queue;
	op->rq = rq;
	op->rqno = rqno;

	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));

	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - cmdiu dma mapping failed.\n");
		ret = -EFAULT;
		goto out_on_error;
	}

	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
				&op->rsp_iu, sizeof(op->rsp_iu),
				DMA_FROM_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - rspiu dma mapping failed.\n");
		ret = -EFAULT;
	}

	atomic_set(&op->state, FCPOP_STATE_IDLE);
out_on_error:
	return ret;
}
static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = set->driver_data;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1];

	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}

static int
nvme_fc_init_admin_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = set->driver_data;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_queue *queue = &ctrl->queues[0];

	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}
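
/*
 * Set up the AEN ops: these are never tied to a struct request, carry
 * their own LLDD private buffer, and use command_ids above the blk-mq
 * tag space (AEN_CMDID_BASE + i).
 */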
static int
nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	struct nvme_fc_cmd_iu *cmdiu;
	struct nvme_command *sqe;
	void *private;
	int i, ret;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
		private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
						GFP_KERNEL);
		if (!private)
			return -ENOMEM;

		cmdiu = &aen_op->cmd_iu;
		sqe = &cmdiu->sqe;
		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
				aen_op, (struct request *)NULL,
				(AEN_CMDID_BASE + i));
		if (ret) {
			kfree(private);
			return ret;
		}

		aen_op->flags = FCOP_FLAGS_AEN;
		aen_op->fcp_req.first_sgl = NULL; /* no sg list */
		aen_op->fcp_req.private = private;

		memset(sqe, 0, sizeof(*sqe));
		sqe->common.opcode = nvme_admin_async_event;
		/* Note: core layer may overwrite the sqe.command_id value */
		sqe->common.command_id = AEN_CMDID_BASE + i;
	}
	return 0;
}

static void
nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	int i;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
		if (!aen_op->fcp_req.private)
			continue;

		__nvme_fc_exit_request(ctrl, aen_op);

		kfree(aen_op->fcp_req.private);
		aen_op->fcp_req.private = NULL;
	}
}
static inline void
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
		unsigned int qidx)
{
	struct nvme_fc_queue *queue = &ctrl->queues[qidx];

	hctx->driver_data = queue;
	queue->hctx = hctx;
}

static int
nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);

	return 0;
}

static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);

	return 0;
}
static void
nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size)
{
	struct nvme_fc_queue *queue;

	queue = &ctrl->queues[idx];
	memset(queue, 0, sizeof(*queue));
	queue->ctrl = ctrl;
	queue->qnum = idx;
	atomic_set(&queue->csn, 1);
	queue->dev = ctrl->dev;

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);

	queue->queue_size = queue_size;

	/*
	 * Considered whether we should allocate buffers for all SQEs
	 * and CQEs and dma map them - mapping their respective entries
	 * into the request structures (kernel vm addr and dma address)
	 * thus the driver could use the buffers/mappings directly.
	 * It only makes sense if the LLDD would use them for its
	 * messaging api. It's very unlikely most adapter APIs would use
	 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
	 * structures were used instead.
	 */
}

/*
 * This routine terminates a queue at the transport level.
 * The transport has already ensured that all outstanding ios on
 * the queue have been terminated.
 * The transport will send a Disconnect LS request to terminate
 * the queue's connection. Termination of the admin queue will also
 * terminate the association at the target.
 */
static void
nvme_fc_free_queue(struct nvme_fc_queue *queue)
{
	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
		return;

	/*
	 * Current implementation never disconnects a single queue.
	 * It always terminates a whole association. So there is never
	 * a disconnect(queue) LS sent to the target.
	 */

	queue->connection_id = 0;
	clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
}
static void
__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx)
{
	if (ctrl->lport->ops->delete_queue)
		ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
				queue->lldd_handle);
	queue->lldd_handle = NULL;
}

static void
nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvme_fc_free_queue(&ctrl->queues[i]);
}

static int
__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
{
	int ret = 0;

	queue->lldd_handle = NULL;
	if (ctrl->lport->ops->create_queue)
		ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
				qidx, qsize, &queue->lldd_handle);

	return ret;
}

static void
nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->queue_count - 1];
	int i;

	for (i = ctrl->queue_count - 1; i >= 1; i--, queue--)
		__nvme_fc_delete_hw_queue(ctrl, queue, i);
}

static int
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	struct nvme_fc_queue *queue = &ctrl->queues[1];
	int i, ret;

	for (i = 1; i < ctrl->queue_count; i++, queue++) {
		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
		if (ret)
			goto delete_queues;
	}

	return 0;

delete_queues:
	for (; i >= 0; i--)
		__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
	return ret;
}
1663
1664static int
1665nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1666{
1667 int i, ret = 0;
1668
1669 for (i = 1; i < ctrl->queue_count; i++) {
1670 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
1671 (qsize / 5));
1672 if (ret)
1673 break;
1674 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
1675 if (ret)
1676 break;
1677 }
1678
1679 return ret;
1680}
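/*
 * Note: the (qsize / 5) argument above is the ersp_ratio handed to the
 * Create Connection LS. Loosely, it hints how often the target should
 * return a full Extended RSP (which carries SQ head pointer updates)
 * rather than a minimal response.
 */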
1681
1682static void
1683nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
1684{
1685 int i;
1686
1687 for (i = 1; i < ctrl->queue_count; i++)
1688 nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize);
1689}
1690
1691static void
1692nvme_fc_ctrl_free(struct kref *ref)
1693{
1694 struct nvme_fc_ctrl *ctrl =
1695 container_of(ref, struct nvme_fc_ctrl, ref);
1696 unsigned long flags;
1697
James Smart61bff8e2017-04-23 08:30:08 -07001698 if (ctrl->ctrl.tagset) {
1699 blk_cleanup_queue(ctrl->ctrl.connect_q);
1700 blk_mq_free_tag_set(&ctrl->tag_set);
James Smarte3994412016-12-02 00:28:42 -08001701 }
1702
James Smart61bff8e2017-04-23 08:30:08 -07001703 /* remove from rport list */
1704 spin_lock_irqsave(&ctrl->rport->lock, flags);
1705 list_del(&ctrl->ctrl_list);
1706 spin_unlock_irqrestore(&ctrl->rport->lock, flags);
1707
1708 blk_cleanup_queue(ctrl->ctrl.admin_q);
1709 blk_mq_free_tag_set(&ctrl->admin_tag_set);
1710
1711 kfree(ctrl->queues);
1712
James Smarte3994412016-12-02 00:28:42 -08001713 put_device(ctrl->dev);
1714 nvme_fc_rport_put(ctrl->rport);
1715
James Smarte3994412016-12-02 00:28:42 -08001716 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
Ewan D. Milnede414472017-04-24 13:24:16 -04001717 if (ctrl->ctrl.opts)
1718 nvmf_free_options(ctrl->ctrl.opts);
James Smarte3994412016-12-02 00:28:42 -08001719 kfree(ctrl);
1720}
1721
1722static void
1723nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
1724{
1725 kref_put(&ctrl->ref, nvme_fc_ctrl_free);
1726}
1727
1728static int
1729nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
1730{
1731 return kref_get_unless_zero(&ctrl->ref);
1732}
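/*
 * Reference pattern (a minimal sketch): every path that hands an op to
 * the LLDD takes a ctrl reference first and drops it when the op
 * completes, so the ctrl cannot be freed while the LLDD still owns an
 * exchange:
 *
 *	if (!nvme_fc_ctrl_get(ctrl))
 *		return BLK_MQ_RQ_QUEUE_ERROR;
 *	... submit to LLDD ...
 *	nvme_fc_ctrl_put(ctrl);		(from the done/cleanup path)
 */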
1733
1734/*
1735 * All accesses from nvme core layer done - can now free the
1736 * controller. Called after last nvme_put_ctrl() call
1737 */
1738static void
James Smart61bff8e2017-04-23 08:30:08 -07001739nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
James Smarte3994412016-12-02 00:28:42 -08001740{
1741 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
1742
1743 WARN_ON(nctrl != &ctrl->ctrl);
1744
James Smart61bff8e2017-04-23 08:30:08 -07001745 nvme_fc_ctrl_put(ctrl);
1746}
James Smarte3994412016-12-02 00:28:42 -08001747
James Smart61bff8e2017-04-23 08:30:08 -07001748static void
1749nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
1750{
1751 dev_warn(ctrl->ctrl.device,
1752 "NVME-FC{%d}: transport association error detected: %s\n",
1753 ctrl->cnum, errmsg);
1754 dev_info(ctrl->ctrl.device,
1755 "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
James Smarte3994412016-12-02 00:28:42 -08001756
James Smart2952a872017-04-25 15:32:01 -07001757 /* stop the queues on error, cleanup is in reset thread */
1758 if (ctrl->queue_count > 1)
1759 nvme_stop_queues(&ctrl->ctrl);
1760
James Smart61bff8e2017-04-23 08:30:08 -07001761 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
1762 dev_err(ctrl->ctrl.device,
1763 "NVME-FC{%d}: error_recovery: Couldn't change state "
1764 "to RECONNECTING\n", ctrl->cnum);
1765 return;
James Smarte3994412016-12-02 00:28:42 -08001766 }
1767
James Smart61bff8e2017-04-23 08:30:08 -07001768 if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
1769 dev_err(ctrl->ctrl.device,
1770 "NVME-FC{%d}: error_recovery: Failed to schedule "
1771 "reset work\n", ctrl->cnum);
James Smarte3994412016-12-02 00:28:42 -08001772}
1773
Christoph Hellwigbaee29a2017-04-21 10:44:06 +02001774static enum blk_eh_timer_return
James Smarte3994412016-12-02 00:28:42 -08001775nvme_fc_timeout(struct request *rq, bool reserved)
1776{
1777 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1778 struct nvme_fc_ctrl *ctrl = op->ctrl;
1779 int ret;
1780
1781 if (reserved)
1782 return BLK_EH_RESET_TIMER;
1783
1784 ret = __nvme_fc_abort_op(ctrl, op);
1785 if (ret)
1786 /* io wasn't active to abort; consider it done */
1787 return BLK_EH_HANDLED;
1788
1789 /*
James Smart61bff8e2017-04-23 08:30:08 -07001790 * we can't individually ABTS an io without affecting the queue,
1791 * thus killing the queue, and thus the association.
1792 * So resolve by performing a controller reset, which will stop
1793 * the host/io stack, terminate the association on the link,
1794 * and recreate an association on the link.
James Smarte3994412016-12-02 00:28:42 -08001795 */
James Smart61bff8e2017-04-23 08:30:08 -07001796 nvme_fc_error_recovery(ctrl, "io timeout error");
James Smarte3994412016-12-02 00:28:42 -08001797
1798 return BLK_EH_HANDLED;
1799}
1800
1801static int
1802nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
1803 struct nvme_fc_fcp_op *op)
1804{
1805 struct nvmefc_fcp_req *freq = &op->fcp_req;
James Smarte3994412016-12-02 00:28:42 -08001806 enum dma_data_direction dir;
1807 int ret;
1808
1809 freq->sg_cnt = 0;
1810
Christoph Hellwigb131c612017-01-13 12:29:12 +01001811 if (!blk_rq_payload_bytes(rq))
James Smarte3994412016-12-02 00:28:42 -08001812 return 0;
1813
1814 freq->sg_table.sgl = freq->first_sgl;
Christoph Hellwig19e420b2017-01-19 16:55:57 +01001815 ret = sg_alloc_table_chained(&freq->sg_table,
1816 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
James Smarte3994412016-12-02 00:28:42 -08001817 if (ret)
1818 return -ENOMEM;
1819
1820 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
Christoph Hellwig19e420b2017-01-19 16:55:57 +01001821 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
James Smarte3994412016-12-02 00:28:42 -08001822 dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
1823 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
1824 op->nents, dir);
1825 if (unlikely(freq->sg_cnt <= 0)) {
1826 sg_free_table_chained(&freq->sg_table, true);
1827 freq->sg_cnt = 0;
1828 return -EFAULT;
1829 }
1830
1831 /*
1832 * TODO: blk_integrity_rq(rq) for DIF
1833 */
1834 return 0;
1835}
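/*
 * Note: freq->first_sgl points at inline scatterlist storage carved
 * out of the request private data (see the SG_CHUNK_SIZE term in the
 * tag_set cmd_size computation), so sg_alloc_table_chained() only
 * allocates extra chunks for requests with more physical segments
 * than the inline chunk can hold.
 */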
1836
1837static void
1838nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
1839 struct nvme_fc_fcp_op *op)
1840{
1841 struct nvmefc_fcp_req *freq = &op->fcp_req;
1842
1843 if (!freq->sg_cnt)
1844 return;
1845
1846 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
1847 ((rq_data_dir(rq) == WRITE) ?
1848 DMA_TO_DEVICE : DMA_FROM_DEVICE));
1849
1850 nvme_cleanup_cmd(rq);
1851
1852 sg_free_table_chained(&freq->sg_table, true);
1853
1854 freq->sg_cnt = 0;
1855}
1856
1857/*
1858 * In FC, the queue is a logical thing. At transport connect, the target
1859 * creates its "queue" and returns a handle that the host gives back
1860 * to the target whenever it posts to the corresponding SQ. When an
1861 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
1862 * command contained within the SQE, an io, and assigns a FC exchange
1863 * to it. The SQE and the associated SQ handle are sent in the initial
1864 * CMD IU sent on the exchange. All transfers relative to the io occur
1865 * as part of the exchange. The CQE is the last thing for the io,
1866 * which is transferred (explicitly or implicitly) with the RSP IU
1867 * sent on the exchange. After the CQE is received, the FC exchange is
1868 * terminated and the exchange may be reused for a different io.
1869 *
1870 * In the transport-to-LLDD api, the transport issues a new fcp io
1871 * request to the LLDD. The LLDD then allocates a FC exchange
1872 * resource and transfers the command. The LLDD will then process all
1873 * steps to complete the io. Upon completion, the transport done routine
1874 * is called.
1875 *
1876 * So - while the operation is outstanding to the LLDD, there is a link
1877 * level FC exchange resource that is also outstanding. This must be
1878 * considered in all cleanup operations.
1879 */
1880static int
1881nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1882 struct nvme_fc_fcp_op *op, u32 data_len,
1883 enum nvmefc_fcp_datadir io_dir)
1884{
1885 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1886 struct nvme_command *sqe = &cmdiu->sqe;
1887 u32 csn;
1888 int ret;
1889
James Smart61bff8e2017-04-23 08:30:08 -07001890 /*
1891 * before attempting to send the io, check to see if we believe
1892 * the target device is present
1893 */
1894 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
1895 return BLK_MQ_RQ_QUEUE_ERROR;
1896
James Smarte3994412016-12-02 00:28:42 -08001897 if (!nvme_fc_ctrl_get(ctrl))
1898 return BLK_MQ_RQ_QUEUE_ERROR;
1899
1900 /* format the FC-NVME CMD IU and fcp_req */
1901 cmdiu->connection_id = cpu_to_be64(queue->connection_id);
1902 csn = atomic_inc_return(&queue->csn);
1903 cmdiu->csn = cpu_to_be32(csn);
1904 cmdiu->data_len = cpu_to_be32(data_len);
1905 switch (io_dir) {
1906 case NVMEFC_FCP_WRITE:
1907 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
1908 break;
1909 case NVMEFC_FCP_READ:
1910 cmdiu->flags = FCNVME_CMD_FLAGS_READ;
1911 break;
1912 case NVMEFC_FCP_NODATA:
1913 cmdiu->flags = 0;
1914 break;
1915 }
1916 op->fcp_req.payload_length = data_len;
1917 op->fcp_req.io_dir = io_dir;
1918 op->fcp_req.transferred_length = 0;
1919 op->fcp_req.rcv_rsplen = 0;
James Smart62eeacb2017-03-23 20:41:27 -07001920 op->fcp_req.status = NVME_SC_SUCCESS;
James Smarte3994412016-12-02 00:28:42 -08001921 op->fcp_req.sqid = cpu_to_le16(queue->qnum);
1922
1923 /*
1924 * validate per fabric rules, set fields mandated by fabric spec
1925 * as well as those by FC-NVME spec.
1926 */
1927 WARN_ON_ONCE(sqe->common.metadata);
1928 WARN_ON_ONCE(sqe->common.dptr.prp1);
1929 WARN_ON_ONCE(sqe->common.dptr.prp2);
1930 sqe->common.flags |= NVME_CMD_SGL_METABUF;
1931
1932 /*
1933 * format SQE DPTR field per FC-NVME rules
1934 * type=data block descr; subtype=offset;
1935 * offset is currently 0.
1936 */
1937 sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET;
1938 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
1939 sqe->rw.dptr.sgl.addr = 0;
1940
James Smart78a7ac22017-04-23 08:30:07 -07001941 if (!(op->flags & FCOP_FLAGS_AEN)) {
James Smarte3994412016-12-02 00:28:42 -08001942 ret = nvme_fc_map_data(ctrl, op->rq, op);
1943 if (ret < 0) {
James Smarte3994412016-12-02 00:28:42 -08001944 nvme_cleanup_cmd(op->rq);
1945 nvme_fc_ctrl_put(ctrl);
1946 return (ret == -ENOMEM || ret == -EAGAIN) ?
1947 BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
1948 }
1949 }
1950
1951 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
1952 sizeof(op->cmd_iu), DMA_TO_DEVICE);
1953
1954 atomic_set(&op->state, FCPOP_STATE_ACTIVE);
1955
James Smart78a7ac22017-04-23 08:30:07 -07001956 if (!(op->flags & FCOP_FLAGS_AEN))
James Smarte3994412016-12-02 00:28:42 -08001957 blk_mq_start_request(op->rq);
1958
1959 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
1960 &ctrl->rport->remoteport,
1961 queue->lldd_handle, &op->fcp_req);
1962
1963 if (ret) {
James Smarte3994412016-12-02 00:28:42 -08001964 if (op->rq) { /* normal request */
1965 nvme_fc_unmap_data(ctrl, op->rq, op);
1966 nvme_cleanup_cmd(op->rq);
1967 }
1968 /* else - aen. no cleanup needed */
1969
1970 nvme_fc_ctrl_put(ctrl);
1971
1972 if (ret != -EBUSY)
1973 return BLK_MQ_RQ_QUEUE_ERROR;
1974
1975 if (op->rq) {
1976 blk_mq_stop_hw_queues(op->rq->q);
1977 blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
1978 }
1979 return BLK_MQ_RQ_QUEUE_BUSY;
1980 }
1981
1982 return BLK_MQ_RQ_QUEUE_OK;
1983}
1984
1985static int
1986nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
1987 const struct blk_mq_queue_data *bd)
1988{
1989 struct nvme_ns *ns = hctx->queue->queuedata;
1990 struct nvme_fc_queue *queue = hctx->driver_data;
1991 struct nvme_fc_ctrl *ctrl = queue->ctrl;
1992 struct request *rq = bd->rq;
1993 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1994 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1995 struct nvme_command *sqe = &cmdiu->sqe;
1996 enum nvmefc_fcp_datadir io_dir;
1997 u32 data_len;
1998 int ret;
1999
2000 ret = nvme_setup_cmd(ns, rq, sqe);
2001 if (ret)
2002 return ret;
2003
Christoph Hellwigb131c612017-01-13 12:29:12 +01002004 data_len = blk_rq_payload_bytes(rq);
James Smarte3994412016-12-02 00:28:42 -08002005 if (data_len)
2006 io_dir = ((rq_data_dir(rq) == WRITE) ?
2007 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
2008 else
2009 io_dir = NVMEFC_FCP_NODATA;
2010
2011 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2012}
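/*
 * Note on the status nvme_fc_start_fcp_op() bubbles back to blk-mq:
 *
 *	BLK_MQ_RQ_QUEUE_OK	io was handed to the LLDD
 *	BLK_MQ_RQ_QUEUE_BUSY	transient failure (e.g. LLDD -EBUSY);
 *				blk-mq will requeue and retry the request
 *	BLK_MQ_RQ_QUEUE_ERROR	hard failure; the request is failed
 */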
2013
2014static struct blk_mq_tags *
2015nvme_fc_tagset(struct nvme_fc_queue *queue)
2016{
2017 if (queue->qnum == 0)
2018 return queue->ctrl->admin_tag_set.tags[queue->qnum];
2019
2020 return queue->ctrl->tag_set.tags[queue->qnum - 1];
2021}
2022
2023static int
2024nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
2025
2026{
2027 struct nvme_fc_queue *queue = hctx->driver_data;
2028 struct nvme_fc_ctrl *ctrl = queue->ctrl;
2029 struct request *req;
2030 struct nvme_fc_fcp_op *op;
2031
2032 req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
James Smart61bff8e2017-04-23 08:30:08 -07002033 if (!req)
James Smarte3994412016-12-02 00:28:42 -08002034 return 0;
James Smarte3994412016-12-02 00:28:42 -08002035
2036 op = blk_mq_rq_to_pdu(req);
2037
2038 if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
2039 (ctrl->lport->ops->poll_queue))
2040 ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
2041 queue->lldd_handle);
2042
2043 return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
2044}
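/*
 * Note: polling is best-effort. If the LLDD exposes a poll_queue hook
 * it is kicked, then we report whether the tagged op has left the
 * ACTIVE state so blk-mq can keep spinning until completion.
 */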
2045
2046static void
2047nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
2048{
2049 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2050 struct nvme_fc_fcp_op *aen_op;
James Smart61bff8e2017-04-23 08:30:08 -07002051 unsigned long flags;
2052 bool terminating = false;
James Smarte3994412016-12-02 00:28:42 -08002053 int ret;
2054
2055 if (aer_idx >= NVME_FC_NR_AEN_COMMANDS)
2056 return;
2057
James Smart61bff8e2017-04-23 08:30:08 -07002058 spin_lock_irqsave(&ctrl->lock, flags);
2059 if (ctrl->flags & FCCTRL_TERMIO)
2060 terminating = true;
2061 spin_unlock_irqrestore(&ctrl->lock, flags);
2062
2063 if (terminating)
2064 return;
2065
James Smarte3994412016-12-02 00:28:42 -08002066 aen_op = &ctrl->aen_ops[aer_idx];
2067
2068 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
2069 NVMEFC_FCP_NODATA);
2070 if (ret)
2071 dev_err(ctrl->ctrl.device,
2072 "failed async event work [%d]\n", aer_idx);
2073}
2074
2075static void
James Smart78a7ac22017-04-23 08:30:07 -07002076__nvme_fc_final_op_cleanup(struct request *rq)
James Smarte3994412016-12-02 00:28:42 -08002077{
2078 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2079 struct nvme_fc_ctrl *ctrl = op->ctrl;
James Smarte3994412016-12-02 00:28:42 -08002080
James Smart78a7ac22017-04-23 08:30:07 -07002081 atomic_set(&op->state, FCPOP_STATE_IDLE);
2082 op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
2083 FCOP_FLAGS_COMPLETE);
James Smarte3994412016-12-02 00:28:42 -08002084
2085 nvme_cleanup_cmd(rq);
James Smarte3994412016-12-02 00:28:42 -08002086 nvme_fc_unmap_data(ctrl, rq, op);
Christoph Hellwig77f02a72017-03-30 13:41:32 +02002087 nvme_complete_rq(rq);
James Smarte3994412016-12-02 00:28:42 -08002088 nvme_fc_ctrl_put(ctrl);
2089
James Smarte3994412016-12-02 00:28:42 -08002090}
2091
James Smart78a7ac22017-04-23 08:30:07 -07002092static void
2093nvme_fc_complete_rq(struct request *rq)
2094{
2095 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2096 struct nvme_fc_ctrl *ctrl = op->ctrl;
2097 unsigned long flags;
2098 bool completed = false;
2099
2100 /*
2101 * the core layer, on controller resets after calling
2102 * nvme_shutdown_ctrl(), calls complete_rq without our
2103 * calling blk_mq_complete_request(), thus there may still
2104 * be live i/o outstanding with the LLDD. This means the transport
2105 * has to track complete calls vs fcpio_done calls to know which
2106 * path to take on completes and dones.
2107 */
2108 spin_lock_irqsave(&ctrl->lock, flags);
2109 if (op->flags & FCOP_FLAGS_COMPLETE)
2110 completed = true;
2111 else
2112 op->flags |= FCOP_FLAGS_RELEASED;
2113 spin_unlock_irqrestore(&ctrl->lock, flags);
2114
2115 if (completed)
2116 __nvme_fc_final_op_cleanup(rq);
2117}
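/*
 * A sketch of the two orders the complete/done race can resolve in
 * (flag handshake under ctrl->lock):
 *
 *	fcpio_done first:  the done path sets FCOP_FLAGS_COMPLETE, so
 *			   complete_rq above sees it and performs the
 *			   final op cleanup.
 *	complete_rq first: complete_rq sets FCOP_FLAGS_RELEASED, and
 *			   the later done path performs the final
 *			   cleanup itself.
 */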
2118
James Smarte3994412016-12-02 00:28:42 -08002119/*
2120 * This routine is used by the transport when it needs to find active
2121 * io on a queue that is to be terminated. The transport uses
2122 * blk_mq_tagset_busy_iter() to find the busy requests, which then
2123 * invokes this routine to kill them one by one.
2124 *
2125 * As FC allocates FC exchange for each io, the transport must contact
2126 * the LLDD to terminate the exchange, thus releasing the FC exchange.
2127 * After terminating the exchange the LLDD will call the transport's
2128 * normal io done path for the request, but it will have an aborted
2129 * status. The done path will return the io request back to the block
2130 * layer with an error status.
2131 */
2132static void
2133nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2134{
2135 struct nvme_ctrl *nctrl = data;
2136 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2137 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
James Smart78a7ac22017-04-23 08:30:07 -07002138 unsigned long flags;
2139 int status;
James Smarte3994412016-12-02 00:28:42 -08002140
2141 if (!blk_mq_request_started(req))
2142 return;
2143
James Smart78a7ac22017-04-23 08:30:07 -07002144 spin_lock_irqsave(&ctrl->lock, flags);
James Smart61bff8e2017-04-23 08:30:08 -07002145 if (ctrl->flags & FCCTRL_TERMIO) {
2146 ctrl->iocnt++;
2147 op->flags |= FCOP_FLAGS_TERMIO;
2148 }
James Smart78a7ac22017-04-23 08:30:07 -07002149 spin_unlock_irqrestore(&ctrl->lock, flags);
James Smarte3994412016-12-02 00:28:42 -08002150
James Smart78a7ac22017-04-23 08:30:07 -07002151 status = __nvme_fc_abort_op(ctrl, op);
2152 if (status) {
2153 /*
2154 * if __nvme_fc_abort_op failed the io wasn't
2155 * active. Thus this call path is running in
2156 * parallel to the io complete. Treat as non-error.
2157 */
2158
2159 /* back out the flags/counters */
2160 spin_lock_irqsave(&ctrl->lock, flags);
James Smart61bff8e2017-04-23 08:30:08 -07002161 if (ctrl->flags & FCCTRL_TERMIO)
2162 ctrl->iocnt--;
James Smart78a7ac22017-04-23 08:30:07 -07002163 op->flags &= ~FCOP_FLAGS_TERMIO;
2164 spin_unlock_irqrestore(&ctrl->lock, flags);
2165 return;
2166 }
2167}
James Smarte3994412016-12-02 00:28:42 -08002168
James Smarte3994412016-12-02 00:28:42 -08002169
James Smart61bff8e2017-04-23 08:30:08 -07002170static const struct blk_mq_ops nvme_fc_mq_ops = {
2171 .queue_rq = nvme_fc_queue_rq,
2172 .complete = nvme_fc_complete_rq,
2173 .init_request = nvme_fc_init_request,
2174 .exit_request = nvme_fc_exit_request,
2175 .reinit_request = nvme_fc_reinit_request,
2176 .init_hctx = nvme_fc_init_hctx,
2177 .poll = nvme_fc_poll,
2178 .timeout = nvme_fc_timeout,
James Smarte3994412016-12-02 00:28:42 -08002179};
2180
2181static int
2182nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2183{
2184 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2185 int ret;
2186
2187 ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
2188 if (ret) {
2189 dev_info(ctrl->ctrl.device,
2190 "set_queue_count failed: %d\n", ret);
2191 return ret;
2192 }
2193
2194 ctrl->queue_count = opts->nr_io_queues + 1;
2195 if (!opts->nr_io_queues)
2196 return 0;
2197
2198 dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
2199 opts->nr_io_queues);
2200
2201 nvme_fc_init_io_queues(ctrl);
2202
2203 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2204 ctrl->tag_set.ops = &nvme_fc_mq_ops;
2205 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2206 ctrl->tag_set.reserved_tags = 1; /* fabric connect */
2207 ctrl->tag_set.numa_node = NUMA_NO_NODE;
2208 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2209 ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
2210 (SG_CHUNK_SIZE *
2211 sizeof(struct scatterlist)) +
2212 ctrl->lport->ops->fcprqst_priv_sz;
2213 ctrl->tag_set.driver_data = ctrl;
2214 ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
2215 ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2216
2217 ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2218 if (ret)
2219 return ret;
2220
2221 ctrl->ctrl.tagset = &ctrl->tag_set;
2222
2223 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2224 if (IS_ERR(ctrl->ctrl.connect_q)) {
2225 ret = PTR_ERR(ctrl->ctrl.connect_q);
2226 goto out_free_tag_set;
2227 }
2228
2229 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2230 if (ret)
2231 goto out_cleanup_blk_queue;
2232
2233 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2234 if (ret)
2235 goto out_delete_hw_queues;
2236
2237 return 0;
2238
2239out_delete_hw_queues:
2240 nvme_fc_delete_hw_io_queues(ctrl);
2241out_cleanup_blk_queue:
2242 nvme_stop_keep_alive(&ctrl->ctrl);
2243 blk_cleanup_queue(ctrl->ctrl.connect_q);
2244out_free_tag_set:
2245 blk_mq_free_tag_set(&ctrl->tag_set);
2246 nvme_fc_free_io_queues(ctrl);
2247
2248 /* force the free routine (on final put) to ignore io queues */
2249 ctrl->ctrl.tagset = NULL;
2250
2251 return ret;
2252}
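/*
 * Note: the cmd_size used above reserves, per request: the fcp op
 * itself, an inline scatterlist chunk, and the LLDD's private area.
 * As an illustrative figure, assuming SG_CHUNK_SIZE = 128 and a
 * 32-byte struct scatterlist, the inline chunk alone is 4096 bytes.
 */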
2253
James Smart61bff8e2017-04-23 08:30:08 -07002254static int
2255nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
2256{
2257 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2258 int ret;
2259
2260 ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
2261 if (ret) {
2262 dev_info(ctrl->ctrl.device,
2263 "set_queue_count failed: %d\n", ret);
2264 return ret;
2265 }
2266
2267 /* check for io queues existing */
2268 if (ctrl->queue_count == 1)
2269 return 0;
2270
2271 dev_info(ctrl->ctrl.device, "Recreating %d I/O queues.\n",
2272 opts->nr_io_queues);
2273
2274 nvme_fc_init_io_queues(ctrl);
2275
2276 ret = blk_mq_reinit_tagset(&ctrl->tag_set);
2277 if (ret)
2278 goto out_free_io_queues;
2279
2280 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2281 if (ret)
2282 goto out_free_io_queues;
2283
2284 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2285 if (ret)
2286 goto out_delete_hw_queues;
2287
2288 return 0;
2289
2290out_delete_hw_queues:
2291 nvme_fc_delete_hw_io_queues(ctrl);
2292out_free_io_queues:
2293 nvme_fc_free_io_queues(ctrl);
2294 return ret;
2295}
2296
2297/*
2298 * This routine restarts the controller on the host side, and
2299 * on the link side, recreates the controller association.
2300 */
2301static int
2302nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2303{
2304 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2305 u32 segs;
2306 int ret;
2307 bool changed;
2308
2309 ctrl->connect_attempts++;
2310
2311 /*
2312 * Create the admin queue
2313 */
2314
2315 nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH);
2316
2317 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
2318 NVME_FC_AQ_BLKMQ_DEPTH);
2319 if (ret)
2320 goto out_free_queue;
2321
2322 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
2323 NVME_FC_AQ_BLKMQ_DEPTH,
2324 (NVME_FC_AQ_BLKMQ_DEPTH / 4));
2325 if (ret)
2326 goto out_delete_hw_queue;
2327
2328 if (ctrl->ctrl.state != NVME_CTRL_NEW)
2329 blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
2330
2331 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
2332 if (ret)
2333 goto out_disconnect_admin_queue;
2334
2335 /*
2336 * Check controller capabilities
2337 *
2338 * todo:- add code to check if ctrl attributes changed from
2339 * prior connection values
2340 */
2341
2342 ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
2343 if (ret) {
2344 dev_err(ctrl->ctrl.device,
2345 "prop_get NVME_REG_CAP failed\n");
2346 goto out_disconnect_admin_queue;
2347 }
2348
2349 ctrl->ctrl.sqsize =
2350 min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
2351
2352 ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
2353 if (ret)
2354 goto out_disconnect_admin_queue;
2355
2356 segs = min_t(u32, NVME_FC_MAX_SEGMENTS,
2357 ctrl->lport->ops->max_sgl_segments);
2358 ctrl->ctrl.max_hw_sectors = (segs - 1) << (PAGE_SHIFT - 9);
2359
2360 ret = nvme_init_identify(&ctrl->ctrl);
2361 if (ret)
2362 goto out_disconnect_admin_queue;
2363
2364 /* sanity checks */
2365
2366 /* FC-NVME does not have other data in the capsule */
2367 if (ctrl->ctrl.icdoff) {
2368 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
2369 ctrl->ctrl.icdoff);
2370 goto out_disconnect_admin_queue;
2371 }
2372
2373 nvme_start_keep_alive(&ctrl->ctrl);
2374
2375 /* FC-NVME supports normal SGL Data Block Descriptors */
2376
2377 if (opts->queue_size > ctrl->ctrl.maxcmd) {
2378 /* warn if maxcmd is lower than queue_size */
2379 dev_warn(ctrl->ctrl.device,
2380 "queue_size %zu > ctrl maxcmd %u, reducing "
2381 "to queue_size\n",
2382 opts->queue_size, ctrl->ctrl.maxcmd);
2383 opts->queue_size = ctrl->ctrl.maxcmd;
2384 }
2385
2386 ret = nvme_fc_init_aen_ops(ctrl);
2387 if (ret)
2388 goto out_term_aen_ops;
2389
2390 /*
2391 * Create the io queues
2392 */
2393
2394 if (ctrl->queue_count > 1) {
2395 if (ctrl->ctrl.state == NVME_CTRL_NEW)
2396 ret = nvme_fc_create_io_queues(ctrl);
2397 else
2398 ret = nvme_fc_reinit_io_queues(ctrl);
2399 if (ret)
2400 goto out_term_aen_ops;
2401 }
2402
2403 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
2404 WARN_ON_ONCE(!changed);
2405
2406 ctrl->connect_attempts = 0;
2407
2408 kref_get(&ctrl->ctrl.kref);
2409
2410 if (ctrl->queue_count > 1) {
2411 nvme_start_queues(&ctrl->ctrl);
2412 nvme_queue_scan(&ctrl->ctrl);
2413 nvme_queue_async_events(&ctrl->ctrl);
2414 }
2415
2416 return 0; /* Success */
2417
2418out_term_aen_ops:
2419 nvme_fc_term_aen_ops(ctrl);
2420 nvme_stop_keep_alive(&ctrl->ctrl);
2421out_disconnect_admin_queue:
2422 /* send a Disconnect(association) LS to fc-nvme target */
2423 nvme_fc_xmt_disconnect_assoc(ctrl);
2424out_delete_hw_queue:
2425 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2426out_free_queue:
2427 nvme_fc_free_queue(&ctrl->queues[0]);
2428
2429 return ret;
2430}
2431
2432/*
2433 * This routine stops operation of the controller on the host side.
2434 * On the host os stack side: Admin and IO queues are stopped,
2435 * outstanding ios on them terminated via FC ABTS.
2436 * On the link side: the association is terminated.
2437 */
2438static void
2439nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
2440{
2441 unsigned long flags;
2442
2443 nvme_stop_keep_alive(&ctrl->ctrl);
2444
2445 spin_lock_irqsave(&ctrl->lock, flags);
2446 ctrl->flags |= FCCTRL_TERMIO;
2447 ctrl->iocnt = 0;
2448 spin_unlock_irqrestore(&ctrl->lock, flags);
2449
2450 /*
2451 * If io queues are present, stop them and terminate all outstanding
2452 * ios on them. As FC allocates FC exchange for each io, the
2453 * transport must contact the LLDD to terminate the exchange,
2454 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
2455 * to tell us which ios are busy and invoke a transport routine
2456 * to kill them with the LLDD. After terminating the exchange
2457 * the LLDD will call the transport's normal io done path, but it
2458 * will have an aborted status. The done path will return the
2459 * io requests back to the block layer as part of normal completions
2460 * (but with error status).
2461 */
2462 if (ctrl->queue_count > 1) {
2463 nvme_stop_queues(&ctrl->ctrl);
2464 blk_mq_tagset_busy_iter(&ctrl->tag_set,
2465 nvme_fc_terminate_exchange, &ctrl->ctrl);
2466 }
2467
2468 /*
2469 * Other transports, which don't have link-level contexts bound
2470 * to sqe's, would try to gracefully shutdown the controller by
2471 * writing the registers for shutdown and polling (call
2472 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
2473 * just aborted and we will wait on those contexts, and given
2474 * there was no indication of how live the controller is on the
2475 * link, don't send more io to create more contexts for the
2476 * shutdown. Let the controller fail via keepalive failure if
2477 * it's still present.
2478 */
2479
2480 /*
2481 * clean up the admin queue. Same thing as above.
2482 * use blk_mq_tagset_busy_iter() and the transport routine to
2483 * terminate the exchanges.
2484 */
2485 blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
2486 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2487 nvme_fc_terminate_exchange, &ctrl->ctrl);
2488
2489 /* kill the aens as they are a separate path */
2490 nvme_fc_abort_aen_ops(ctrl);
2491
2492 /* wait for all io that had to be aborted */
2493 spin_lock_irqsave(&ctrl->lock, flags);
2494 while (ctrl->iocnt) {
2495 spin_unlock_irqrestore(&ctrl->lock, flags);
2496 msleep(1000);
2497 spin_lock_irqsave(&ctrl->lock, flags);
2498 }
2499 ctrl->flags &= ~FCCTRL_TERMIO;
2500 spin_unlock_irqrestore(&ctrl->lock, flags);
2501
2502 nvme_fc_term_aen_ops(ctrl);
2503
2504 /*
2505 * send a Disconnect(association) LS to fc-nvme target
2506 * Note: could have been sent at top of process, but
2507 * cleaner on link traffic if after the aborts complete.
2508 * Note: if association doesn't exist, association_id will be 0
2509 */
2510 if (ctrl->association_id)
2511 nvme_fc_xmt_disconnect_assoc(ctrl);
2512
2513 if (ctrl->ctrl.tagset) {
2514 nvme_fc_delete_hw_io_queues(ctrl);
2515 nvme_fc_free_io_queues(ctrl);
2516 }
2517
2518 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2519 nvme_fc_free_queue(&ctrl->queues[0]);
2520}
2521
2522static void
2523nvme_fc_delete_ctrl_work(struct work_struct *work)
2524{
2525 struct nvme_fc_ctrl *ctrl =
2526 container_of(work, struct nvme_fc_ctrl, delete_work);
2527
2528 cancel_work_sync(&ctrl->reset_work);
2529 cancel_delayed_work_sync(&ctrl->connect_work);
2530
2531 /*
2532 * kill the association on the link side. This will block
2533 * waiting for io to terminate
2534 */
2535 nvme_fc_delete_association(ctrl);
2536
2537 /*
2538 * tear down the controller
2539 * This will result in the last reference on the nvme ctrl to
2540 * expire, calling the transport nvme_fc_nvme_ctrl_freed() callback.
2541 * From there, the transport will tear down it's logical queues and
2542 * association.
2543 */
2544 nvme_uninit_ctrl(&ctrl->ctrl);
2545
2546 nvme_put_ctrl(&ctrl->ctrl);
2547}
2548
2549static int
2550__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
2551{
2552 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
2553 return -EBUSY;
2554
2555 if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
2556 return -EBUSY;
2557
2558 return 0;
2559}
2560
2561/*
2562 * Request from nvme core layer to delete the controller
2563 */
2564static int
2565nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
2566{
2567 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2568 int ret;
2569
2570 if (!kref_get_unless_zero(&ctrl->ctrl.kref))
2571 return -EBUSY;
2572
2573 ret = __nvme_fc_del_ctrl(ctrl);
2574
2575 if (!ret)
2576 flush_workqueue(nvme_fc_wq);
2577
2578 nvme_put_ctrl(&ctrl->ctrl);
2579
2580 return ret;
2581}
2582
2583static void
2584nvme_fc_reset_ctrl_work(struct work_struct *work)
2585{
2586 struct nvme_fc_ctrl *ctrl =
2587 container_of(work, struct nvme_fc_ctrl, reset_work);
2588 int ret;
2589
2590 /* will block while waiting for io to terminate */
2591 nvme_fc_delete_association(ctrl);
2592
2593 ret = nvme_fc_create_association(ctrl);
2594 if (ret) {
2595 dev_warn(ctrl->ctrl.device,
2596 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
2597 ctrl->cnum, ret);
2598 if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
2599 dev_warn(ctrl->ctrl.device,
2600 "NVME-FC{%d}: Max reconnect attempts (%d) "
2601 "reached. Removing controller\n",
2602 ctrl->cnum, ctrl->connect_attempts);
2603
2604 if (!nvme_change_ctrl_state(&ctrl->ctrl,
2605 NVME_CTRL_DELETING)) {
2606 dev_err(ctrl->ctrl.device,
2607 "NVME-FC{%d}: failed to change state "
2608 "to DELETING\n", ctrl->cnum);
2609 return;
2610 }
2611
2612 WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
2613 return;
2614 }
2615
2616 dev_warn(ctrl->ctrl.device,
2617 "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
2618 ctrl->cnum, ctrl->reconnect_delay);
2619 queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
2620 ctrl->reconnect_delay * HZ);
2621 } else
2622 dev_info(ctrl->ctrl.device,
2623 "NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
2624}
2625
2626/*
2627 * called by the nvme core layer, for sysfs interface that requests
2628 * a reset of the nvme controller
2629 */
2630static int
2631nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
2632{
2633 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2634
2635 dev_warn(ctrl->ctrl.device,
2636 "NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum);
2637
2638 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
2639 return -EBUSY;
2640
2641 if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
2642 return -EBUSY;
2643
2644 flush_work(&ctrl->reset_work);
2645
2646 return 0;
2647}
2648
2649static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
2650 .name = "fc",
2651 .module = THIS_MODULE,
2652 .is_fabrics = true,
2653 .reg_read32 = nvmf_reg_read32,
2654 .reg_read64 = nvmf_reg_read64,
2655 .reg_write32 = nvmf_reg_write32,
2656 .reset_ctrl = nvme_fc_reset_nvme_ctrl,
2657 .free_ctrl = nvme_fc_nvme_ctrl_freed,
2658 .submit_async_event = nvme_fc_submit_async_event,
2659 .delete_ctrl = nvme_fc_del_nvme_ctrl,
2660 .get_subsysnqn = nvmf_get_subsysnqn,
2661 .get_address = nvmf_get_address,
2662};
2663
2664static void
2665nvme_fc_connect_ctrl_work(struct work_struct *work)
2666{
2667 int ret;
2668
2669 struct nvme_fc_ctrl *ctrl =
2670 container_of(to_delayed_work(work),
2671 struct nvme_fc_ctrl, connect_work);
2672
2673 ret = nvme_fc_create_association(ctrl);
2674 if (ret) {
2675 dev_warn(ctrl->ctrl.device,
2676 "NVME-FC{%d}: Reconnect attempt failed (%d)\n",
2677 ctrl->cnum, ret);
2678 if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
2679 dev_warn(ctrl->ctrl.device,
2680 "NVME-FC{%d}: Max reconnect attempts (%d) "
2681 "reached. Removing controller\n",
2682 ctrl->cnum, ctrl->connect_attempts);
2683
2684 if (!nvme_change_ctrl_state(&ctrl->ctrl,
2685 NVME_CTRL_DELETING)) {
2686 dev_err(ctrl->ctrl.device,
2687 "NVME-FC{%d}: failed to change state "
2688 "to DELETING\n", ctrl->cnum);
2689 return;
2690 }
2691
2692 WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
2693 return;
2694 }
2695
2696 dev_warn(ctrl->ctrl.device,
2697 "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
2698 ctrl->cnum, ctrl->reconnect_delay);
2699 queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
2700 ctrl->reconnect_delay * HZ);
2701 } else
2702 dev_info(ctrl->ctrl.device,
2703 "NVME-FC{%d}: controller reconnect complete\n",
2704 ctrl->cnum);
2705}
2706
2707
2708static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
2709 .queue_rq = nvme_fc_queue_rq,
2710 .complete = nvme_fc_complete_rq,
2711 .init_request = nvme_fc_init_admin_request,
2712 .exit_request = nvme_fc_exit_request,
2713 .reinit_request = nvme_fc_reinit_request,
2714 .init_hctx = nvme_fc_init_admin_hctx,
2715 .timeout = nvme_fc_timeout,
2716};
2717
James Smarte3994412016-12-02 00:28:42 -08002718
2719static struct nvme_ctrl *
James Smart61bff8e2017-04-23 08:30:08 -07002720nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
James Smarte3994412016-12-02 00:28:42 -08002721 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
2722{
2723 struct nvme_fc_ctrl *ctrl;
2724 unsigned long flags;
2725 int ret, idx;
James Smarte3994412016-12-02 00:28:42 -08002726
James Smart85e6a6a2017-05-05 16:13:15 -07002727 if (!(rport->remoteport.port_role &
2728 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
2729 ret = -EBADR;
2730 goto out_fail;
2731 }
2732
James Smarte3994412016-12-02 00:28:42 -08002733 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2734 if (!ctrl) {
2735 ret = -ENOMEM;
2736 goto out_fail;
2737 }
2738
2739 idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
2740 if (idx < 0) {
2741 ret = -ENOSPC;
2742 goto out_free_ctrl;
2743 }
2744
2745 ctrl->ctrl.opts = opts;
2746 INIT_LIST_HEAD(&ctrl->ctrl_list);
James Smarte3994412016-12-02 00:28:42 -08002747 ctrl->lport = lport;
2748 ctrl->rport = rport;
2749 ctrl->dev = lport->dev;
James Smarte3994412016-12-02 00:28:42 -08002750 ctrl->cnum = idx;
2751
James Smarte3994412016-12-02 00:28:42 -08002752 get_device(ctrl->dev);
2753 kref_init(&ctrl->ref);
2754
James Smart61bff8e2017-04-23 08:30:08 -07002755 INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work);
2756 INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work);
2757 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
2758 ctrl->reconnect_delay = opts->reconnect_delay;
James Smarte3994412016-12-02 00:28:42 -08002759 spin_lock_init(&ctrl->lock);
2760
2761 /* io queue count */
2762 ctrl->queue_count = min_t(unsigned int,
2763 opts->nr_io_queues,
2764 lport->ops->max_hw_queues);
2765 opts->nr_io_queues = ctrl->queue_count; /* so opts has valid value */
2766 ctrl->queue_count++; /* +1 for admin queue */
2767
2768 ctrl->ctrl.sqsize = opts->queue_size - 1;
2769 ctrl->ctrl.kato = opts->kato;
2770
2771 ret = -ENOMEM;
2772 ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue),
2773 GFP_KERNEL);
2774 if (!ctrl->queues)
James Smart61bff8e2017-04-23 08:30:08 -07002775 goto out_free_ida;
James Smarte3994412016-12-02 00:28:42 -08002776
James Smart61bff8e2017-04-23 08:30:08 -07002777 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
2778 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
2779 ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH;
2780 ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
2781 ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
2782 ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
2783 (SG_CHUNK_SIZE *
2784 sizeof(struct scatterlist)) +
2785 ctrl->lport->ops->fcprqst_priv_sz;
2786 ctrl->admin_tag_set.driver_data = ctrl;
2787 ctrl->admin_tag_set.nr_hw_queues = 1;
2788 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
2789
2790 ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
James Smarte3994412016-12-02 00:28:42 -08002791 if (ret)
James Smart61bff8e2017-04-23 08:30:08 -07002792 goto out_free_queues;
James Smarte3994412016-12-02 00:28:42 -08002793
James Smart61bff8e2017-04-23 08:30:08 -07002794 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
2795 if (IS_ERR(ctrl->ctrl.admin_q)) {
2796 ret = PTR_ERR(ctrl->ctrl.admin_q);
2797 goto out_free_admin_tag_set;
James Smarte3994412016-12-02 00:28:42 -08002798 }
2799
James Smart61bff8e2017-04-23 08:30:08 -07002800 /*
2801 * Would have been nice to init io queues tag set as well.
2802 * However, we require interaction from the controller
2803 * for max io queue count before we can do so.
2804 * Defer this to the connect path.
2805 */
James Smarte3994412016-12-02 00:28:42 -08002806
James Smart61bff8e2017-04-23 08:30:08 -07002807 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
James Smarte3994412016-12-02 00:28:42 -08002808 if (ret)
James Smart61bff8e2017-04-23 08:30:08 -07002809 goto out_cleanup_admin_q;
James Smarte3994412016-12-02 00:28:42 -08002810
James Smart61bff8e2017-04-23 08:30:08 -07002811 /* at this point, teardown path changes to ref counting on nvme ctrl */
James Smarte3994412016-12-02 00:28:42 -08002812
2813 spin_lock_irqsave(&rport->lock, flags);
2814 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
2815 spin_unlock_irqrestore(&rport->lock, flags);
2816
James Smart61bff8e2017-04-23 08:30:08 -07002817 ret = nvme_fc_create_association(ctrl);
2818 if (ret) {
Ewan D. Milnede414472017-04-24 13:24:16 -04002819 ctrl->ctrl.opts = NULL;
James Smart61bff8e2017-04-23 08:30:08 -07002820 /* initiate nvme ctrl ref counting teardown */
2821 nvme_uninit_ctrl(&ctrl->ctrl);
2822 nvme_put_ctrl(&ctrl->ctrl);
2823
2824 /* as we're past the point where we transition to the ref
2825 * counting teardown path, if we return a bad pointer here,
2826 * the calling routine, thinking it's prior to the
2827 * transition, will do an rport put. Since the teardown
2828 * path also does a rport put, we do an extra get here so
2829 * that proper teardown ordering happens.
2830 */
2831 nvme_fc_rport_get(rport);
2832
2833 if (ret > 0)
2834 ret = -EIO;
2835 return ERR_PTR(ret);
James Smarte3994412016-12-02 00:28:42 -08002836 }
2837
James Smart61bff8e2017-04-23 08:30:08 -07002838 dev_info(ctrl->ctrl.device,
2839 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
2840 ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
2841
James Smarte3994412016-12-02 00:28:42 -08002842 return &ctrl->ctrl;
2843
James Smart61bff8e2017-04-23 08:30:08 -07002844out_cleanup_admin_q:
2845 blk_cleanup_queue(ctrl->ctrl.admin_q);
2846out_free_admin_tag_set:
2847 blk_mq_free_tag_set(&ctrl->admin_tag_set);
2848out_free_queues:
2849 kfree(ctrl->queues);
James Smarte3994412016-12-02 00:28:42 -08002850out_free_ida:
James Smart61bff8e2017-04-23 08:30:08 -07002851 put_device(ctrl->dev);
James Smarte3994412016-12-02 00:28:42 -08002852 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
2853out_free_ctrl:
2854 kfree(ctrl);
2855out_fail:
James Smarte3994412016-12-02 00:28:42 -08002856 /* exits via here don't follow the ctrl refcounting teardown path */
2857 return ERR_PTR(ret);
2858}
2859
2860enum {
2861 FCT_TRADDR_ERR = 0,
2862 FCT_TRADDR_WWNN = 1 << 0,
2863 FCT_TRADDR_WWPN = 1 << 1,
2864};
2865
2866struct nvmet_fc_traddr {
2867 u64 nn;
2868 u64 pn;
2869};
2870
2871static const match_table_t traddr_opt_tokens = {
2872 { FCT_TRADDR_WWNN, "nn-%s" },
2873 { FCT_TRADDR_WWPN, "pn-%s" },
2874 { FCT_TRADDR_ERR, NULL }
2875};
2876
2877static int
2878nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf)
2879{
2880 substring_t args[MAX_OPT_ARGS];
2881 char *options, *o, *p;
2882 int token, ret = 0;
2883 u64 token64;
2884
2885 options = o = kstrdup(buf, GFP_KERNEL);
2886 if (!options)
2887 return -ENOMEM;
2888
2889 while ((p = strsep(&o, ":\n")) != NULL) {
2890 if (!*p)
2891 continue;
2892
2893 token = match_token(p, traddr_opt_tokens, args);
2894 switch (token) {
2895 case FCT_TRADDR_WWNN:
2896 if (match_u64(args, &token64)) {
2897 ret = -EINVAL;
2898 goto out;
2899 }
2900 traddr->nn = token64;
2901 break;
2902 case FCT_TRADDR_WWPN:
2903 if (match_u64(args, &token64)) {
2904 ret = -EINVAL;
2905 goto out;
2906 }
2907 traddr->pn = token64;
2908 break;
2909 default:
2910 pr_warn("unknown traddr token or missing value '%s'\n",
2911 p);
2912 ret = -EINVAL;
2913 goto out;
2914 }
2915 }
2916
2917out:
2918 kfree(options);
2919 return ret;
2920}
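/*
 * Example transport address accepted by the parser above - a WWNN and
 * WWPN as 64-bit values, colon separated (values are illustrative):
 *
 *	nn-0x200000109b1234ff:pn-0x100000109b1234ff
 */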
2921
2922static struct nvme_ctrl *
2923nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
2924{
2925 struct nvme_fc_lport *lport;
2926 struct nvme_fc_rport *rport;
James Smart61bff8e2017-04-23 08:30:08 -07002927 struct nvme_ctrl *ctrl;
James Smarte3994412016-12-02 00:28:42 -08002928 struct nvmet_fc_traddr laddr = { 0L, 0L };
2929 struct nvmet_fc_traddr raddr = { 0L, 0L };
2930 unsigned long flags;
2931 int ret;
2932
2933 ret = nvme_fc_parse_address(&raddr, opts->traddr);
2934 if (ret || !raddr.nn || !raddr.pn)
2935 return ERR_PTR(-EINVAL);
2936
2937 ret = nvme_fc_parse_address(&laddr, opts->host_traddr);
2938 if (ret || !laddr.nn || !laddr.pn)
2939 return ERR_PTR(-EINVAL);
2940
2941 /* find the host and remote ports to connect together */
2942 spin_lock_irqsave(&nvme_fc_lock, flags);
2943 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
2944 if (lport->localport.node_name != laddr.nn ||
2945 lport->localport.port_name != laddr.pn)
2946 continue;
2947
2948 list_for_each_entry(rport, &lport->endp_list, endp_list) {
2949 if (rport->remoteport.node_name != raddr.nn ||
2950 rport->remoteport.port_name != raddr.pn)
2951 continue;
2952
2953 /* if we fail to get a reference, fall through; we'll return an error */
2954 if (!nvme_fc_rport_get(rport))
2955 break;
2956
2957 spin_unlock_irqrestore(&nvme_fc_lock, flags);
2958
James Smart61bff8e2017-04-23 08:30:08 -07002959 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
2960 if (IS_ERR(ctrl))
2961 nvme_fc_rport_put(rport);
2962 return ctrl;
James Smarte3994412016-12-02 00:28:42 -08002963 }
2964 }
2965 spin_unlock_irqrestore(&nvme_fc_lock, flags);
2966
2967 return ERR_PTR(-ENOENT);
2968}
2969
2970
2971static struct nvmf_transport_ops nvme_fc_transport = {
2972 .name = "fc",
2973 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
2974 .allowed_opts = NVMF_OPT_RECONNECT_DELAY,
2975 .create_ctrl = nvme_fc_create_ctrl,
2976};
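/*
 * Example (illustrative addresses/NQN): a controller is created via
 * the fabrics create_ctrl path, e.g. from nvme-cli:
 *
 *	nvme connect --transport=fc \
 *		--host-traddr=nn-0x200000109b1234ff:pn-0x100000109b1234ff \
 *		--traddr=nn-0x200100109b5678ff:pn-0x100100109b5678ff \
 *		--nqn=nqn.2016-06.io.example:subsys1
 */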
2977
2978static int __init nvme_fc_init_module(void)
2979{
Sagi Grimbergc0e4a6f2017-03-19 14:16:05 +02002980 int ret;
2981
James Smarte3994412016-12-02 00:28:42 -08002982 nvme_fc_wq = create_workqueue("nvme_fc_wq");
2983 if (!nvme_fc_wq)
2984 return -ENOMEM;
2985
Sagi Grimbergc0e4a6f2017-03-19 14:16:05 +02002986 ret = nvmf_register_transport(&nvme_fc_transport);
2987 if (ret)
2988 goto err;
2989
2990 return 0;
2991err:
2992 destroy_workqueue(nvme_fc_wq);
2993 return ret;
James Smarte3994412016-12-02 00:28:42 -08002994}
2995
2996static void __exit nvme_fc_exit_module(void)
2997{
2998 /* sanity check - all lports should be removed */
2999 if (!list_empty(&nvme_fc_lport_list))
3000 pr_warn("%s: localport list not empty\n", __func__);
3001
3002 nvmf_unregister_transport(&nvme_fc_transport);
3003
3004 destroy_workqueue(nvme_fc_wq);
3005
3006 ida_destroy(&nvme_fc_local_port_cnt);
3007 ida_destroy(&nvme_fc_ctrl_cnt);
3008}
3009
3010module_init(nvme_fc_init_module);
3011module_exit(nvme_fc_exit_module);
3012
3013MODULE_LICENSE("GPL v2");