Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001/*******************************************************************************
2 * Vhost kernel TCM fabric driver for virtio SCSI initiators
3 *
4 * (C) Copyright 2010-2012 RisingTide Systems LLC.
5 * (C) Copyright 2010-2012 IBM Corp.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 *
9 * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
10 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 ****************************************************************************/
23
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <generated/utsrelease.h>
27#include <linux/utsname.h>
28#include <linux/init.h>
29#include <linux/slab.h>
30#include <linux/kthread.h>
31#include <linux/types.h>
32#include <linux/string.h>
33#include <linux/configfs.h>
34#include <linux/ctype.h>
35#include <linux/compat.h>
36#include <linux/eventfd.h>
Nicholas Bellinger057cbf42012-07-18 14:31:32 -070037#include <linux/fs.h>
38#include <linux/miscdevice.h>
39#include <asm/unaligned.h>
40#include <scsi/scsi.h>
41#include <scsi/scsi_tcq.h>
42#include <target/target_core_base.h>
43#include <target/target_core_fabric.h>
44#include <target/target_core_fabric_configfs.h>
45#include <target/target_core_configfs.h>
46#include <target/configfs_macros.h>
47#include <linux/vhost.h>
Nicholas Bellinger057cbf42012-07-18 14:31:32 -070048#include <linux/virtio_scsi.h>
Asias He9d6064a2013-01-06 14:36:13 +080049#include <linux/llist.h>
Asias He1b7f3902013-02-06 13:20:59 +080050#include <linux/bitmap.h>
Nicholas Bellinger057cbf42012-07-18 14:31:32 -070051
52#include "vhost.c"
53#include "vhost.h"
Michael S. Tsirkin5012a3a2013-05-02 03:50:34 +030054
55#define TCM_VHOST_VERSION "v0.1"
56#define TCM_VHOST_NAMELEN 256
57#define TCM_VHOST_MAX_CDB_SIZE 32
58
59struct vhost_scsi_inflight {
60 /* Wait for the flush operation to finish */
61 struct completion comp;
62 /* Refcount for the inflight reqs */
63 struct kref kref;
64};
65
66struct tcm_vhost_cmd {
67 /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
68 int tvc_vq_desc;
69 /* virtio-scsi initiator task attribute */
70 int tvc_task_attr;
71 /* virtio-scsi initiator data direction */
72 enum dma_data_direction tvc_data_direction;
73 /* Expected data transfer length from virtio-scsi header */
74 u32 tvc_exp_data_len;
75 /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
76 u64 tvc_tag;
77 /* The number of scatterlists associated with this cmd */
78 u32 tvc_sgl_count;
79 /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
80 u32 tvc_lun;
81 /* Pointer to the SGL formatted memory from virtio-scsi */
82 struct scatterlist *tvc_sgl;
83 /* Pointer to response */
84 struct virtio_scsi_cmd_resp __user *tvc_resp;
85 /* Pointer to vhost_scsi for our device */
86 struct vhost_scsi *tvc_vhost;
87 /* Pointer to vhost_virtqueue for the cmd */
88 struct vhost_virtqueue *tvc_vq;
89 /* Pointer to vhost nexus memory */
90 struct tcm_vhost_nexus *tvc_nexus;
91 /* The TCM I/O descriptor that is accessed via container_of() */
92 struct se_cmd tvc_se_cmd;
93 /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
94 struct work_struct work;
95 /* Copy of the incoming SCSI command descriptor block (CDB) */
96 unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
97 /* Sense buffer that will be mapped into outgoing status */
98 unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
99 /* Completed commands list, serviced from vhost worker thread */
100 struct llist_node tvc_completion_list;
101 /* Used to track inflight cmd */
102 struct vhost_scsi_inflight *inflight;
103};
104
105struct tcm_vhost_nexus {
106 /* Pointer to TCM session for I_T Nexus */
107 struct se_session *tvn_se_sess;
108};
109
110struct tcm_vhost_nacl {
111 /* Binary World Wide unique Port Name for Vhost Initiator port */
112 u64 iport_wwpn;
 113 /* ASCII formatted WWPN for SAS Initiator port */
114 char iport_name[TCM_VHOST_NAMELEN];
115 /* Returned by tcm_vhost_make_nodeacl() */
116 struct se_node_acl se_node_acl;
117};
118
Michael S. Tsirkin5012a3a2013-05-02 03:50:34 +0300119struct tcm_vhost_tpg {
120 /* Vhost port target portal group tag for TCM */
121 u16 tport_tpgt;
 122 /* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
123 int tv_tpg_port_count;
124 /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
125 int tv_tpg_vhost_count;
126 /* list for tcm_vhost_list */
127 struct list_head tv_tpg_list;
128 /* Used to protect access for tpg_nexus */
129 struct mutex tv_tpg_mutex;
130 /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
131 struct tcm_vhost_nexus *tpg_nexus;
132 /* Pointer back to tcm_vhost_tport */
133 struct tcm_vhost_tport *tport;
134 /* Returned by tcm_vhost_make_tpg() */
135 struct se_portal_group se_tpg;
136 /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
137 struct vhost_scsi *vhost_scsi;
138};
139
140struct tcm_vhost_tport {
141 /* SCSI protocol the tport is providing */
142 u8 tport_proto_id;
143 /* Binary World Wide unique Port Name for Vhost Target port */
144 u64 tport_wwpn;
145 /* ASCII formatted WWPN for Vhost Target port */
146 char tport_name[TCM_VHOST_NAMELEN];
147 /* Returned by tcm_vhost_make_tport() */
148 struct se_wwn tport_wwn;
149};
150
151struct tcm_vhost_evt {
152 /* event to be sent to guest */
153 struct virtio_scsi_event event;
154 /* event list, serviced from vhost worker thread */
155 struct llist_node list;
156};
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700157
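/* Fixed virtqueue layout: one control queue, one event queue, then the request queues (index 2 and up). */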
Nicholas Bellinger101998f2012-07-30 13:30:00 -0700158enum {
159 VHOST_SCSI_VQ_CTL = 0,
160 VHOST_SCSI_VQ_EVT = 1,
161 VHOST_SCSI_VQ_IO = 2,
162};
163
Nicholas Bellinger5dade712013-03-27 17:23:41 -0700164enum {
Asias Hea18cc422013-05-07 14:51:49 +0800165 VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG)
Nicholas Bellinger5dade712013-03-27 17:23:41 -0700166};
167
Asias He1b7f3902013-02-06 13:20:59 +0800168#define VHOST_SCSI_MAX_TARGET 256
169#define VHOST_SCSI_MAX_VQ 128
Asias Hea6c9af82013-04-25 15:35:21 +0800170#define VHOST_SCSI_MAX_EVENT 128
Asias He67e18cf2013-02-05 12:31:57 +0800171
Asias He3ab2e422013-04-27 11:16:48 +0800172struct vhost_scsi_virtqueue {
173 struct vhost_virtqueue vq;
Michael S. Tsirkin3dfbff32013-04-28 15:38:52 +0300174 /*
175 * Reference counting for inflight reqs, used for flush operation. At
176 * each time, one reference tracks new commands submitted, while we
177 * wait for another one to reach 0.
178 */
Asias Hef2f0173d2013-04-27 11:16:49 +0800179 struct vhost_scsi_inflight inflights[2];
Michael S. Tsirkin3dfbff32013-04-28 15:38:52 +0300180 /*
181 * Indicate current inflight in use, protected by vq->mutex.
182 * Writers must also take dev mutex and flush under it.
183 */
Asias Hef2f0173d2013-04-27 11:16:49 +0800184 int inflight_idx;
Asias He3ab2e422013-04-27 11:16:48 +0800185};
186
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700187struct vhost_scsi {
Asias He67e18cf2013-02-05 12:31:57 +0800188 /* Protected by vhost_scsi->dev.mutex */
Asias He4f7f46d2013-04-03 14:17:37 +0800189 struct tcm_vhost_tpg **vs_tpg;
Asias He67e18cf2013-02-05 12:31:57 +0800190 char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
Asias He67e18cf2013-02-05 12:31:57 +0800191
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700192 struct vhost_dev dev;
Asias He3ab2e422013-04-27 11:16:48 +0800193 struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700194
195 struct vhost_work vs_completion_work; /* cmd completion work item */
Asias He9d6064a2013-01-06 14:36:13 +0800196 struct llist_head vs_completion_list; /* cmd completion queue */
Asias Hea6c9af82013-04-25 15:35:21 +0800197
198 struct vhost_work vs_event_work; /* evt injection work item */
199 struct llist_head vs_event_list; /* evt injection queue */
200
201 bool vs_events_missed; /* any missed events, protected by vq->mutex */
202 int vs_events_nr; /* num of pending events, protected by vq->mutex */
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700203};
204
205/* Local pointer to allocated TCM configfs fabric module */
206static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
207
208static struct workqueue_struct *tcm_vhost_workqueue;
209
210/* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */
211static DEFINE_MUTEX(tcm_vhost_mutex);
212static LIST_HEAD(tcm_vhost_list);
213
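/* Number of pages an iovec spans, counting partial first and last pages. */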
Asias He765b34f2013-01-22 11:20:25 +0800214static int iov_num_pages(struct iovec *iov)
215{
216 return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
217 ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
218}
219
Asias Hef2f0173d2013-04-27 11:16:49 +0800220void tcm_vhost_done_inflight(struct kref *kref)
221{
222 struct vhost_scsi_inflight *inflight;
223
224 inflight = container_of(kref, struct vhost_scsi_inflight, kref);
225 complete(&inflight->comp);
226}
227
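/*
 * Flip each virtqueue over to a fresh inflight counter (optionally handing
 * back the old one), so a flush can wait for requests issued before the
 * switch while new requests are tracked separately.
 */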
228static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
229 struct vhost_scsi_inflight *old_inflight[])
230{
231 struct vhost_scsi_inflight *new_inflight;
232 struct vhost_virtqueue *vq;
233 int idx, i;
234
235 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
236 vq = &vs->vqs[i].vq;
237
238 mutex_lock(&vq->mutex);
239
 240 /* store old inflight */
241 idx = vs->vqs[i].inflight_idx;
242 if (old_inflight)
243 old_inflight[i] = &vs->vqs[i].inflights[idx];
244
 245 /* set up new inflight */
246 vs->vqs[i].inflight_idx = idx ^ 1;
247 new_inflight = &vs->vqs[i].inflights[idx ^ 1];
248 kref_init(&new_inflight->kref);
249 init_completion(&new_inflight->comp);
250
251 mutex_unlock(&vq->mutex);
252 }
253}
254
255static struct vhost_scsi_inflight *
256tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
257{
258 struct vhost_scsi_inflight *inflight;
259 struct vhost_scsi_virtqueue *svq;
260
261 svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
262 inflight = &svq->inflights[svq->inflight_idx];
263 kref_get(&inflight->kref);
264
265 return inflight;
266}
267
268static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
269{
270 kref_put(&inflight->kref, tcm_vhost_done_inflight);
271}
272
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700273static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
274{
275 return 1;
276}
277
278static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
279{
280 return 0;
281}
282
283static char *tcm_vhost_get_fabric_name(void)
284{
285 return "vhost";
286}
287
288static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
289{
290 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
291 struct tcm_vhost_tpg, se_tpg);
292 struct tcm_vhost_tport *tport = tpg->tport;
293
294 switch (tport->tport_proto_id) {
295 case SCSI_PROTOCOL_SAS:
296 return sas_get_fabric_proto_ident(se_tpg);
297 case SCSI_PROTOCOL_FCP:
298 return fc_get_fabric_proto_ident(se_tpg);
299 case SCSI_PROTOCOL_ISCSI:
300 return iscsi_get_fabric_proto_ident(se_tpg);
301 default:
302 pr_err("Unknown tport_proto_id: 0x%02x, using"
303 " SAS emulation\n", tport->tport_proto_id);
304 break;
305 }
306
307 return sas_get_fabric_proto_ident(se_tpg);
308}
309
310static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
311{
312 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
313 struct tcm_vhost_tpg, se_tpg);
314 struct tcm_vhost_tport *tport = tpg->tport;
315
316 return &tport->tport_name[0];
317}
318
319static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
320{
321 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
322 struct tcm_vhost_tpg, se_tpg);
323 return tpg->tport_tpgt;
324}
325
326static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
327{
328 return 1;
329}
330
Nicholas Bellinger101998f2012-07-30 13:30:00 -0700331static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700332 struct se_node_acl *se_nacl,
333 struct t10_pr_registration *pr_reg,
334 int *format_code,
335 unsigned char *buf)
336{
337 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
338 struct tcm_vhost_tpg, se_tpg);
339 struct tcm_vhost_tport *tport = tpg->tport;
340
341 switch (tport->tport_proto_id) {
342 case SCSI_PROTOCOL_SAS:
343 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
344 format_code, buf);
345 case SCSI_PROTOCOL_FCP:
346 return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
347 format_code, buf);
348 case SCSI_PROTOCOL_ISCSI:
349 return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
350 format_code, buf);
351 default:
352 pr_err("Unknown tport_proto_id: 0x%02x, using"
353 " SAS emulation\n", tport->tport_proto_id);
354 break;
355 }
356
357 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
358 format_code, buf);
359}
360
Nicholas Bellinger101998f2012-07-30 13:30:00 -0700361static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700362 struct se_node_acl *se_nacl,
363 struct t10_pr_registration *pr_reg,
364 int *format_code)
365{
366 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
367 struct tcm_vhost_tpg, se_tpg);
368 struct tcm_vhost_tport *tport = tpg->tport;
369
370 switch (tport->tport_proto_id) {
371 case SCSI_PROTOCOL_SAS:
372 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
373 format_code);
374 case SCSI_PROTOCOL_FCP:
375 return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
376 format_code);
377 case SCSI_PROTOCOL_ISCSI:
378 return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
379 format_code);
380 default:
381 pr_err("Unknown tport_proto_id: 0x%02x, using"
382 " SAS emulation\n", tport->tport_proto_id);
383 break;
384 }
385
386 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
387 format_code);
388}
389
Nicholas Bellinger101998f2012-07-30 13:30:00 -0700390static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700391 const char *buf,
392 u32 *out_tid_len,
393 char **port_nexus_ptr)
394{
395 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
396 struct tcm_vhost_tpg, se_tpg);
397 struct tcm_vhost_tport *tport = tpg->tport;
398
399 switch (tport->tport_proto_id) {
400 case SCSI_PROTOCOL_SAS:
401 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
402 port_nexus_ptr);
403 case SCSI_PROTOCOL_FCP:
404 return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
405 port_nexus_ptr);
406 case SCSI_PROTOCOL_ISCSI:
407 return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
408 port_nexus_ptr);
409 default:
410 pr_err("Unknown tport_proto_id: 0x%02x, using"
411 " SAS emulation\n", tport->tport_proto_id);
412 break;
413 }
414
415 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
416 port_nexus_ptr);
417}
418
419static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
420 struct se_portal_group *se_tpg)
421{
422 struct tcm_vhost_nacl *nacl;
423
424 nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
425 if (!nacl) {
Masanari Iida744627e92012-11-05 23:30:40 +0900426 pr_err("Unable to allocate struct tcm_vhost_nacl\n");
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700427 return NULL;
428 }
429
430 return &nacl->se_node_acl;
431}
432
Nicholas Bellinger101998f2012-07-30 13:30:00 -0700433static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700434 struct se_node_acl *se_nacl)
435{
436 struct tcm_vhost_nacl *nacl = container_of(se_nacl,
437 struct tcm_vhost_nacl, se_node_acl);
438 kfree(nacl);
439}
440
441static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
442{
443 return 1;
444}
445
446static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
447{
448 return;
449}
450
451static int tcm_vhost_shutdown_session(struct se_session *se_sess)
452{
453 return 0;
454}
455
456static void tcm_vhost_close_session(struct se_session *se_sess)
457{
458 return;
459}
460
461static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
462{
463 return 0;
464}
465
466static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
467{
468 /* Go ahead and process the write immediately */
469 target_execute_cmd(se_cmd);
470 return 0;
471}
472
473static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
474{
475 return 0;
476}
477
478static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
479{
480 return;
481}
482
483static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
484{
485 return 0;
486}
487
488static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
489{
490 return 0;
491}
492
Nicholas Bellinger101998f2012-07-30 13:30:00 -0700493static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
494{
495 struct vhost_scsi *vs = tv_cmd->tvc_vhost;
496
Asias He9d6064a2013-01-06 14:36:13 +0800497 llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
Nicholas Bellinger101998f2012-07-30 13:30:00 -0700498
499 vhost_work_queue(&vs->dev, &vs->vs_completion_work);
500}
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700501
502static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
503{
504 struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
505 struct tcm_vhost_cmd, tvc_se_cmd);
506 vhost_scsi_complete_cmd(tv_cmd);
507 return 0;
508}
509
510static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
511{
512 struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
513 struct tcm_vhost_cmd, tvc_se_cmd);
514 vhost_scsi_complete_cmd(tv_cmd);
515 return 0;
516}
517
518static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
519{
520 return 0;
521}
522
Asias Hea6c9af82013-04-25 15:35:21 +0800523static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
524{
525 vs->vs_events_nr--;
526 kfree(evt);
527}
528
529static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
530 u32 event, u32 reason)
531{
Asias He3ab2e422013-04-27 11:16:48 +0800532 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
Asias Hea6c9af82013-04-25 15:35:21 +0800533 struct tcm_vhost_evt *evt;
534
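	/* Limit the number of queued events; once the limit is hit, only note that events were missed. */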
535 if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
536 vs->vs_events_missed = true;
537 return NULL;
538 }
539
540 evt = kzalloc(sizeof(*evt), GFP_KERNEL);
541 if (!evt) {
542 vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
543 vs->vs_events_missed = true;
544 return NULL;
545 }
546
547 evt->event.event = event;
548 evt->event.reason = reason;
549 vs->vs_events_nr++;
550
551 return evt;
552}
553
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700554static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
555{
556 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
557
558 /* TODO locking against target/backend threads? */
559 transport_generic_free_cmd(se_cmd, 1);
560
561 if (tv_cmd->tvc_sgl_count) {
562 u32 i;
563 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
564 put_page(sg_page(&tv_cmd->tvc_sgl[i]));
565
566 kfree(tv_cmd->tvc_sgl);
567 }
568
Asias Hef2f0173d2013-04-27 11:16:49 +0800569 tcm_vhost_put_inflight(tv_cmd->inflight);
570
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700571 kfree(tv_cmd);
572}
573
Asias Hea6c9af82013-04-25 15:35:21 +0800574static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
575 struct tcm_vhost_evt *evt)
576{
Asias He3ab2e422013-04-27 11:16:48 +0800577 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
Asias Hea6c9af82013-04-25 15:35:21 +0800578 struct virtio_scsi_event *event = &evt->event;
579 struct virtio_scsi_event __user *eventp;
580 unsigned out, in;
581 int head, ret;
582
583 if (!vq->private_data) {
584 vs->vs_events_missed = true;
585 return;
586 }
587
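	/*
	 * Fetch a descriptor from the event queue with notification disabled;
	 * if the queue is empty, re-enable notification and retry once in case
	 * the guest just added buffers, otherwise record the event as missed.
	 */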
588again:
589 vhost_disable_notify(&vs->dev, vq);
590 head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
591 ARRAY_SIZE(vq->iov), &out, &in,
592 NULL, NULL);
593 if (head < 0) {
594 vs->vs_events_missed = true;
595 return;
596 }
597 if (head == vq->num) {
598 if (vhost_enable_notify(&vs->dev, vq))
599 goto again;
600 vs->vs_events_missed = true;
601 return;
602 }
603
604 if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
605 vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
606 vq->iov[out].iov_len);
607 vs->vs_events_missed = true;
608 return;
609 }
610
611 if (vs->vs_events_missed) {
612 event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
613 vs->vs_events_missed = false;
614 }
615
616 eventp = vq->iov[out].iov_base;
617 ret = __copy_to_user(eventp, event, sizeof(*event));
618 if (!ret)
619 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
620 else
621 vq_err(vq, "Faulted on tcm_vhost_send_event\n");
622}
623
624static void tcm_vhost_evt_work(struct vhost_work *work)
625{
626 struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
627 vs_event_work);
Asias He3ab2e422013-04-27 11:16:48 +0800628 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
Asias Hea6c9af82013-04-25 15:35:21 +0800629 struct tcm_vhost_evt *evt;
630 struct llist_node *llnode;
631
632 mutex_lock(&vq->mutex);
633 llnode = llist_del_all(&vs->vs_event_list);
634 while (llnode) {
635 evt = llist_entry(llnode, struct tcm_vhost_evt, list);
636 llnode = llist_next(llnode);
637 tcm_vhost_do_evt_work(vs, evt);
638 tcm_vhost_free_evt(vs, evt);
639 }
640 mutex_unlock(&vq->mutex);
641}
642
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700643/* Fill in status and signal that we are done processing this command
644 *
645 * This is scheduled in the vhost work queue so we are called with the owner
646 * process mm and can access the vring.
647 */
648static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
649{
650 struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
651 vs_completion_work);
Asias He1b7f3902013-02-06 13:20:59 +0800652 DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
Asias He9d6064a2013-01-06 14:36:13 +0800653 struct virtio_scsi_cmd_resp v_rsp;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700654 struct tcm_vhost_cmd *tv_cmd;
Asias He9d6064a2013-01-06 14:36:13 +0800655 struct llist_node *llnode;
656 struct se_cmd *se_cmd;
Asias He1b7f3902013-02-06 13:20:59 +0800657 int ret, vq;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700658
Asias He1b7f3902013-02-06 13:20:59 +0800659 bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
Asias He9d6064a2013-01-06 14:36:13 +0800660 llnode = llist_del_all(&vs->vs_completion_list);
661 while (llnode) {
662 tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
663 tvc_completion_list);
664 llnode = llist_next(llnode);
665 se_cmd = &tv_cmd->tvc_se_cmd;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700666
667 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
668 tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);
669
670 memset(&v_rsp, 0, sizeof(v_rsp));
671 v_rsp.resid = se_cmd->residual_count;
672 /* TODO is status_qualifier field needed? */
673 v_rsp.status = se_cmd->scsi_status;
674 v_rsp.sense_len = se_cmd->scsi_sense_length;
675 memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
676 v_rsp.sense_len);
677 ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
Asias He1b7f3902013-02-06 13:20:59 +0800678 if (likely(ret == 0)) {
Asias He3ab2e422013-04-27 11:16:48 +0800679 struct vhost_scsi_virtqueue *q;
Asias He1b7f3902013-02-06 13:20:59 +0800680 vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
Asias He3ab2e422013-04-27 11:16:48 +0800681 q = container_of(tv_cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
682 vq = q - vs->vqs;
Asias He1b7f3902013-02-06 13:20:59 +0800683 __set_bit(vq, signal);
684 } else
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700685 pr_err("Faulted on virtio_scsi_cmd_resp\n");
686
687 vhost_scsi_free_cmd(tv_cmd);
688 }
689
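	/* Signal every virtqueue that had at least one completion added above. */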
Asias He1b7f3902013-02-06 13:20:59 +0800690 vq = -1;
691 while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
692 < VHOST_SCSI_MAX_VQ)
Asias He3ab2e422013-04-27 11:16:48 +0800693 vhost_signal(&vs->dev, &vs->vqs[vq].vq);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700694}
695
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700696static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
Asias Hef2f0173d2013-04-27 11:16:49 +0800697 struct vhost_virtqueue *vq,
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700698 struct tcm_vhost_tpg *tv_tpg,
699 struct virtio_scsi_cmd_req *v_req,
700 u32 exp_data_len,
701 int data_direction)
702{
703 struct tcm_vhost_cmd *tv_cmd;
704 struct tcm_vhost_nexus *tv_nexus;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700705
706 tv_nexus = tv_tpg->tpg_nexus;
707 if (!tv_nexus) {
708 pr_err("Unable to locate active struct tcm_vhost_nexus\n");
709 return ERR_PTR(-EIO);
710 }
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700711
712 tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
713 if (!tv_cmd) {
714 pr_err("Unable to allocate struct tcm_vhost_cmd\n");
715 return ERR_PTR(-ENOMEM);
716 }
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700717 tv_cmd->tvc_tag = v_req->tag;
Nicholas Bellinger9f0abc12012-10-01 18:40:55 -0700718 tv_cmd->tvc_task_attr = v_req->task_attr;
719 tv_cmd->tvc_exp_data_len = exp_data_len;
720 tv_cmd->tvc_data_direction = data_direction;
721 tv_cmd->tvc_nexus = tv_nexus;
Asias Hef2f0173d2013-04-27 11:16:49 +0800722 tv_cmd->inflight = tcm_vhost_get_inflight(vq);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700723
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700724 return tv_cmd;
725}
726
727/*
728 * Map a user memory range into a scatterlist
729 *
730 * Returns the number of scatterlist entries used or -errno on error.
731 */
732static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
Asias He18100532013-01-22 11:20:27 +0800733 unsigned int sgl_count, struct iovec *iov, int write)
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700734{
Asias He18100532013-01-22 11:20:27 +0800735 unsigned int npages = 0, pages_nr, offset, nbytes;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700736 struct scatterlist *sg = sgl;
Asias He18100532013-01-22 11:20:27 +0800737 void __user *ptr = iov->iov_base;
738 size_t len = iov->iov_len;
739 struct page **pages;
740 int ret, i;
741
742 pages_nr = iov_num_pages(iov);
743 if (pages_nr > sgl_count)
744 return -ENOBUFS;
745
746 pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL);
747 if (!pages)
748 return -ENOMEM;
749
750 ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
751 /* No pages were pinned */
752 if (ret < 0)
753 goto out;
 754 /* Fewer pages pinned than requested */
755 if (ret != pages_nr) {
756 for (i = 0; i < ret; i++)
757 put_page(pages[i]);
758 ret = -EFAULT;
759 goto out;
760 }
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700761
762 while (len > 0) {
Asias He18100532013-01-22 11:20:27 +0800763 offset = (uintptr_t)ptr & ~PAGE_MASK;
764 nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
765 sg_set_page(sg, pages[npages], nbytes, offset);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700766 ptr += nbytes;
767 len -= nbytes;
768 sg++;
769 npages++;
770 }
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700771
Asias He18100532013-01-22 11:20:27 +0800772out:
773 kfree(pages);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700774 return ret;
775}
776
777static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
778 struct iovec *iov, unsigned int niov, int write)
779{
780 int ret;
781 unsigned int i;
782 u32 sgl_count;
783 struct scatterlist *sg;
784
785 /*
786 * Find out how long sglist needs to be
787 */
788 sgl_count = 0;
Asias Hef3158f32013-01-22 11:20:26 +0800789 for (i = 0; i < niov; i++)
790 sgl_count += iov_num_pages(&iov[i]);
791
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700792 /* TODO overflow checking */
793
794 sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
795 if (!sg)
796 return -ENOMEM;
Fengguang Wuf0e0e9b2012-07-30 13:19:07 -0700797 pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
798 sg, sgl_count, !sg);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700799 sg_init_table(sg, sgl_count);
800
801 tv_cmd->tvc_sgl = sg;
802 tv_cmd->tvc_sgl_count = sgl_count;
803
804 pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
805 for (i = 0; i < niov; i++) {
Asias He18100532013-01-22 11:20:27 +0800806 ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700807 if (ret < 0) {
808 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
809 put_page(sg_page(&tv_cmd->tvc_sgl[i]));
810 kfree(tv_cmd->tvc_sgl);
811 tv_cmd->tvc_sgl = NULL;
812 tv_cmd->tvc_sgl_count = 0;
813 return ret;
814 }
815
816 sg += ret;
817 sgl_count -= ret;
818 }
819 return 0;
820}
821
822static void tcm_vhost_submission_work(struct work_struct *work)
823{
824 struct tcm_vhost_cmd *tv_cmd =
825 container_of(work, struct tcm_vhost_cmd, work);
Nicholas Bellinger9f0abc12012-10-01 18:40:55 -0700826 struct tcm_vhost_nexus *tv_nexus;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700827 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
828 struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
829 int rc, sg_no_bidi = 0;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700830
831 if (tv_cmd->tvc_sgl_count) {
832 sg_ptr = tv_cmd->tvc_sgl;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700833/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
834#if 0
835 if (se_cmd->se_cmd_flags & SCF_BIDI) {
836 sg_bidi_ptr = NULL;
837 sg_no_bidi = 0;
838 }
839#endif
840 } else {
841 sg_ptr = NULL;
842 }
Nicholas Bellinger9f0abc12012-10-01 18:40:55 -0700843 tv_nexus = tv_cmd->tvc_nexus;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700844
Nicholas Bellinger9f0abc12012-10-01 18:40:55 -0700845 rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
846 tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0],
847 tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len,
848 tv_cmd->tvc_task_attr, tv_cmd->tvc_data_direction,
849 0, sg_ptr, tv_cmd->tvc_sgl_count,
850 sg_bidi_ptr, sg_no_bidi);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700851 if (rc < 0) {
852 transport_send_check_condition_and_sense(se_cmd,
Nicholas Bellinger9f0abc12012-10-01 18:40:55 -0700853 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700854 transport_generic_free_cmd(se_cmd, 0);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700855 }
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700856}
857
Asias He637ab212013-04-10 15:06:15 +0800858static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
859 struct vhost_virtqueue *vq, int head, unsigned out)
860{
861 struct virtio_scsi_cmd_resp __user *resp;
862 struct virtio_scsi_cmd_resp rsp;
863 int ret;
864
865 memset(&rsp, 0, sizeof(rsp));
866 rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
867 resp = vq->iov[out].iov_base;
868 ret = __copy_to_user(resp, &rsp, sizeof(rsp));
869 if (!ret)
870 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
871 else
872 pr_err("Faulted on virtio_scsi_cmd_resp\n");
873}
874
Asias He1b7f3902013-02-06 13:20:59 +0800875static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
876 struct vhost_virtqueue *vq)
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700877{
Asias He4f7f46d2013-04-03 14:17:37 +0800878 struct tcm_vhost_tpg **vs_tpg;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700879 struct virtio_scsi_cmd_req v_req;
880 struct tcm_vhost_tpg *tv_tpg;
881 struct tcm_vhost_cmd *tv_cmd;
882 u32 exp_data_len, data_first, data_num, data_direction;
883 unsigned out, in, i;
884 int head, ret;
Asias He67e18cf2013-02-05 12:31:57 +0800885 u8 target;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700886
Asias He4f7f46d2013-04-03 14:17:37 +0800887 /*
888 * We can handle the vq only after the endpoint is setup by calling the
889 * VHOST_SCSI_SET_ENDPOINT ioctl.
890 *
891 * TODO: Check that we are running from vhost_worker which acts
892 * as read-side critical section for vhost kind of RCU.
893 * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
894 */
895 vs_tpg = rcu_dereference_check(vq->private_data, 1);
896 if (!vs_tpg)
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700897 return;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700898
899 mutex_lock(&vq->mutex);
900 vhost_disable_notify(&vs->dev, vq);
901
902 for (;;) {
903 head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
904 ARRAY_SIZE(vq->iov), &out, &in,
905 NULL, NULL);
906 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
907 head, out, in);
908 /* On error, stop handling until the next kick. */
909 if (unlikely(head < 0))
910 break;
911 /* Nothing new? Wait for eventfd to tell us they refilled. */
912 if (head == vq->num) {
913 if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
914 vhost_disable_notify(&vs->dev, vq);
915 continue;
916 }
917 break;
918 }
919
920/* FIXME: BIDI operation */
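		/*
		 * The first out descriptor carries virtio_scsi_cmd_req and the
		 * first in descriptor the response.  Any additional out
		 * descriptors are write data (DMA_TO_DEVICE); additional in
		 * descriptors are read data (DMA_FROM_DEVICE).
		 */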
921 if (out == 1 && in == 1) {
922 data_direction = DMA_NONE;
923 data_first = 0;
924 data_num = 0;
925 } else if (out == 1 && in > 1) {
926 data_direction = DMA_FROM_DEVICE;
927 data_first = out + 1;
928 data_num = in - 1;
929 } else if (out > 1 && in == 1) {
930 data_direction = DMA_TO_DEVICE;
931 data_first = 1;
932 data_num = out - 1;
933 } else {
934 vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
935 out, in);
936 break;
937 }
938
939 /*
940 * Check for a sane resp buffer so we can report errors to
941 * the guest.
942 */
943 if (unlikely(vq->iov[out].iov_len !=
944 sizeof(struct virtio_scsi_cmd_resp))) {
945 vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
946 " bytes\n", vq->iov[out].iov_len);
947 break;
948 }
949
950 if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
951 vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
952 " bytes\n", vq->iov[0].iov_len);
953 break;
954 }
955 pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
956 " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
957 ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
958 sizeof(v_req));
959 if (unlikely(ret)) {
960 vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
961 break;
962 }
963
Asias He67e18cf2013-02-05 12:31:57 +0800964 /* Extract the tpgt */
965 target = v_req.lun[1];
Asias He4f7f46d2013-04-03 14:17:37 +0800966 tv_tpg = ACCESS_ONCE(vs_tpg[target]);
Asias He67e18cf2013-02-05 12:31:57 +0800967
968 /* Target does not exist, fail the request */
969 if (unlikely(!tv_tpg)) {
Asias He637ab212013-04-10 15:06:15 +0800970 vhost_scsi_send_bad_target(vs, vq, head, out);
Asias He67e18cf2013-02-05 12:31:57 +0800971 continue;
972 }
973
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700974 exp_data_len = 0;
975 for (i = 0; i < data_num; i++)
976 exp_data_len += vq->iov[data_first + i].iov_len;
977
Asias Hef2f0173d2013-04-27 11:16:49 +0800978 tv_cmd = vhost_scsi_allocate_cmd(vq, tv_tpg, &v_req,
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700979 exp_data_len, data_direction);
980 if (IS_ERR(tv_cmd)) {
981 vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
982 PTR_ERR(tv_cmd));
Asias He055f6482013-04-10 15:06:16 +0800983 goto err_cmd;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700984 }
985 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
986 ": %d\n", tv_cmd, exp_data_len, data_direction);
987
988 tv_cmd->tvc_vhost = vs;
Asias He1b7f3902013-02-06 13:20:59 +0800989 tv_cmd->tvc_vq = vq;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -0700990 tv_cmd->tvc_resp = vq->iov[out].iov_base;
991
992 /*
 993 * Copy in the received CDB descriptor into tv_cmd->tvc_cdb
994 * that will be used by tcm_vhost_new_cmd_map() and down into
995 * target_setup_cmd_from_cdb()
996 */
997 memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
998 /*
 999 * Check that the received CDB size does not exceed our
1000 * hardcoded max for tcm_vhost
1001 */
1002 /* TODO what if cdb was too small for varlen cdb header? */
1003 if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) >
1004 TCM_VHOST_MAX_CDB_SIZE)) {
1005 vq_err(vq, "Received SCSI CDB with command_size: %d that"
1006 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1007 scsi_command_size(tv_cmd->tvc_cdb),
1008 TCM_VHOST_MAX_CDB_SIZE);
Asias He055f6482013-04-10 15:06:16 +08001009 goto err_free;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001010 }
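		/*
		 * Decode the SAM single-level LUN from lun[2..3]; masking with
		 * 0x3FFF strips the flat-addressing bits (0x40 in the top byte
		 * for LUNs >= 256).
		 */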
1011 tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
1012
1013 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1014 tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun);
1015
1016 if (data_direction != DMA_NONE) {
1017 ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
1018 &vq->iov[data_first], data_num,
1019 data_direction == DMA_TO_DEVICE);
1020 if (unlikely(ret)) {
1021 vq_err(vq, "Failed to map iov to sgl\n");
Asias He055f6482013-04-10 15:06:16 +08001022 goto err_free;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001023 }
1024 }
1025
1026 /*
1027 * Save the descriptor from vhost_get_vq_desc() to be used to
1028 * complete the virtio-scsi request in TCM callback context via
1029 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
1030 */
1031 tv_cmd->tvc_vq_desc = head;
1032 /*
1033 * Dispatch tv_cmd descriptor for cmwq execution in process
1034 * context provided by tcm_vhost_workqueue. This also ensures
1035 * tv_cmd is executed on the same kworker CPU as this vhost
 1036 * thread to gain positive L2 cache locality effects.
1037 */
1038 INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work);
1039 queue_work(tcm_vhost_workqueue, &tv_cmd->work);
1040 }
1041
1042 mutex_unlock(&vq->mutex);
Asias He7ea206c2013-04-10 15:06:14 +08001043 return;
1044
Asias He055f6482013-04-10 15:06:16 +08001045err_free:
Asias He7ea206c2013-04-10 15:06:14 +08001046 vhost_scsi_free_cmd(tv_cmd);
Asias He055f6482013-04-10 15:06:16 +08001047err_cmd:
1048 vhost_scsi_send_bad_target(vs, vq, head, out);
Asias He7ea206c2013-04-10 15:06:14 +08001049 mutex_unlock(&vq->mutex);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001050}
1051
1052static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1053{
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001054 pr_debug("%s: The handling func for control queue.\n", __func__);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001055}
1056
Asias Hea6c9af82013-04-25 15:35:21 +08001057static void tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
1058 struct se_lun *lun, u32 event, u32 reason)
1059{
1060 struct tcm_vhost_evt *evt;
1061
1062 evt = tcm_vhost_allocate_evt(vs, event, reason);
1063 if (!evt)
1064 return;
1065
1066 if (tpg && lun) {
1067 /* TODO: share lun setup code with virtio-scsi.ko */
1068 /*
1069 * Note: evt->event is zeroed when we allocate it and
1070 * lun[4-7] need to be zero according to virtio-scsi spec.
1071 */
1072 evt->event.lun[0] = 0x01;
1073 evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
1074 if (lun->unpacked_lun >= 256)
 1075 evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
1076 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1077 }
1078
1079 llist_add(&evt->list, &vs->vs_event_list);
1080 vhost_work_queue(&vs->dev, &vs->vs_event_work);
1081}
1082
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001083static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1084{
Asias Hea6c9af82013-04-25 15:35:21 +08001085 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1086 poll.work);
1087 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1088
1089 mutex_lock(&vq->mutex);
1090 if (!vq->private_data)
1091 goto out;
1092
1093 if (vs->vs_events_missed)
1094 tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1095out:
1096 mutex_unlock(&vq->mutex);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001097}
1098
1099static void vhost_scsi_handle_kick(struct vhost_work *work)
1100{
1101 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1102 poll.work);
1103 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1104
Asias He1b7f3902013-02-06 13:20:59 +08001105 vhost_scsi_handle_vq(vs, vq);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001106}
1107
Asias He4f7f46d2013-04-03 14:17:37 +08001108static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1109{
Asias He3ab2e422013-04-27 11:16:48 +08001110 vhost_poll_flush(&vs->vqs[index].vq.poll);
Asias He4f7f46d2013-04-03 14:17:37 +08001111}
1112
Michael S. Tsirkin3dfbff32013-04-28 15:38:52 +03001113/* Callers must hold dev mutex */
Asias He4f7f46d2013-04-03 14:17:37 +08001114static void vhost_scsi_flush(struct vhost_scsi *vs)
1115{
Asias Hef2f0173d2013-04-27 11:16:49 +08001116 struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
Asias He4f7f46d2013-04-03 14:17:37 +08001117 int i;
1118
Asias Hef2f0173d2013-04-27 11:16:49 +08001119 /* Init new inflight and remember the old inflight */
1120 tcm_vhost_init_inflight(vs, old_inflight);
1121
1122 /*
1123 * The inflight->kref was initialized to 1. We decrement it here to
1124 * indicate the start of the flush operation so that it will reach 0
1125 * when all the reqs are finished.
1126 */
1127 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1128 kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);
1129
1130 /* Flush both the vhost poll and vhost work */
Asias He4f7f46d2013-04-03 14:17:37 +08001131 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1132 vhost_scsi_flush_vq(vs, i);
1133 vhost_work_flush(&vs->dev, &vs->vs_completion_work);
Asias Hea6c9af82013-04-25 15:35:21 +08001134 vhost_work_flush(&vs->dev, &vs->vs_event_work);
Asias Hef2f0173d2013-04-27 11:16:49 +08001135
1136 /* Wait for all reqs issued before the flush to be finished */
1137 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1138 wait_for_completion(&old_inflight[i]->comp);
Asias He4f7f46d2013-04-03 14:17:37 +08001139}
1140
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001141/*
1142 * Called from vhost_scsi_ioctl() context to walk the list of available
1143 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
Asias Hef2b7daf2013-04-25 15:35:20 +08001144 *
1145 * The lock nesting rule is:
1146 * tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001147 */
1148static int vhost_scsi_set_endpoint(
1149 struct vhost_scsi *vs,
1150 struct vhost_scsi_target *t)
1151{
1152 struct tcm_vhost_tport *tv_tport;
1153 struct tcm_vhost_tpg *tv_tpg;
Asias He4f7f46d2013-04-03 14:17:37 +08001154 struct tcm_vhost_tpg **vs_tpg;
1155 struct vhost_virtqueue *vq;
1156 int index, ret, i, len;
Asias He67e18cf2013-02-05 12:31:57 +08001157 bool match = false;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001158
Asias Hef2b7daf2013-04-25 15:35:20 +08001159 mutex_lock(&tcm_vhost_mutex);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001160 mutex_lock(&vs->dev.mutex);
Asias Hef2b7daf2013-04-25 15:35:20 +08001161
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001162 /* Verify that ring has been set up correctly. */
1163 for (index = 0; index < vs->dev.nvqs; ++index) {
 1164 /* Verify that ring has been set up correctly. */
Asias He3ab2e422013-04-27 11:16:48 +08001165 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
Asias Hef2b7daf2013-04-25 15:35:20 +08001166 ret = -EFAULT;
1167 goto out;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001168 }
1169 }
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001170
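	/*
	 * vs_tpg[] has one slot per possible target id (tpgt); build the new
	 * array here and install it below so the vq handlers always see a
	 * consistent snapshot.
	 */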
Asias He4f7f46d2013-04-03 14:17:37 +08001171 len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1172 vs_tpg = kzalloc(len, GFP_KERNEL);
1173 if (!vs_tpg) {
Asias Hef2b7daf2013-04-25 15:35:20 +08001174 ret = -ENOMEM;
1175 goto out;
Asias He4f7f46d2013-04-03 14:17:37 +08001176 }
1177 if (vs->vs_tpg)
1178 memcpy(vs_tpg, vs->vs_tpg, len);
1179
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001180 list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
1181 mutex_lock(&tv_tpg->tv_tpg_mutex);
1182 if (!tv_tpg->tpg_nexus) {
1183 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1184 continue;
1185 }
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001186 if (tv_tpg->tv_tpg_vhost_count != 0) {
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001187 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1188 continue;
1189 }
1190 tv_tport = tv_tpg->tport;
1191
Asias He67e18cf2013-02-05 12:31:57 +08001192 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
Asias He4f7f46d2013-04-03 14:17:37 +08001193 if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
Asias He4f7f46d2013-04-03 14:17:37 +08001194 kfree(vs_tpg);
Asias Hef2b7daf2013-04-25 15:35:20 +08001195 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1196 ret = -EEXIST;
1197 goto out;
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001198 }
Asias He67e18cf2013-02-05 12:31:57 +08001199 tv_tpg->tv_tpg_vhost_count++;
Asias Hea6c9af82013-04-25 15:35:21 +08001200 tv_tpg->vhost_scsi = vs;
Asias He4f7f46d2013-04-03 14:17:37 +08001201 vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001202 smp_mb__after_atomic_inc();
Asias He67e18cf2013-02-05 12:31:57 +08001203 match = true;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001204 }
1205 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1206 }
Asias He67e18cf2013-02-05 12:31:57 +08001207
1208 if (match) {
1209 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1210 sizeof(vs->vs_vhost_wwpn));
Asias He4f7f46d2013-04-03 14:17:37 +08001211 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
Asias He3ab2e422013-04-27 11:16:48 +08001212 vq = &vs->vqs[i].vq;
Asias He4f7f46d2013-04-03 14:17:37 +08001213 /* Flushing the vhost_work acts as synchronize_rcu */
1214 mutex_lock(&vq->mutex);
1215 rcu_assign_pointer(vq->private_data, vs_tpg);
Asias Hedfd5d562013-04-03 14:17:38 +08001216 vhost_init_used(vq);
Asias He4f7f46d2013-04-03 14:17:37 +08001217 mutex_unlock(&vq->mutex);
1218 }
Asias He67e18cf2013-02-05 12:31:57 +08001219 ret = 0;
1220 } else {
1221 ret = -EEXIST;
1222 }
1223
Asias He4f7f46d2013-04-03 14:17:37 +08001224 /*
1225 * Act as synchronize_rcu to make sure access to
1226 * old vs->vs_tpg is finished.
1227 */
1228 vhost_scsi_flush(vs);
1229 kfree(vs->vs_tpg);
1230 vs->vs_tpg = vs_tpg;
1231
Asias Hef2b7daf2013-04-25 15:35:20 +08001232out:
Asias He67e18cf2013-02-05 12:31:57 +08001233 mutex_unlock(&vs->dev.mutex);
Asias Hef2b7daf2013-04-25 15:35:20 +08001234 mutex_unlock(&tcm_vhost_mutex);
Asias He67e18cf2013-02-05 12:31:57 +08001235 return ret;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001236}
1237
1238static int vhost_scsi_clear_endpoint(
1239 struct vhost_scsi *vs,
1240 struct vhost_scsi_target *t)
1241{
1242 struct tcm_vhost_tport *tv_tport;
1243 struct tcm_vhost_tpg *tv_tpg;
Asias He4f7f46d2013-04-03 14:17:37 +08001244 struct vhost_virtqueue *vq;
1245 bool match = false;
Asias He67e18cf2013-02-05 12:31:57 +08001246 int index, ret, i;
1247 u8 target;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001248
Asias Hef2b7daf2013-04-25 15:35:20 +08001249 mutex_lock(&tcm_vhost_mutex);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001250 mutex_lock(&vs->dev.mutex);
1251 /* Verify that ring has been setup correctly. */
1252 for (index = 0; index < vs->dev.nvqs; ++index) {
Asias He3ab2e422013-04-27 11:16:48 +08001253 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001254 ret = -EFAULT;
Asias He038e0af2013-03-15 09:14:05 +08001255 goto err_dev;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001256 }
1257 }
Asias He4f7f46d2013-04-03 14:17:37 +08001258
1259 if (!vs->vs_tpg) {
Asias Hef2b7daf2013-04-25 15:35:20 +08001260 ret = 0;
1261 goto err_dev;
Asias He4f7f46d2013-04-03 14:17:37 +08001262 }
1263
Asias He67e18cf2013-02-05 12:31:57 +08001264 for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1265 target = i;
Asias He67e18cf2013-02-05 12:31:57 +08001266 tv_tpg = vs->vs_tpg[target];
1267 if (!tv_tpg)
1268 continue;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001269
Asias He038e0af2013-03-15 09:14:05 +08001270 mutex_lock(&tv_tpg->tv_tpg_mutex);
Asias He67e18cf2013-02-05 12:31:57 +08001271 tv_tport = tv_tpg->tport;
1272 if (!tv_tport) {
1273 ret = -ENODEV;
Asias He038e0af2013-03-15 09:14:05 +08001274 goto err_tpg;
Asias He67e18cf2013-02-05 12:31:57 +08001275 }
1276
1277 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1278 pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
1279 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1280 tv_tport->tport_name, tv_tpg->tport_tpgt,
1281 t->vhost_wwpn, t->vhost_tpgt);
1282 ret = -EINVAL;
Asias He038e0af2013-03-15 09:14:05 +08001283 goto err_tpg;
Asias He67e18cf2013-02-05 12:31:57 +08001284 }
1285 tv_tpg->tv_tpg_vhost_count--;
Asias Hea6c9af82013-04-25 15:35:21 +08001286 tv_tpg->vhost_scsi = NULL;
Asias He67e18cf2013-02-05 12:31:57 +08001287 vs->vs_tpg[target] = NULL;
Asias He4f7f46d2013-04-03 14:17:37 +08001288 match = true;
Asias He038e0af2013-03-15 09:14:05 +08001289 mutex_unlock(&tv_tpg->tv_tpg_mutex);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001290 }
Asias He4f7f46d2013-04-03 14:17:37 +08001291 if (match) {
1292 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
Asias He3ab2e422013-04-27 11:16:48 +08001293 vq = &vs->vqs[i].vq;
Asias He4f7f46d2013-04-03 14:17:37 +08001294 /* Flushing the vhost_work acts as synchronize_rcu */
1295 mutex_lock(&vq->mutex);
1296 rcu_assign_pointer(vq->private_data, NULL);
1297 mutex_unlock(&vq->mutex);
1298 }
1299 }
1300 /*
1301 * Act as synchronize_rcu to make sure access to
1302 * old vs->vs_tpg is finished.
1303 */
1304 vhost_scsi_flush(vs);
1305 kfree(vs->vs_tpg);
1306 vs->vs_tpg = NULL;
Asias Hea6c9af82013-04-25 15:35:21 +08001307 WARN_ON(vs->vs_events_nr);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001308 mutex_unlock(&vs->dev.mutex);
Asias Hef2b7daf2013-04-25 15:35:20 +08001309 mutex_unlock(&tcm_vhost_mutex);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001310 return 0;
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001311
Asias He038e0af2013-03-15 09:14:05 +08001312err_tpg:
1313 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1314err_dev:
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001315 mutex_unlock(&vs->dev.mutex);
Asias Hef2b7daf2013-04-25 15:35:20 +08001316 mutex_unlock(&tcm_vhost_mutex);
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001317 return ret;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001318}
1319
Asias He4f7f46d2013-04-03 14:17:37 +08001320static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1321{
1322 if (features & ~VHOST_SCSI_FEATURES)
1323 return -EOPNOTSUPP;
1324
1325 mutex_lock(&vs->dev.mutex);
1326 if ((features & (1 << VHOST_F_LOG_ALL)) &&
1327 !vhost_log_access_ok(&vs->dev)) {
1328 mutex_unlock(&vs->dev.mutex);
1329 return -EFAULT;
1330 }
1331 vs->dev.acked_features = features;
1332 smp_wmb();
1333 vhost_scsi_flush(vs);
1334 mutex_unlock(&vs->dev.mutex);
1335 return 0;
1336}
1337
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001338static int vhost_scsi_open(struct inode *inode, struct file *f)
1339{
Asias Hec7289312013-05-06 16:38:26 +08001340 struct vhost_scsi *vs;
Asias He3ab2e422013-04-27 11:16:48 +08001341 struct vhost_virtqueue **vqs;
Asias He1b7f3902013-02-06 13:20:59 +08001342 int r, i;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001343
Asias Hec7289312013-05-06 16:38:26 +08001344 vs = kzalloc(sizeof(*vs), GFP_KERNEL);
1345 if (!vs)
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001346 return -ENOMEM;
1347
Asias He3ab2e422013-04-27 11:16:48 +08001348 vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1349 if (!vqs) {
Asias Hec7289312013-05-06 16:38:26 +08001350 kfree(vs);
Asias He3ab2e422013-04-27 11:16:48 +08001351 return -ENOMEM;
1352 }
1353
Asias Hec7289312013-05-06 16:38:26 +08001354 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1355 vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
Asias Hea6c9af82013-04-25 15:35:21 +08001356
Asias Hec7289312013-05-06 16:38:26 +08001357 vs->vs_events_nr = 0;
1358 vs->vs_events_missed = false;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001359
Asias Hec7289312013-05-06 16:38:26 +08001360 vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1361 vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1362 vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1363 vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
Asias He3ab2e422013-04-27 11:16:48 +08001364 for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
Asias Hec7289312013-05-06 16:38:26 +08001365 vqs[i] = &vs->vqs[i].vq;
1366 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
Asias He3ab2e422013-04-27 11:16:48 +08001367 }
Asias Hec7289312013-05-06 16:38:26 +08001368 r = vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
Asias Hef2f0173d2013-04-27 11:16:49 +08001369
Asias Hec7289312013-05-06 16:38:26 +08001370 tcm_vhost_init_inflight(vs, NULL);
Asias Hef2f0173d2013-04-27 11:16:49 +08001371
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001372 if (r < 0) {
Asias He3ab2e422013-04-27 11:16:48 +08001373 kfree(vqs);
Asias Hec7289312013-05-06 16:38:26 +08001374 kfree(vs);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001375 return r;
1376 }
1377
Asias Hec7289312013-05-06 16:38:26 +08001378 f->private_data = vs;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001379 return 0;
1380}
1381
1382static int vhost_scsi_release(struct inode *inode, struct file *f)
1383{
Asias Hec7289312013-05-06 16:38:26 +08001384 struct vhost_scsi *vs = f->private_data;
Asias He67e18cf2013-02-05 12:31:57 +08001385 struct vhost_scsi_target t;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001386
Asias Hec7289312013-05-06 16:38:26 +08001387 mutex_lock(&vs->dev.mutex);
1388 memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1389 mutex_unlock(&vs->dev.mutex);
1390 vhost_scsi_clear_endpoint(vs, &t);
1391 vhost_dev_stop(&vs->dev);
1392 vhost_dev_cleanup(&vs->dev, false);
Asias Hea6c9af82013-04-25 15:35:21 +08001393 /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
Asias Hec7289312013-05-06 16:38:26 +08001394 vhost_scsi_flush(vs);
1395 kfree(vs->dev.vqs);
1396 kfree(vs);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001397 return 0;
1398}
1399
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001400static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
1401 unsigned long arg)
1402{
1403 struct vhost_scsi *vs = f->private_data;
1404 struct vhost_scsi_target backend;
1405 void __user *argp = (void __user *)arg;
1406 u64 __user *featurep = argp;
Asias He11c634182013-04-25 15:35:22 +08001407 u32 __user *eventsp = argp;
1408 u32 events_missed;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001409 u64 features;
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001410 int r, abi_version = VHOST_SCSI_ABI_VERSION;
Asias He3ab2e422013-04-27 11:16:48 +08001411 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001412
1413 switch (ioctl) {
1414 case VHOST_SCSI_SET_ENDPOINT:
1415 if (copy_from_user(&backend, argp, sizeof backend))
1416 return -EFAULT;
Michael S. Tsirkin6de71452012-08-18 15:44:09 -07001417 if (backend.reserved != 0)
1418 return -EOPNOTSUPP;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001419
1420 return vhost_scsi_set_endpoint(vs, &backend);
1421 case VHOST_SCSI_CLEAR_ENDPOINT:
1422 if (copy_from_user(&backend, argp, sizeof backend))
1423 return -EFAULT;
Michael S. Tsirkin6de71452012-08-18 15:44:09 -07001424 if (backend.reserved != 0)
1425 return -EOPNOTSUPP;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001426
1427 return vhost_scsi_clear_endpoint(vs, &backend);
1428 case VHOST_SCSI_GET_ABI_VERSION:
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001429 if (copy_to_user(argp, &abi_version, sizeof abi_version))
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001430 return -EFAULT;
1431 return 0;
Asias He11c634182013-04-25 15:35:22 +08001432 case VHOST_SCSI_SET_EVENTS_MISSED:
1433 if (get_user(events_missed, eventsp))
1434 return -EFAULT;
1435 mutex_lock(&vq->mutex);
1436 vs->vs_events_missed = events_missed;
1437 mutex_unlock(&vq->mutex);
1438 return 0;
1439 case VHOST_SCSI_GET_EVENTS_MISSED:
1440 mutex_lock(&vq->mutex);
1441 events_missed = vs->vs_events_missed;
1442 mutex_unlock(&vq->mutex);
1443 if (put_user(events_missed, eventsp))
1444 return -EFAULT;
1445 return 0;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001446 case VHOST_GET_FEATURES:
Nicholas Bellinger5dade712013-03-27 17:23:41 -07001447 features = VHOST_SCSI_FEATURES;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001448 if (copy_to_user(featurep, &features, sizeof features))
1449 return -EFAULT;
1450 return 0;
1451 case VHOST_SET_FEATURES:
1452 if (copy_from_user(&features, featurep, sizeof features))
1453 return -EFAULT;
1454 return vhost_scsi_set_features(vs, features);
1455 default:
1456 mutex_lock(&vs->dev.mutex);
Michael S. Tsirkin935cdee2012-12-06 14:03:34 +02001457 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1458 /* TODO: flush backend after dev ioctl. */
1459 if (r == -ENOIOCTLCMD)
1460 r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001461 mutex_unlock(&vs->dev.mutex);
1462 return r;
1463 }
1464}
1465
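/*
 * Purely illustrative sketch (not part of this driver) of how a userspace
 * VMM might drive the ioctls above once a matching WWPN/TPG has been set
 * up through configfs; the WWPN and TPG tag below are made-up examples:
 *
 *	int vhost_fd = open("/dev/vhost-scsi", O_RDWR);
 *	struct vhost_scsi_target t = { .vhost_tpgt = 1 };
 *	u64 features;
 *
 *	strncpy(t.vhost_wwpn, "naa.600140554cf3a18e", sizeof(t.vhost_wwpn));
 *	ioctl(vhost_fd, VHOST_SET_OWNER, NULL);
 *	ioctl(vhost_fd, VHOST_GET_FEATURES, &features);
 *	ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
 *	... per-virtqueue VHOST_SET_VRING_* setup goes here ...
 *	ioctl(vhost_fd, VHOST_SCSI_SET_ENDPOINT, &t);
 */
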
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001466#ifdef CONFIG_COMPAT
1467static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1468 unsigned long arg)
1469{
1470 return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1471}
1472#endif
1473
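/*
 * File operations for the "vhost-scsi" misc character device registered
 * below (typically exposed to userspace as /dev/vhost-scsi).
 */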
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001474static const struct file_operations vhost_scsi_fops = {
1475 .owner = THIS_MODULE,
1476 .release = vhost_scsi_release,
1477 .unlocked_ioctl = vhost_scsi_ioctl,
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001478#ifdef CONFIG_COMPAT
1479 .compat_ioctl = vhost_scsi_compat_ioctl,
1480#endif
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001481 .open = vhost_scsi_open,
1482 .llseek = noop_llseek,
1483};
1484
1485static struct miscdevice vhost_scsi_misc = {
1486 MISC_DYNAMIC_MINOR,
1487 "vhost-scsi",
1488 &vhost_scsi_fops,
1489};
1490
1491static int __init vhost_scsi_register(void)
1492{
1493 return misc_register(&vhost_scsi_misc);
1494}
1495
1496static int vhost_scsi_deregister(void)
1497{
1498 return misc_deregister(&vhost_scsi_misc);
1499}
1500
1501static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
1502{
1503 switch (tport->tport_proto_id) {
1504 case SCSI_PROTOCOL_SAS:
1505 return "SAS";
1506 case SCSI_PROTOCOL_FCP:
1507 return "FCP";
1508 case SCSI_PROTOCOL_ISCSI:
1509 return "iSCSI";
1510 default:
1511 break;
1512 }
1513
1514 return "Unknown";
1515}
1516
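/*
 * Report a LUN appearing or going away to the guest.  When the
 * VIRTIO_SCSI_F_HOTPLUG feature has been negotiated, queue a
 * VIRTIO_SCSI_T_TRANSPORT_RESET event with a RESCAN (plug) or REMOVED
 * (unplug) reason on the event virtqueue; without the feature this is
 * silently a no-op.
 */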
Asias Hea6c9af82013-04-25 15:35:21 +08001517static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
1518 struct se_lun *lun, bool plug)
1519{

1521 struct vhost_scsi *vs = tpg->vhost_scsi;
1522 struct vhost_virtqueue *vq;
1523 u32 reason;
1524
1525 if (!vs)
1526 return;
1527
1528 mutex_lock(&vs->dev.mutex);
1529 if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
1530 mutex_unlock(&vs->dev.mutex);
1531 return;
1532 }
1533
1534 if (plug)
1535 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1536 else
1537 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1538
Asias He3ab2e422013-04-27 11:16:48 +08001539 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
Asias Hea6c9af82013-04-25 15:35:21 +08001540 mutex_lock(&vq->mutex);
1541 tcm_vhost_send_evt(vs, tpg, lun,
1542 VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1543 mutex_unlock(&vq->mutex);
1544 mutex_unlock(&vs->dev.mutex);
1545}
1546
1547static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1548{
1549 tcm_vhost_do_plug(tpg, lun, true);
1550}
1551
1552static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1553{
1554 tcm_vhost_do_plug(tpg, lun, false);
1555}
1556
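/*
 * fabric_post_link()/fabric_pre_unlink() callbacks, invoked by the target
 * core when a LUN is mapped to or unmapped from this TPG via configfs.
 * They track the active port count and generate the corresponding hotplug
 * or hotunplug event for the guest.
 */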
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001557static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001558 struct se_lun *lun)
1559{
1560 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1561 struct tcm_vhost_tpg, se_tpg);
1562
Asias Hea6c9af82013-04-25 15:35:21 +08001563 mutex_lock(&tcm_vhost_mutex);
1564
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001565 mutex_lock(&tv_tpg->tv_tpg_mutex);
1566 tv_tpg->tv_tpg_port_count++;
1567 mutex_unlock(&tv_tpg->tv_tpg_mutex);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001568
Asias Hea6c9af82013-04-25 15:35:21 +08001569 tcm_vhost_hotplug(tv_tpg, lun);
1570
1571 mutex_unlock(&tcm_vhost_mutex);
1572
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001573 return 0;
1574}
1575
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001576static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
Asias Hea6c9af82013-04-25 15:35:21 +08001577 struct se_lun *lun)
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001578{
1579 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1580 struct tcm_vhost_tpg, se_tpg);
1581
Asias Hea6c9af82013-04-25 15:35:21 +08001582 mutex_lock(&tcm_vhost_mutex);
1583
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001584 mutex_lock(&tv_tpg->tv_tpg_mutex);
1585 tv_tpg->tv_tpg_port_count--;
1586 mutex_unlock(&tv_tpg->tv_tpg_mutex);
Asias Hea6c9af82013-04-25 15:35:21 +08001587
1588 tcm_vhost_hotunplug(tv_tpg, lun);
1589
1590 mutex_unlock(&tcm_vhost_mutex);
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001591}
1592
1593static struct se_node_acl *tcm_vhost_make_nodeacl(
1594 struct se_portal_group *se_tpg,
1595 struct config_group *group,
1596 const char *name)
1597{
1598 struct se_node_acl *se_nacl, *se_nacl_new;
1599 struct tcm_vhost_nacl *nacl;
1600 u64 wwpn = 0;
1601 u32 nexus_depth;
1602
 1603	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1604 return ERR_PTR(-EINVAL); */
1605 se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
1606 if (!se_nacl_new)
1607 return ERR_PTR(-ENOMEM);
1608
1609 nexus_depth = 1;
1610 /*
1611 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
1612 * when converting a NodeACL from demo mode -> explict
1613 */
1614 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1615 name, nexus_depth);
1616 if (IS_ERR(se_nacl)) {
1617 tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
1618 return se_nacl;
1619 }
1620 /*
1621 * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
1622 */
1623 nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
1624 nacl->iport_wwpn = wwpn;
1625
1626 return se_nacl;
1627}
1628
1629static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1630{
1631 struct tcm_vhost_nacl *nacl = container_of(se_acl,
1632 struct tcm_vhost_nacl, se_node_acl);
1633 core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1634 kfree(nacl);
1635}
1636
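/*
 * Create the single I_T nexus for a TPG: allocate a tcm_vhost_nexus, set
 * up its se_session, resolve (or create, in demo mode) the initiator node
 * ACL for 'name' and register the session with the target core.  Called
 * from tcm_vhost_tpg_store_nexus() below.
 */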
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001637static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001638 const char *name)
1639{
1640 struct se_portal_group *se_tpg;
1641 struct tcm_vhost_nexus *tv_nexus;
1642
1643 mutex_lock(&tv_tpg->tv_tpg_mutex);
1644 if (tv_tpg->tpg_nexus) {
1645 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1646 pr_debug("tv_tpg->tpg_nexus already exists\n");
1647 return -EEXIST;
1648 }
1649 se_tpg = &tv_tpg->se_tpg;
1650
1651 tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
1652 if (!tv_nexus) {
1653 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1654 pr_err("Unable to allocate struct tcm_vhost_nexus\n");
1655 return -ENOMEM;
1656 }
1657 /*
1658 * Initialize the struct se_session pointer
1659 */
1660 tv_nexus->tvn_se_sess = transport_init_session();
1661 if (IS_ERR(tv_nexus->tvn_se_sess)) {
1662 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1663 kfree(tv_nexus);
1664 return -ENOMEM;
1665 }
1666 /*
 1667	 * Since we are running in 'demo mode' this call will generate a
1668 * struct se_node_acl for the tcm_vhost struct se_portal_group with
1669 * the SCSI Initiator port name of the passed configfs group 'name'.
1670 */
1671 tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1672 se_tpg, (unsigned char *)name);
1673 if (!tv_nexus->tvn_se_sess->se_node_acl) {
1674 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1675 pr_debug("core_tpg_check_initiator_node_acl() failed"
1676 " for %s\n", name);
1677 transport_free_session(tv_nexus->tvn_se_sess);
1678 kfree(tv_nexus);
1679 return -ENOMEM;
1680 }
1681 /*
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001682 * Now register the TCM vhost virtual I_T Nexus as active with the
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001683 * call to __transport_register_session()
1684 */
1685 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1686 tv_nexus->tvn_se_sess, tv_nexus);
1687 tv_tpg->tpg_nexus = tv_nexus;
1688
1689 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1690 return 0;
1691}
1692
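/*
 * Tear down the TPG's I_T nexus.  This is refused while any LUN is still
 * linked to the TPG or while a vhost-scsi endpoint still references it;
 * otherwise the se_session is deregistered and the nexus freed.
 */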
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001693static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001694{
1695 struct se_session *se_sess;
1696 struct tcm_vhost_nexus *tv_nexus;
1697
1698 mutex_lock(&tpg->tv_tpg_mutex);
1699 tv_nexus = tpg->tpg_nexus;
1700 if (!tv_nexus) {
1701 mutex_unlock(&tpg->tv_tpg_mutex);
1702 return -ENODEV;
1703 }
1704
1705 se_sess = tv_nexus->tvn_se_sess;
1706 if (!se_sess) {
1707 mutex_unlock(&tpg->tv_tpg_mutex);
1708 return -ENODEV;
1709 }
1710
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001711 if (tpg->tv_tpg_port_count != 0) {
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001712 mutex_unlock(&tpg->tv_tpg_mutex);
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001713 pr_err("Unable to remove TCM_vhost I_T Nexus with"
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001714 " active TPG port count: %d\n",
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001715 tpg->tv_tpg_port_count);
1716 return -EBUSY;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001717 }
1718
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001719 if (tpg->tv_tpg_vhost_count != 0) {
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001720 mutex_unlock(&tpg->tv_tpg_mutex);
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001721 pr_err("Unable to remove TCM_vhost I_T Nexus with"
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001722 " active TPG vhost count: %d\n",
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001723 tpg->tv_tpg_vhost_count);
1724 return -EBUSY;
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001725 }
1726
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001727 pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001728 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
1729 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1730 /*
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001731 * Release the SCSI I_T Nexus to the emulated vhost Target Port
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001732 */
1733 transport_deregister_session(tv_nexus->tvn_se_sess);
1734 tpg->tpg_nexus = NULL;
1735 mutex_unlock(&tpg->tv_tpg_mutex);
1736
1737 kfree(tv_nexus);
1738 return 0;
1739}
1740
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001741static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001742 char *page)
1743{
1744 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1745 struct tcm_vhost_tpg, se_tpg);
1746 struct tcm_vhost_nexus *tv_nexus;
1747 ssize_t ret;
1748
1749 mutex_lock(&tv_tpg->tv_tpg_mutex);
1750 tv_nexus = tv_tpg->tpg_nexus;
1751 if (!tv_nexus) {
1752 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1753 return -ENODEV;
1754 }
1755 ret = snprintf(page, PAGE_SIZE, "%s\n",
1756 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1757 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1758
1759 return ret;
1760}
1761
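/*
 * Handler for writes to the per-TPG "nexus" configfs attribute.  Writing
 * an initiator port name creates the I_T nexus, writing "NULL" drops it.
 * Illustrative example only (paths assume the standard configfs mount and
 * made-up WWNs):
 *
 *	echo -n naa.60014055cd7e4c01 > \
 *	    /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/nexus
 */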
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001762static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001763 const char *page,
1764 size_t count)
1765{
1766 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1767 struct tcm_vhost_tpg, se_tpg);
1768 struct tcm_vhost_tport *tport_wwn = tv_tpg->tport;
1769 unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
1770 int ret;
1771 /*
1772 * Shutdown the active I_T nexus if 'NULL' is passed..
1773 */
1774 if (!strncmp(page, "NULL", 4)) {
1775 ret = tcm_vhost_drop_nexus(tv_tpg);
1776 return (!ret) ? count : ret;
1777 }
1778 /*
1779 * Otherwise make sure the passed virtual Initiator port WWN matches
1780 * the fabric protocol_id set in tcm_vhost_make_tport(), and call
1781 * tcm_vhost_make_nexus().
1782 */
1783 if (strlen(page) >= TCM_VHOST_NAMELEN) {
 1784		pr_err("Emulated NAA SAS Address: %s, exceeds"
1785 " max: %d\n", page, TCM_VHOST_NAMELEN);
1786 return -EINVAL;
1787 }
1788 snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
1789
1790 ptr = strstr(i_port, "naa.");
1791 if (ptr) {
1792 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1793 pr_err("Passed SAS Initiator Port %s does not"
1794 " match target port protoid: %s\n", i_port,
1795 tcm_vhost_dump_proto_id(tport_wwn));
1796 return -EINVAL;
1797 }
1798 port_ptr = &i_port[0];
1799 goto check_newline;
1800 }
1801 ptr = strstr(i_port, "fc.");
1802 if (ptr) {
1803 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1804 pr_err("Passed FCP Initiator Port %s does not"
1805 " match target port protoid: %s\n", i_port,
1806 tcm_vhost_dump_proto_id(tport_wwn));
1807 return -EINVAL;
1808 }
1809 port_ptr = &i_port[3]; /* Skip over "fc." */
1810 goto check_newline;
1811 }
1812 ptr = strstr(i_port, "iqn.");
1813 if (ptr) {
1814 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1815 pr_err("Passed iSCSI Initiator Port %s does not"
1816 " match target port protoid: %s\n", i_port,
1817 tcm_vhost_dump_proto_id(tport_wwn));
1818 return -EINVAL;
1819 }
1820 port_ptr = &i_port[0];
1821 goto check_newline;
1822 }
1823 pr_err("Unable to locate prefix for emulated Initiator Port:"
1824 " %s\n", i_port);
1825 return -EINVAL;
1826 /*
1827 * Clear any trailing newline for the NAA WWN
1828 */
1829check_newline:
1830 if (i_port[strlen(i_port)-1] == '\n')
1831 i_port[strlen(i_port)-1] = '\0';
1832
1833 ret = tcm_vhost_make_nexus(tv_tpg, port_ptr);
1834 if (ret < 0)
1835 return ret;
1836
1837 return count;
1838}
1839
1840TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
1841
1842static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
1843 &tcm_vhost_tpg_nexus.attr,
1844 NULL,
1845};
1846
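/*
 * Called when userspace creates a "tpgt_<n>" directory under a vhost WWPN
 * in configfs: parse the TPG tag, register the se_portal_group and add the
 * new TPG to the global tcm_vhost_list so it can later be matched against
 * the WWPN passed in by a VHOST_SCSI_SET_ENDPOINT ioctl.
 */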
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001847static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn,
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001848 struct config_group *group,
1849 const char *name)
1850{
1851 struct tcm_vhost_tport *tport = container_of(wwn,
1852 struct tcm_vhost_tport, tport_wwn);
1853
1854 struct tcm_vhost_tpg *tpg;
1855 unsigned long tpgt;
1856 int ret;
1857
1858 if (strstr(name, "tpgt_") != name)
1859 return ERR_PTR(-EINVAL);
1860 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
1861 return ERR_PTR(-EINVAL);
1862
1863 tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
1864 if (!tpg) {
 1865		pr_err("Unable to allocate struct tcm_vhost_tpg\n");
1866 return ERR_PTR(-ENOMEM);
1867 }
1868 mutex_init(&tpg->tv_tpg_mutex);
1869 INIT_LIST_HEAD(&tpg->tv_tpg_list);
1870 tpg->tport = tport;
1871 tpg->tport_tpgt = tpgt;
1872
1873 ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
1874 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1875 if (ret < 0) {
1876 kfree(tpg);
1877 return NULL;
1878 }
1879 mutex_lock(&tcm_vhost_mutex);
1880 list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
1881 mutex_unlock(&tcm_vhost_mutex);
1882
1883 return &tpg->se_tpg;
1884}
1885
1886static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
1887{
1888 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1889 struct tcm_vhost_tpg, se_tpg);
1890
1891 mutex_lock(&tcm_vhost_mutex);
1892 list_del(&tpg->tv_tpg_list);
1893 mutex_unlock(&tcm_vhost_mutex);
1894 /*
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001895 * Release the virtual I_T Nexus for this vhost TPG
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001896 */
1897 tcm_vhost_drop_nexus(tpg);
1898 /*
1899 * Deregister the se_tpg from TCM..
1900 */
1901 core_tpg_deregister(se_tpg);
1902 kfree(tpg);
1903}
1904
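/*
 * Called when userspace creates the WWPN directory itself under
 * /sys/kernel/config/target/vhost/.  The protocol identifier (SAS, FCP or
 * iSCSI) is derived from the "naa.", "fc." or "iqn." prefix of the
 * directory name.
 */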
Nicholas Bellinger101998f2012-07-30 13:30:00 -07001905static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf,
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07001906 struct config_group *group,
1907 const char *name)
1908{
1909 struct tcm_vhost_tport *tport;
1910 char *ptr;
1911 u64 wwpn = 0;
1912 int off = 0;
1913
1914 /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1915 return ERR_PTR(-EINVAL); */
1916
1917 tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
1918 if (!tport) {
 1919		pr_err("Unable to allocate struct tcm_vhost_tport\n");
1920 return ERR_PTR(-ENOMEM);
1921 }
1922 tport->tport_wwpn = wwpn;
1923 /*
1924 * Determine the emulated Protocol Identifier and Target Port Name
1925 * based on the incoming configfs directory name.
1926 */
1927 ptr = strstr(name, "naa.");
1928 if (ptr) {
1929 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
1930 goto check_len;
1931 }
1932 ptr = strstr(name, "fc.");
1933 if (ptr) {
1934 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
1935 off = 3; /* Skip over "fc." */
1936 goto check_len;
1937 }
1938 ptr = strstr(name, "iqn.");
1939 if (ptr) {
1940 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
1941 goto check_len;
1942 }
1943
1944 pr_err("Unable to locate prefix for emulated Target Port:"
1945 " %s\n", name);
1946 kfree(tport);
1947 return ERR_PTR(-EINVAL);
1948
1949check_len:
1950 if (strlen(name) >= TCM_VHOST_NAMELEN) {
1951 pr_err("Emulated %s Address: %s, exceeds"
1952 " max: %d\n", name, tcm_vhost_dump_proto_id(tport),
1953 TCM_VHOST_NAMELEN);
1954 kfree(tport);
1955 return ERR_PTR(-EINVAL);
1956 }
1957 snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
1958
1959 pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
1960 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
1961
1962 return &tport->tport_wwn;
1963}
1964
1965static void tcm_vhost_drop_tport(struct se_wwn *wwn)
1966{
1967 struct tcm_vhost_tport *tport = container_of(wwn,
1968 struct tcm_vhost_tport, tport_wwn);
1969
1970 pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
1971 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
1972 tport->tport_name);
1973
1974 kfree(tport);
1975}
1976
1977static ssize_t tcm_vhost_wwn_show_attr_version(
1978 struct target_fabric_configfs *tf,
1979 char *page)
1980{
1981 return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
1982 "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
1983 utsname()->machine);
1984}
1985
1986TF_WWN_ATTR_RO(tcm_vhost, version);
1987
1988static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
1989 &tcm_vhost_wwn_version.attr,
1990 NULL,
1991};
1992
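/*
 * Fabric callback table handed to the generic target core via
 * tcm_vhost_register_configfs() below.
 */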
1993static struct target_core_fabric_ops tcm_vhost_ops = {
1994 .get_fabric_name = tcm_vhost_get_fabric_name,
1995 .get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident,
1996 .tpg_get_wwn = tcm_vhost_get_fabric_wwn,
1997 .tpg_get_tag = tcm_vhost_get_tag,
1998 .tpg_get_default_depth = tcm_vhost_get_default_depth,
1999 .tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id,
2000 .tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len,
2001 .tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id,
2002 .tpg_check_demo_mode = tcm_vhost_check_true,
2003 .tpg_check_demo_mode_cache = tcm_vhost_check_true,
2004 .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
2005 .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
2006 .tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl,
2007 .tpg_release_fabric_acl = tcm_vhost_release_fabric_acl,
2008 .tpg_get_inst_index = tcm_vhost_tpg_get_inst_index,
2009 .release_cmd = tcm_vhost_release_cmd,
2010 .shutdown_session = tcm_vhost_shutdown_session,
2011 .close_session = tcm_vhost_close_session,
2012 .sess_get_index = tcm_vhost_sess_get_index,
2013 .sess_get_initiator_sid = NULL,
2014 .write_pending = tcm_vhost_write_pending,
2015 .write_pending_status = tcm_vhost_write_pending_status,
2016 .set_default_node_attributes = tcm_vhost_set_default_node_attrs,
2017 .get_task_tag = tcm_vhost_get_task_tag,
2018 .get_cmd_state = tcm_vhost_get_cmd_state,
2019 .queue_data_in = tcm_vhost_queue_data_in,
2020 .queue_status = tcm_vhost_queue_status,
2021 .queue_tm_rsp = tcm_vhost_queue_tm_rsp,
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07002022 /*
 2023	 * Setup callbacks for generic logic in target_core_fabric_configfs.c
2024 */
2025 .fabric_make_wwn = tcm_vhost_make_tport,
2026 .fabric_drop_wwn = tcm_vhost_drop_tport,
2027 .fabric_make_tpg = tcm_vhost_make_tpg,
2028 .fabric_drop_tpg = tcm_vhost_drop_tpg,
2029 .fabric_post_link = tcm_vhost_port_link,
2030 .fabric_pre_unlink = tcm_vhost_port_unlink,
2031 .fabric_make_np = NULL,
2032 .fabric_drop_np = NULL,
2033 .fabric_make_nodeacl = tcm_vhost_make_nodeacl,
2034 .fabric_drop_nodeacl = tcm_vhost_drop_nodeacl,
2035};
2036
2037static int tcm_vhost_register_configfs(void)
2038{
2039 struct target_fabric_configfs *fabric;
2040 int ret;
2041
2042 pr_debug("TCM_VHOST fabric module %s on %s/%s"
2043 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2044 utsname()->machine);
2045 /*
2046 * Register the top level struct config_item_type with TCM core
2047 */
2048 fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
2049 if (IS_ERR(fabric)) {
2050 pr_err("target_fabric_configfs_init() failed\n");
2051 return PTR_ERR(fabric);
2052 }
2053 /*
2054 * Setup fabric->tf_ops from our local tcm_vhost_ops
2055 */
2056 fabric->tf_ops = tcm_vhost_ops;
2057 /*
2058 * Setup default attribute lists for various fabric->tf_cit_tmpl
2059 */
2060 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
2061 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
2062 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
2063 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
2064 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
2065 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2066 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2067 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2068 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2069 /*
2070 * Register the fabric for use within TCM
2071 */
2072 ret = target_fabric_configfs_register(fabric);
2073 if (ret < 0) {
2074 pr_err("target_fabric_configfs_register() failed"
2075 " for TCM_VHOST\n");
2076 return ret;
2077 }
2078 /*
2079 * Setup our local pointer to *fabric
2080 */
2081 tcm_vhost_fabric_configfs = fabric;
2082 pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
2083 return 0;
 2084}
2085
2086static void tcm_vhost_deregister_configfs(void)
2087{
2088 if (!tcm_vhost_fabric_configfs)
2089 return;
2090
2091 target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
2092 tcm_vhost_fabric_configfs = NULL;
2093 pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
 2094}
2095
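/*
 * Module init: bring up the private submission workqueue, the vhost-scsi
 * misc device and the configfs fabric, unwinding in reverse order on
 * failure.  tcm_vhost_exit() tears the three down in the opposite order.
 */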
2096static int __init tcm_vhost_init(void)
2097{
2098 int ret = -ENOMEM;
Nicholas Bellinger101998f2012-07-30 13:30:00 -07002099 /*
2100 * Use our own dedicated workqueue for submitting I/O into
2101 * target core to avoid contention within system_wq.
2102 */
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07002103 tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
2104 if (!tcm_vhost_workqueue)
2105 goto out;
2106
2107 ret = vhost_scsi_register();
2108 if (ret < 0)
2109 goto out_destroy_workqueue;
2110
2111 ret = tcm_vhost_register_configfs();
2112 if (ret < 0)
2113 goto out_vhost_scsi_deregister;
2114
2115 return 0;
2116
2117out_vhost_scsi_deregister:
2118 vhost_scsi_deregister();
2119out_destroy_workqueue:
2120 destroy_workqueue(tcm_vhost_workqueue);
2121out:
2122 return ret;
 2123}
2124
2125static void tcm_vhost_exit(void)
2126{
2127 tcm_vhost_deregister_configfs();
2128 vhost_scsi_deregister();
2129 destroy_workqueue(tcm_vhost_workqueue);
 2130}
2131
Michael S. Tsirkin181c04a2013-05-02 03:52:59 +03002132MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2133MODULE_ALIAS("tcm_vhost");
Nicholas Bellinger057cbf42012-07-18 14:31:32 -07002134MODULE_LICENSE("GPL");
2135module_init(tcm_vhost_init);
2136module_exit(tcm_vhost_exit);