/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

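/*
 * Descriptive note (summarizing the logic below): map the fabric-supplied
 * unpacked_lun onto the backend se_device for an incoming I/O command.  When
 * the initiator has no active MappedLUN=0, LUN 0 falls back to the TPG's
 * tpg_virt_lun0 in read-only mode so that REPORT LUNS and other non-write
 * CDBs still complete.  Per-deve and per-device statistics are updated under
 * the respective locks.
 */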
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return TCM_NON_EXISTENT_LUN;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;
		deve->total_bytes += se_cmd->data_length;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return TCM_WRITE_PROTECTED;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		deve->deve_cmds++;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	/* TODO: get rid of this and use atomics for stats */
	dev = se_lun->lun_se_dev;
	spin_lock_irqsave(&dev->stats_lock, flags);
	dev->num_cmds++;
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		dev->write_bytes += se_cmd->data_length;
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock_irqrestore(&dev->stats_lock, flags);

	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return -ENODEV;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}

int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
	nacl->device_list = NULL;

	return 0;
}

void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	unsigned long flags;

	spin_lock_irqsave(&se_nacl->device_list_lock, flags);
	deve = se_nacl->device_list[se_cmd->orig_fe_lun];
	deve->deve_cmds--;
	spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
}

void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/* core_enable_device_list_for_node():
 *
 *
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);

	deve = nacl->device_list[mapped_lun];

	/*
	 * Check if the call is handling demo mode -> explicit LUN ACL
	 * transition.  This transition must be for the same struct se_lun
	 * + mapped_lun that was setup in demo mode..
	 */
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		if (deve->se_lun_acl != NULL) {
			pr_err("struct se_dev_entry->se_lun_acl"
				" already set for demo mode -> explicit"
				" LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		if (deve->se_lun != lun) {
			pr_err("struct se_dev_entry->se_lun does not"
				" match passed struct se_lun for demo mode"
				" -> explicit LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		deve->se_lun_acl = lun_acl;

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		spin_unlock_irq(&nacl->device_list_lock);
		return 0;
	}

	deve->se_lun = lun;
	deve->se_lun_acl = lun_acl;
	deve->mapped_lun = mapped_lun;
	deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}

	deve->creation_time = get_jiffies_64();
	deve->attach_count++;
	spin_unlock_irq(&nacl->device_list_lock);

	spin_lock_bh(&port->sep_alua_lock);
	list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
	spin_unlock_bh(&port->sep_alua_lock);

	return 0;
}

/* core_disable_device_list_for_node():
 *
 *
 */
int core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = nacl->device_list[mapped_lun];

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->alua_port_list from
	 * port->sep_alua_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock_bh(&port->sep_alua_lock);
	list_del(&deve->alua_port_list);
	spin_unlock_bh(&port->sep_alua_lock);
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();

	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}

/* core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_irq(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_disable_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_irq(&tpg->acl_node_lock);
	}
	spin_unlock_irq(&tpg->acl_node_lock);
}

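/*
 * Descriptive note (summarizing the allocation loop below): each new port is
 * assigned the next RELATIVE TARGET PORT IDENTIFIER from dev_rpti_counter.
 * Value 0 is reserved by SPC-4 and skipped, and dev->dev_sep_list is scanned
 * so that a value already in use after a 16-bit wrap is retried.  As an
 * illustrative example (values hypothetical): if the counter has wrapped and
 * the next candidate collides with an existing port's sep_rtpi, the "again"
 * label simply advances to the following value.
 */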
static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}

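/*
 * Descriptive note (summarizing the export path below): the new port is
 * linked to its TPG and LUN, and for backends other than pSCSI passthrough
 * on non-internal HBAs it is attached to the device's default ALUA target
 * port group (alua/default_tg_pt_gp) via a freshly allocated tg_pt_gp_mem.
 */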
static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 *	Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	if (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}

int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;

	spin_lock(&hba->device_lock);
	dev->export_count++;
	spin_unlock(&hba->device_lock);

	core_export_port(dev, tpg, port, lun);
	return 0;
}

void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	spin_lock(&hba->device_lock);
	dev->export_count--;
	spin_unlock(&hba->device_lock);

	lun->lun_se_dev = NULL;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

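/*
 * Worked example for the rounding below (assuming a PAGE_SIZE of 4096):
 * with a 512-byte block_size the alignment is max(1, 4096/512) = 8 sectors,
 * so a requested max_sectors of 1023 is rounded down to 1016; with a
 * 4096-byte block_size the alignment is 1 and the value is left unchanged.
 */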
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
			dev, dev->dev_attrib.max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
			dev, dev->dev_attrib.max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
			dev, dev->dev_attrib.unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
			dev, dev->dev_attrib.unmap_granularity_alignment);
	return 0;
}

int se_dev_set_max_write_same_len(
	struct se_device *dev,
	u32 max_write_same_len)
{
	dev->dev_attrib.max_write_same_len = max_write_same_len;
	pr_debug("dev[%p]: Set max_write_same_len: %u\n",
			dev, dev->dev_attrib.max_write_same_len);
	return 0;
}

static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= 16) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for "
			"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
			configname);
	}
	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
}

int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		dev_set_t10_wwn_model_alias(dev);
	} else {
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
	}
	dev->dev_attrib.emulate_model_alias = flag;

	return 0;
}

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("dpo_emulated not supported\n");
		return -EINVAL;
	}

	return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_fua_write not supported for pSCSI\n");
		return -EINVAL;
	}
	dev->dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
			dev, dev->dev_attrib.emulate_fua_write);
	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("ua read emulated not supported\n");
		return -EINVAL;
	}

	return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_write_cache not supported for pSCSI\n");
		return -EINVAL;
	}
	if (dev->transport->get_write_cache) {
		pr_warn("emulate_write_cache cannot be changed when underlying"
			" HW reports WriteCacheEnabled, ignoring request\n");
		return 0;
	}

	dev->dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			dev, dev->dev_attrib.emulate_write_cache);
	return 0;
}

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected by iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
				dev, flag);
	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected by iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
				dev, flag);
	return 0;
}

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
	       dev, flag);
	return 0;
}

int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		printk(KERN_ERR "dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}

/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	} else {
		if (queue_depth > dev->dev_attrib.queue_depth) {
			if (queue_depth > dev->dev_attrib.hw_queue_depth) {
				pr_err("dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					dev->dev_attrib.hw_queue_depth);
				return -EINVAL;
			}
		}
	}

	dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
			dev, queue_depth);
	return 0;
}

int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
	int block_size = dev->dev_attrib.block_size;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" fabric_max_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!fabric_max_sectors) {
		pr_err("dev[%p]: Illegal ZERO value for"
			" fabric_max_sectors\n", dev);
		return -EINVAL;
	}
	if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, fabric_max_sectors,
				dev->dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, fabric_max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	if (!block_size) {
		block_size = 512;
		pr_warn("Defaulting to 512 for zero block_size\n");
	}
	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
						      block_size);

	dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
			dev, fabric_max_sectors);
	return 0;
}

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Passed optimal_sectors cannot be"
				" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than fabric_max_sectors: %u\n", dev,
			optimal_sectors, dev->dev_attrib.fabric_max_sectors);
		return -EINVAL;
	}

	dev->dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			dev, optimal_sectors);
	return 0;
}

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_device: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Not allowed to change block_size for"
			" Physical Device, use for Linux/SCSI to change"
			" block_size for underlying hardware\n", dev);
		return -EINVAL;
	}

	dev->dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			dev, block_size);
	return 0;
}

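/*
 * Descriptive note (summarizing the function below): add a LUN to the TPG
 * and activate it read-write via core_tpg_pre_addlun()/core_tpg_post_addlun().
 * If the fabric module runs in demo mode, dynamically generated node ACLs
 * are rescanned afterwards so initiators that logged in without explicit
 * configuration also pick up the newly added LUN.
 */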
1098struct se_lun *core_dev_add_lun(
1099 struct se_portal_group *tpg,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001100 struct se_device *dev,
1101 u32 lun)
1102{
1103 struct se_lun *lun_p;
Sebastian Andrzej Siewior8d9efe52012-01-11 21:43:38 +01001104 int rc;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001105
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001106 lun_p = core_tpg_pre_addlun(tpg, lun);
Sebastian Andrzej Siewior8d9efe52012-01-11 21:43:38 +01001107 if (IS_ERR(lun_p))
1108 return lun_p;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001109
Nicholas Bellinger58d92612012-03-20 21:26:48 -07001110 rc = core_tpg_post_addlun(tpg, lun_p,
1111 TRANSPORT_LUNFLAGS_READ_WRITE, dev);
Sebastian Andrzej Siewior8d9efe52012-01-11 21:43:38 +01001112 if (rc < 0)
1113 return ERR_PTR(rc);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001114
Andy Grover6708bb22011-06-08 10:36:43 -07001115 pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
Andy Grovere3d6f902011-07-19 08:55:10 +00001116 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1117 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
Andy Grover2dca6732012-07-12 17:34:55 -07001118 tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001119 /*
1120 * Update LUN maps for dynamically added initiators when
1121 * generate_node_acl is enabled.
1122 */
Andy Grovere3d6f902011-07-19 08:55:10 +00001123 if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001124 struct se_node_acl *acl;
Roland Dreier28638882011-08-16 09:40:01 -07001125 spin_lock_irq(&tpg->acl_node_lock);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001126 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
Nicholas Bellinger052605c2011-07-26 17:48:43 -07001127 if (acl->dynamic_node_acl &&
1128 (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
1129 !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
Roland Dreier28638882011-08-16 09:40:01 -07001130 spin_unlock_irq(&tpg->acl_node_lock);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001131 core_tpg_add_node_to_devs(acl, tpg);
Roland Dreier28638882011-08-16 09:40:01 -07001132 spin_lock_irq(&tpg->acl_node_lock);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001133 }
1134 }
Roland Dreier28638882011-08-16 09:40:01 -07001135 spin_unlock_irq(&tpg->acl_node_lock);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001136 }
1137
1138 return lun_p;
1139}
1140
1141/* core_dev_del_lun():
1142 *
1143 *
1144 */
1145int core_dev_del_lun(
1146 struct se_portal_group *tpg,
1147 u32 unpacked_lun)
1148{
1149 struct se_lun *lun;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001150
Sebastian Andrzej Siewior8d9efe52012-01-11 21:43:38 +01001151 lun = core_tpg_pre_dellun(tpg, unpacked_lun);
1152 if (IS_ERR(lun))
1153 return PTR_ERR(lun);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001154
1155 core_tpg_post_dellun(tpg, lun);
1156
Andy Grover6708bb22011-06-08 10:36:43 -07001157 pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
Andy Grovere3d6f902011-07-19 08:55:10 +00001158 " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
1159 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
1160 tpg->se_tpg_tfo->get_fabric_name());
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001161
1162 return 0;
1163}

struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
        struct se_lun *lun;

        spin_lock(&tpg->tpg_lun_lock);
        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
                        "_PER_TPG-1: %u for Target Portal Group: %hu\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        TRANSPORT_MAX_LUNS_PER_TPG-1,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return NULL;
        }
        lun = tpg->tpg_lun_list[unpacked_lun];

        if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
                pr_err("%s Logical Unit Number: %u is not free on"
                        " Target Portal Group: %hu, ignoring request.\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return NULL;
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}

/* core_dev_get_lun():
 *
 * Look up an ACTIVE LUN within a Target Portal Group by unpacked_lun.
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
        struct se_lun *lun;

        spin_lock(&tpg->tpg_lun_lock);
        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
                        "_TPG-1: %u for Target Portal Group: %hu\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        TRANSPORT_MAX_LUNS_PER_TPG-1,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return NULL;
        }
        lun = tpg->tpg_lun_list[unpacked_lun];

        if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
                pr_err("%s Logical Unit Number: %u is not active on"
                        " Target Portal Group: %hu, ignoring request.\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return NULL;
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}
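
/*
 * Note on the two lookup helpers above: core_get_lun_from_tpg() only
 * returns a LUN whose slot is still TRANSPORT_LUN_STATUS_FREE (i.e. a
 * slot that may be claimed by a new export), while core_dev_get_lun()
 * only returns a LUN that is already TRANSPORT_LUN_STATUS_ACTIVE and
 * can therefore be mapped into an initiator node ACL below.
 */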

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        u32 mapped_lun,
        char *initiatorname,
        int *ret)
{
        struct se_lun_acl *lacl;
        struct se_node_acl *nacl;

        if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
                pr_err("%s InitiatorName exceeds maximum size.\n",
                        tpg->se_tpg_tfo->get_fabric_name());
                *ret = -EOVERFLOW;
                return NULL;
        }
        nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (!nacl) {
                *ret = -EINVAL;
                return NULL;
        }
        lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
        if (!lacl) {
                pr_err("Unable to allocate memory for struct se_lun_acl.\n");
                *ret = -ENOMEM;
                return NULL;
        }

        INIT_LIST_HEAD(&lacl->lacl_list);
        lacl->mapped_lun = mapped_lun;
        lacl->se_lun_nacl = nacl;
        snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);

        return lacl;
}

int core_dev_add_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun_acl *lacl,
        u32 unpacked_lun,
        u32 lun_access)
{
        struct se_lun *lun;
        struct se_node_acl *nacl;

        lun = core_dev_get_lun(tpg, unpacked_lun);
        if (!lun) {
                pr_err("%s Logical Unit Number: %u is not active on"
                        " Target Portal Group: %hu, ignoring request.\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                return -EINVAL;
        }

        nacl = lacl->se_lun_nacl;
        if (!nacl)
                return -EINVAL;

        if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
            (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

        lacl->se_lun = lun;

        if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
                        lun_access, nacl, tpg) < 0)
                return -EINVAL;

        spin_lock(&lun->lun_acl_lock);
        list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
        atomic_inc(&lun->lun_acl_count);
        smp_mb__after_atomic_inc();
        spin_unlock(&lun->lun_acl_lock);

        pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
                " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
                (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
                lacl->initiatorname);
        /*
         * Check to see if there are any existing persistent reservation APTPL
         * pre-registrations that need to be enabled for this LUN ACL.
         */
        core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
        return 0;
}
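
/*
 * Illustrative sketch (not part of the original file): a fabric configfs
 * path would typically pair the two helpers above, and undo the init step
 * if the add fails.  The local variable names here are hypothetical:
 *
 *      struct se_lun_acl *lacl;
 *      int ret;
 *
 *      lacl = core_dev_init_initiator_node_lun_acl(tpg, mapped_lun,
 *                                                  initiatorname, &ret);
 *      if (!lacl)
 *              return ret;
 *
 *      ret = core_dev_add_initiator_node_lun_acl(tpg, lacl, unpacked_lun,
 *                                      TRANSPORT_LUNFLAGS_READ_WRITE);
 *      if (ret < 0)
 *              core_dev_free_initiator_node_lun_acl(tpg, lacl);
 */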

/* core_dev_del_initiator_node_lun_acl():
 *
 * Remove an initiator node's mapped LUN ACL from the given active LUN.
 */
int core_dev_del_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        struct se_lun_acl *lacl)
{
        struct se_node_acl *nacl;

        nacl = lacl->se_lun_nacl;
        if (!nacl)
                return -EINVAL;

        spin_lock(&lun->lun_acl_lock);
        list_del(&lacl->lacl_list);
        atomic_dec(&lun->lun_acl_count);
        smp_mb__after_atomic_dec();
        spin_unlock(&lun->lun_acl_lock);

        core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
                TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

        lacl->se_lun = NULL;

        pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
                " InitiatorNode: %s Mapped LUN: %u\n",
                tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                lacl->initiatorname, lacl->mapped_lun);

        return 0;
}

void core_dev_free_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun_acl *lacl)
{
        pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
                " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg),
                tpg->se_tpg_tfo->get_fabric_name(),
                lacl->initiatorname, lacl->mapped_lun);

        kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
        struct t10_wwn *wwn = &dev->t10_wwn;
        char buf[17];
        int i, device_type;
        /*
         * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
         */
        for (i = 0; i < 8; i++)
                if (wwn->vendor[i] >= 0x20)
                        buf[i] = wwn->vendor[i];
                else
                        buf[i] = ' ';
        buf[i] = '\0';
        pr_debug("  Vendor: %s\n", buf);

        for (i = 0; i < 16; i++)
                if (wwn->model[i] >= 0x20)
                        buf[i] = wwn->model[i];
                else
                        buf[i] = ' ';
        buf[i] = '\0';
        pr_debug("  Model: %s\n", buf);

        for (i = 0; i < 4; i++)
                if (wwn->revision[i] >= 0x20)
                        buf[i] = wwn->revision[i];
                else
                        buf[i] = ' ';
        buf[i] = '\0';
        pr_debug("  Revision: %s\n", buf);

        device_type = dev->transport->get_device_type(dev);
        pr_debug("  Type:   %s ", scsi_device_type(device_type));
}
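
/*
 * Note (illustrative): for virtual backends the values dumped above come
 * from the defaults applied in target_configure_device() below, i.e. a
 * Vendor of "LIO-ORG" with Model/Revision taken from the backend's
 * inquiry_prod/inquiry_rev strings; pSCSI passthrough reports whatever
 * the underlying LLD provided.
 */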

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
        struct se_device *dev;

        dev = hba->transport->alloc_device(hba, name);
        if (!dev)
                return NULL;

        dev->dev_link_magic = SE_DEV_LINK_MAGIC;
        dev->se_hba = hba;
        dev->transport = hba->transport;

        INIT_LIST_HEAD(&dev->dev_list);
        INIT_LIST_HEAD(&dev->dev_sep_list);
        INIT_LIST_HEAD(&dev->dev_tmr_list);
        INIT_LIST_HEAD(&dev->delayed_cmd_list);
        INIT_LIST_HEAD(&dev->state_list);
        INIT_LIST_HEAD(&dev->qf_cmd_list);
        spin_lock_init(&dev->stats_lock);
        spin_lock_init(&dev->execute_task_lock);
        spin_lock_init(&dev->delayed_cmd_lock);
        spin_lock_init(&dev->dev_reservation_lock);
        spin_lock_init(&dev->se_port_lock);
        spin_lock_init(&dev->se_tmr_lock);
        spin_lock_init(&dev->qf_cmd_lock);
        atomic_set(&dev->dev_ordered_id, 0);
        INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&dev->t10_pr.registration_list);
        INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
        spin_lock_init(&dev->t10_pr.registration_lock);
        spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
        INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
        spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);

        dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
        dev->t10_wwn.t10_dev = dev;
        dev->t10_alua.t10_dev = dev;

        dev->dev_attrib.da_dev = dev;
        dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
        dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
        dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
        dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
        dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
        dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
        dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
        dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
        dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
        dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
        dev->dev_attrib.is_nonrot = DA_IS_NONROT;
        dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
        dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
        dev->dev_attrib.max_unmap_block_desc_count =
                DA_MAX_UNMAP_BLOCK_DESC_COUNT;
        dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
        dev->dev_attrib.unmap_granularity_alignment =
                DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
        dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
        dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
        dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;

        return dev;
}
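
/*
 * Note (illustrative): target_alloc_device() only allocates and initializes
 * the generic se_device state; callers are expected to apply backend
 * parameters via ->set_configfs_dev_params() and then call
 * target_configure_device() before the device can be exported.
 * core_dev_setup_virtual_lun0() at the bottom of this file follows exactly
 * that sequence, with target_free_device() used for teardown.
 */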

int target_configure_device(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;
        int ret;

        if (dev->dev_flags & DF_CONFIGURED) {
                pr_err("se_device has already been configured for this"
                        " storage object\n");
                return -EEXIST;
        }

        ret = dev->transport->configure_device(dev);
        if (ret)
                goto out;
        dev->dev_flags |= DF_CONFIGURED;

        /*
         * XXX: there is not much point in having two different values here.
         */
        dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
        dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

        /*
         * Align max_hw_sectors down to PAGE_SIZE I/O transfers
         */
        dev->dev_attrib.hw_max_sectors =
                se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
                                         dev->dev_attrib.hw_block_size);

        dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
        dev->creation_time = get_jiffies_64();

        ret = core_setup_alua(dev);
        if (ret)
                goto out;

        /*
         * Set up the per device TMR workqueue
         */
        dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
                                      dev->transport->name);
        if (!dev->tmr_wq) {
                pr_err("Unable to create tmr workqueue for %s\n",
                        dev->transport->name);
                ret = -ENOMEM;
                goto out_free_alua;
        }

        /*
         * Setup work_queue for QUEUE_FULL
         */
        INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

        /*
         * Preload the initial INQUIRY const values if we are doing
         * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
         * passthrough because this is being provided by the backend LLD.
         */
        if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
                strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
                strncpy(&dev->t10_wwn.model[0],
                        dev->transport->inquiry_prod, 16);
                strncpy(&dev->t10_wwn.revision[0],
                        dev->transport->inquiry_rev, 4);
        }

        scsi_dump_inquiry(dev);

        spin_lock(&hba->device_lock);
        hba->dev_count++;
        spin_unlock(&hba->device_lock);
        return 0;

out_free_alua:
        core_alua_free_lu_gp_mem(dev);
out:
        se_release_vpd_for_dev(dev);
        return ret;
}

void target_free_device(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;

        WARN_ON(!list_empty(&dev->dev_sep_list));

        if (dev->dev_flags & DF_CONFIGURED) {
                destroy_workqueue(dev->tmr_wq);

                spin_lock(&hba->device_lock);
                hba->dev_count--;
                spin_unlock(&hba->device_lock);
        }

        core_alua_free_lu_gp_mem(dev);
        core_scsi3_free_all_registrations(dev);
        se_release_vpd_for_dev(dev);

        dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
        struct se_hba *hba;
        struct se_device *dev;
        char buf[16];
        int ret;

        hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
        if (IS_ERR(hba))
                return PTR_ERR(hba);

        dev = target_alloc_device(hba, "virt_lun0");
        if (!dev) {
                ret = -ENOMEM;
                goto out_free_hba;
        }

        memset(buf, 0, 16);
        sprintf(buf, "rd_pages=8");
        hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));

        ret = target_configure_device(dev);
        if (ret)
                goto out_free_se_dev;

        lun0_hba = hba;
        g_lun0_dev = dev;
        return 0;

out_free_se_dev:
        target_free_device(dev);
out_free_hba:
        core_delete_hba(hba);
        return ret;
}

void core_dev_release_virtual_lun0(void)
{
        struct se_hba *hba = lun0_hba;

        if (!hba)
                return;

        if (g_lun0_dev)
                target_free_device(g_lun0_dev);
        core_delete_hba(hba);
}