/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

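/*
 * Map the fabric-supplied unpacked LUN to this session's struct se_dev_entry
 * and struct se_lun, update the per-entry and per-device I/O statistics, and
 * link the command onto the LUN's command list.  When no MappedLUN 0 exists
 * for the initiator, LUN 0 falls back to the TPG's write-protected virtual
 * LUN 0 so that REPORT_LUNS, et al still work.
 */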
int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;
		deve->total_bytes += se_cmd->data_length;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return -EACCES;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		deve->deve_cmds++;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return -ENODEV;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			return -EACCES;
		}

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	/* TODO: get rid of this and use atomics for stats */
	dev = se_lun->lun_se_dev;
	spin_lock_irqsave(&dev->stats_lock, flags);
	dev->num_cmds++;
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		dev->write_bytes += se_cmd->data_length;
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock_irqrestore(&dev->stats_lock, flags);

	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

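/*
 * Resolve the unpacked LUN for a task management request, associate the TMR
 * with the backing struct se_device, and queue it on the device's TMR list.
 */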
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entries port pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}

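/*
 * Disable and release every struct se_dev_entry attached to this node ACL,
 * then free the device_list array itself.
 */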
int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
	nacl->device_list = NULL;

	return 0;
}

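/*
 * Drop the per-entry command count taken in transport_lookup_cmd_lun().
 */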
void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	unsigned long flags;

	spin_lock_irqsave(&se_nacl->device_list_lock, flags);
	deve = se_nacl->device_list[se_cmd->orig_fe_lun];
	deve->deve_cmds--;
	spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
}

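/*
 * Flip an existing MappedLUN entry between READ_WRITE and READ_ONLY access
 * under the node ACL's device_list_lock.
 */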
void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/* core_enable_device_list_for_node():
 *
 * Create or update the struct se_dev_entry that maps mapped_lun to the
 * passed struct se_lun for the given node ACL, handling the demo mode
 * -> explicit LUN ACL transition when an entry already exists.
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);

	deve = nacl->device_list[mapped_lun];

	/*
	 * Check if the call is handling demo mode -> explicit LUN ACL
	 * transition.  This transition must be for the same struct se_lun
	 * + mapped_lun that was setup in demo mode..
	 */
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		if (deve->se_lun_acl != NULL) {
			pr_err("struct se_dev_entry->se_lun_acl"
				" already set for demo mode -> explicit"
				" LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		if (deve->se_lun != lun) {
			pr_err("struct se_dev_entry->se_lun does"
				" not match passed struct se_lun for demo mode"
				" -> explicit LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		deve->se_lun_acl = lun_acl;

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		spin_unlock_irq(&nacl->device_list_lock);
		return 0;
	}

	deve->se_lun = lun;
	deve->se_lun_acl = lun_acl;
	deve->mapped_lun = mapped_lun;
	deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}

	deve->creation_time = get_jiffies_64();
	deve->attach_count++;
	spin_unlock_irq(&nacl->device_list_lock);

	spin_lock_bh(&port->sep_alua_lock);
	list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
	spin_unlock_bh(&port->sep_alua_lock);

	return 0;
}

/* core_disable_device_list_for_node():
 *
 * Tear down a MappedLUN struct se_dev_entry, releasing any Unit
 * Attention and Persistent Reservation state associated with it.
 */
int core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = nacl->device_list[mapped_lun];

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->alua_port_list from
	 * port->sep_alua_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock_bh(&port->sep_alua_lock);
	list_del(&deve->alua_port_list);
	spin_unlock_bh(&port->sep_alua_lock);
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();

	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}

/* core_clear_lun_from_tpg():
 *
 * Disable every MappedLUN entry in the TPG's node ACLs that still
 * references the struct se_lun being removed.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_irq(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_disable_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_irq(&tpg->acl_node_lock);
	}
	spin_unlock_irq(&tpg->acl_node_lock);
}

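/*
 * Allocate a struct se_port and assign it the next free RELATIVE TARGET PORT
 * IDENTIFIER, skipping zero and any identifier already in use on the device.
 */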
static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}

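/*
 * Wire a newly allocated port to its TPG and LUN, and attach it to the
 * device's default ALUA target port group unless the backend is pSCSI or the
 * HBA is flagged for internal use.
 */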
static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 * Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	if (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}

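/*
 * Export a device as a LUN within a TPG: allocate the port, bump the
 * device's export_count and publish the port mappings.
 */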
int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;

	spin_lock(&hba->device_lock);
	dev->export_count++;
	spin_unlock(&hba->device_lock);

	core_export_port(dev, tpg, port, lun);
	return 0;
}

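/*
 * Reverse of core_dev_export(): release the port and drop export_count.
 */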
void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	spin_lock(&hba->device_lock);
	dev->export_count--;
	spin_unlock(&hba->device_lock);

	lun->lun_se_dev = NULL;
}

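/*
 * Free all cached T10 VPD entries collected for this device's WWN.
 */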
static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

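/*
 * Round max_sectors down so that the resulting transfer length stays
 * PAGE_SIZE aligned for the given backend block size.
 */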
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

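/*
 * The se_dev_set_*() helpers below validate and apply the per-device
 * attributes kept in dev->dev_attrib.  Several of them refuse changes while
 * the device is exported (export_count != 0) or when the pSCSI passthrough
 * backend is in use.
 */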
int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
			dev, dev->dev_attrib.max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
			dev, dev->dev_attrib.max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
			dev, dev->dev_attrib.unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
			dev, dev->dev_attrib.unmap_granularity_alignment);
	return 0;
}

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("dpo emulation not supported\n");
		return -EINVAL;
	}

	return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_fua_write not supported for pSCSI\n");
		return -EINVAL;
	}
	dev->dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
			dev, dev->dev_attrib.emulate_fua_write);
	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("fua read emulation not supported\n");
		return -EINVAL;
	}

	return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_write_cache not supported for pSCSI\n");
		return -EINVAL;
	}
	dev->dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			dev, dev->dev_attrib.emulate_write_cache);
	return 0;
}

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
				dev, flag);
	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
				dev, flag);
	return 0;
}

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
	       dev, flag);
	return 0;
}

int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}

/*
 * Note: this can only be called on an unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	} else {
		if (queue_depth > dev->dev_attrib.queue_depth) {
			if (queue_depth > dev->dev_attrib.hw_queue_depth) {
				pr_err("dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					dev->dev_attrib.hw_queue_depth);
				return -EINVAL;
			}
		}
	}

	dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
		dev, queue_depth);
	return 0;
}

int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" fabric_max_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!fabric_max_sectors) {
		pr_err("dev[%p]: Illegal ZERO value for"
			" fabric_max_sectors\n", dev);
		return -EINVAL;
	}
	if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, fabric_max_sectors,
				dev->dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, fabric_max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
						      dev->dev_attrib.block_size);

	dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
		 dev, fabric_max_sectors);
	return 0;
}

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Passed optimal_sectors cannot be"
			" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than fabric_max_sectors: %u\n", dev,
			optimal_sectors, dev->dev_attrib.fabric_max_sectors);
		return -EINVAL;
	}

	dev->dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			dev, optimal_sectors);
	return 0;
}

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Not allowed to change block_size for"
			" Physical Device, use for Linux/SCSI to change"
			" block_size for underlying hardware\n", dev);
		return -EINVAL;
	}

	dev->dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			dev, block_size);
	return 0;
}

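/*
 * Create and activate a LUN for @dev inside @tpg and, when the fabric runs
 * in demo mode, map the new LUN into any dynamically generated node ACLs.
 */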
struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	u32 lun)
{
	struct se_lun *lun_p;
	int rc;

	lun_p = core_tpg_pre_addlun(tpg, lun);
	if (IS_ERR(lun_p))
		return lun_p;

	rc = core_tpg_post_addlun(tpg, lun_p,
				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return ERR_PTR(rc);

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_irq(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				spin_unlock_irq(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_irq(&tpg->acl_node_lock);
			}
		}
		spin_unlock_irq(&tpg->acl_node_lock);
	}

	return lun_p;
}

/* core_dev_del_lun():
 *
 * Deactivate and remove an exported LUN from a TPG.
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return PTR_ERR(lun);

	core_tpg_post_dellun(tpg, lun);

	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	return 0;
}

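/*
 * Return the struct se_lun for @unpacked_lun only while it is still in
 * TRANSPORT_LUN_STATUS_FREE state; compare core_dev_get_lun() below, which
 * requires an active LUN.
 */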
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		pr_err("%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

/* core_dev_get_lun():
 *
 * Look up @unpacked_lun in the TPG and return it only if it is in
 * TRANSPORT_LUN_STATUS_ACTIVE state.
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

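/* core_dev_init_initiator_node_lun_acl():
 *
 * Allocate and initialize a MappedLUN ACL for @initiatorname.  On failure
 * NULL is returned and *ret carries the errno; on success the ACL is
 * expected to be activated via core_dev_add_initiator_node_lun_acl().
 */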
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	u32 mapped_lun,
	char *initiatorname,
	int *ret)
{
	struct se_lun_acl *lacl;
	struct se_node_acl *nacl;

	if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!nacl) {
		*ret = -EINVAL;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);

	return lacl;
}

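/* core_dev_add_initiator_node_lun_acl():
 *
 * Attach a previously allocated MappedLUN ACL to an ACTIVE LUN.  Note that
 * a READ_WRITE request is silently demoted to READ_ONLY when the backing
 * LUN itself is read-only.
 */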
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!lun) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
	return 0;
}

/* core_dev_del_initiator_node_lun_acl():
 *
 * Unlink a MappedLUN ACL from its struct se_lun and disable the matching
 * device list entry for the initiator node.
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic_dec();
	spin_unlock(&lun->lun_acl_lock);

	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

	lacl->se_lun = NULL;

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

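/* core_dev_free_initiator_node_lun_acl():
 *
 * Release a MappedLUN ACL previously detached with
 * core_dev_del_initiator_node_lun_acl().
 */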
void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

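/* scsi_dump_inquiry():
 *
 * Log the Vendor/Model/Revision INQUIRY identifiers of @dev, substituting
 * spaces for any non-printable bytes (below 0x20).
 */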
static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug(" Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug(" Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug(" Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug(" Type: %s ", scsi_device_type(device_type));
	pr_debug(" ANSI SCSI revision: %02x\n",
		dev->transport->get_device_rev(dev));
}

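/* target_alloc_device():
 *
 * Ask the backend (hba->transport) for a new se_device and preload the
 * generic lists, locks and device attribute defaults.  The device is not
 * yet usable; the expected sequence, as used by core_dev_setup_virtual_lun0()
 * below, is roughly:
 *
 *	dev = target_alloc_device(hba, name);
 *	hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));
 *	ret = target_configure_device(dev);
 *	...
 *	target_free_device(dev);
 */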
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;

	dev = hba->transport->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->se_hba = hba;
	dev->transport = hba->transport;

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->stats_lock);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	atomic_set(&dev->dev_ordered_id, 0);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);

	dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
	dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;

	return dev;
}

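/* target_configure_device():
 *
 * Second stage of device setup: let the backend configure itself, derive
 * block_size/queue_depth from the hardware values, set up ALUA plus the
 * TMR and QUEUE_FULL work queues, and mark the device DF_CONFIGURED.
 * Returns -EEXIST when called twice for the same se_device.
 */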
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_device already configured for storage object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	dev->dev_flags |= DF_CONFIGURED;

	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);
	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

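/* target_free_device():
 *
 * Tear down what target_configure_device() set up (only when DF_CONFIGURED
 * is present) and hand the se_device back to the backend.
 */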
void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	dev->transport->free_device(dev);
}

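/* core_dev_setup_virtual_lun0():
 *
 * Create the global rd_mcp backed g_lun0_dev (an 8 page ramdisk) that
 * backs the virtual LUN 0 exported to fabric TPGs.
 */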
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[16];
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	memset(buf, 0, 16);
	sprintf(buf, "rd_pages=8");
	hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

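/* core_dev_release_virtual_lun0():
 *
 * Undo core_dev_setup_virtual_lun0(); safe to call even when setup never
 * completed, since both lun0_hba and g_lun0_dev are checked first.
 */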
void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}