/*******************************************************************************
 * Filename: target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

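/*
 * Map a se_cmd to the se_lun/se_device behind @unpacked_lun for the I/O
 * path: look the LUN up in the session's node ACL device list, enforce
 * read-only access, take a percpu lun_ref, and fall back to the TPG's
 * virtual LUN 0 so REPORT_LUNS still works when no MappedLUN=0 exists.
 */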
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return TCM_NON_EXISTENT_LUN;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return TCM_WRITE_PROTECTED;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	dev = se_lun->lun_se_dev;
	atomic_long_inc(&dev->num_cmds);
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length, &dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length, &dev->read_bytes);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

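/*
 * Same lookup for the task management path: resolve @unpacked_lun to a
 * se_lun/se_device and queue the se_tmr_req on the device's TMR list.
 */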
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return -ENODEV;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entries port pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}

int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
	nacl->device_list = NULL;

	return 0;
}

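/*
 * Flip an existing mapped LUN entry between READ-WRITE and READ-ONLY
 * access under the node ACL's device_list_lock.
 */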
void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/* core_enable_device_list_for_node():
 *
 * Create or update the struct se_dev_entry that maps @lun to @nacl as
 * @mapped_lun, handling the demo mode -> explicit LUN ACL transition.
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);

	deve = nacl->device_list[mapped_lun];

	/*
	 * Check if the call is handling demo mode -> explicit LUN ACL
	 * transition. This transition must be for the same struct se_lun
	 * + mapped_lun that was setup in demo mode..
	 */
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		if (deve->se_lun_acl != NULL) {
			pr_err("struct se_dev_entry->se_lun_acl"
				" already set for demo mode -> explicit"
				" LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		if (deve->se_lun != lun) {
			pr_err("struct se_dev_entry->se_lun does"
				" not match passed struct se_lun for demo mode"
				" -> explicit LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		deve->se_lun_acl = lun_acl;

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		spin_unlock_irq(&nacl->device_list_lock);
		return 0;
	}

	deve->se_lun = lun;
	deve->se_lun_acl = lun_acl;
	deve->mapped_lun = mapped_lun;
	deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}

	deve->creation_time = get_jiffies_64();
	deve->attach_count++;
	spin_unlock_irq(&nacl->device_list_lock);

	spin_lock_bh(&port->sep_alua_lock);
	list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
	spin_unlock_bh(&port->sep_alua_lock);

	return 0;
}

/* core_disable_device_list_for_node():
 *
 * Remove the struct se_dev_entry mapping for @mapped_lun from @nacl and
 * release any Unit Attention and Persistent Reservation state tied to it.
 */
int core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = nacl->device_list[mapped_lun];

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->alua_port_list from
	 * port->sep_alua_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock_bh(&port->sep_alua_lock);
	list_del(&deve->alua_port_list);
	spin_unlock_bh(&port->sep_alua_lock);
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();

	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}

/* core_clear_lun_from_tpg():
 *
 * Drop every node ACL mapping in @tpg that points at @lun.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_irq(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_disable_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_irq(&tpg->acl_node_lock);
	}
	spin_unlock_irq(&tpg->acl_node_lock);
}

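/*
 * Allocate a struct se_port and assign it the next free 16-bit
 * RELATIVE TARGET PORT IDENTIFIER for @dev, per the spc4r17 table below.
 */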
static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code		Description
	 * 0h		Reserved
	 * 1h		Relative port 1, historically known as port A
	 * 2h		Relative port 2, historically known as port B
	 * 3h to FFFFh	Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}

static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 * Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	if (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}

int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;

	spin_lock(&hba->device_lock);
	dev->export_count++;
	spin_unlock(&hba->device_lock);

	core_export_port(dev, tpg, port, lun);
	return 0;
}

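/*
 * Undo core_dev_export(): release the se_port backing @lun and drop the
 * device's export_count.
 */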
void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	spin_lock(&hba->device_lock);
	dev->export_count--;
	spin_unlock(&hba->device_lock);

	lun->lun_sep = NULL;
	lun->lun_se_dev = NULL;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

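/*
 * The se_dev_set_*() helpers below back the per-device configfs attributes
 * (typically reached from userspace via writes under
 * /sys/kernel/config/target/core/$HBA/$DEV/attrib/).  Most of them refuse
 * to change an attribute while the device is exported through a fabric,
 * i.e. while dev->export_count is non-zero.
 */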
int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
			dev, dev->dev_attrib.max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
			dev, dev->dev_attrib.max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
			dev, dev->dev_attrib.unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
			dev, dev->dev_attrib.unmap_granularity_alignment);
	return 0;
}

int se_dev_set_max_write_same_len(
	struct se_device *dev,
	u32 max_write_same_len)
{
	dev->dev_attrib.max_write_same_len = max_write_same_len;
	pr_debug("dev[%p]: Set max_write_same_len: %u\n",
		dev, dev->dev_attrib.max_write_same_len);
	return 0;
}

static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= 16) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for "
			"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
			configname);
	}
	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
}

int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		dev_set_t10_wwn_model_alias(dev);
	} else {
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
	}
	dev->dev_attrib.emulate_model_alias = flag;

	return 0;
}

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("dpo_emulated not supported\n");
		return -EINVAL;
	}

	return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_fua_write not supported for pSCSI\n");
		return -EINVAL;
	}
	dev->dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
			dev, dev->dev_attrib.emulate_fua_write);
	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("fua read emulation not supported\n");
		return -EINVAL;
	}

	return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_write_cache not supported for pSCSI\n");
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->get_write_cache) {
		pr_err("emulate_write_cache not supported for this device\n");
		return -EINVAL;
	}

	dev->dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			dev, dev->dev_attrib.emulate_write_cache);
	return 0;
}

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
				dev, flag);
	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
				dev, flag);
	return 0;
}

int se_dev_set_emulate_caw(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_caw = flag;
	pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n",
		 dev, flag);

	return 0;
}

int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_3pc = flag;
	pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n",
		dev, flag);

	return 0;
}

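/*
 * T10-PI (DIF) configuration: pi_prot_type values 1 and 3 select DIF
 * Type1/Type3 via the backend's init_prot()/free_prot() hooks, Type2 is
 * rejected, and hardware-provided protection takes precedence.
 */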
int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
{
	int rc, old_prot = dev->dev_attrib.pi_prot_type;

	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
		pr_err("Illegal value %d for pi_prot_type\n", flag);
		return -EINVAL;
	}
	if (flag == 2) {
		pr_err("DIF TYPE2 protection currently not supported\n");
		return -ENOSYS;
	}
	if (dev->dev_attrib.hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return 0;
	}
	if (!dev->transport->init_prot || !dev->transport->free_prot) {
		/* 0 is only allowed value for non-supporting backends */
		if (flag == 0)
			return 0;

		pr_err("DIF protection not supported by backend: %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!(dev->dev_flags & DF_CONFIGURED)) {
		pr_err("DIF protection requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	dev->dev_attrib.pi_prot_type = flag;

	if (flag && !old_prot) {
		rc = dev->transport->init_prot(dev);
		if (rc) {
			dev->dev_attrib.pi_prot_type = old_prot;
			return rc;
		}

	} else if (!flag && old_prot) {
		dev->transport->free_prot(dev);
	}
	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);

	return 0;
}

int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
{
	int rc;

	if (!flag)
		return 0;

	if (flag != 1) {
		pr_err("Illegal value %d for pi_prot_format\n", flag);
		return -EINVAL;
	}
	if (!dev->transport->format_prot) {
		pr_err("DIF protection format not supported by backend %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!(dev->dev_flags & DF_CONFIGURED)) {
		pr_err("DIF protection format requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to format SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	rc = dev->transport->format_prot(dev);
	if (rc)
		return rc;

	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);

	return 0;
}

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
	       dev, flag);
	return 0;
}

int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		printk(KERN_ERR "dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}

/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	} else {
		if (queue_depth > dev->dev_attrib.queue_depth) {
			if (queue_depth > dev->dev_attrib.hw_queue_depth) {
				pr_err("dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					dev->dev_attrib.hw_queue_depth);
				return -EINVAL;
			}
		}
	}

	dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
		dev, queue_depth);
	return 0;
}

int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
	int block_size = dev->dev_attrib.block_size;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" fabric_max_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!fabric_max_sectors) {
		pr_err("dev[%p]: Illegal ZERO value for"
			" fabric_max_sectors\n", dev);
		return -EINVAL;
	}
	if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, fabric_max_sectors,
				dev->dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, fabric_max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	if (!block_size) {
		block_size = 512;
		pr_warn("Defaulting to 512 for zero block_size\n");
	}
	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
						      block_size);

	dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
			dev, fabric_max_sectors);
	return 0;
}

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Passed optimal_sectors cannot be"
				" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than fabric_max_sectors: %u\n", dev,
			optimal_sectors, dev->dev_attrib.fabric_max_sectors);
		return -EINVAL;
	}

	dev->dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			dev, optimal_sectors);
	return 0;
}

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Not allowed to change block_size for"
			" Physical Device, use for Linux/SCSI to change"
			" block_size for underlying hardware\n", dev);
		return -EINVAL;
	}

	dev->dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			dev, block_size);

	if (dev->dev_attrib.max_bytes_per_io)
		dev->dev_attrib.hw_max_sectors =
			dev->dev_attrib.max_bytes_per_io / block_size;

	return 0;
}

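/*
 * Activate @dev as @unpacked_lun within @tpg and, in demo mode, map the
 * new LUN into any dynamically generated node ACLs.
 */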
struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	u32 unpacked_lun)
{
	struct se_lun *lun;
	int rc;

	lun = core_tpg_alloc_lun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return lun;

	rc = core_tpg_add_lun(tpg, lun,
			TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return ERR_PTR(rc);

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_irq(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				spin_unlock_irq(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_irq(&tpg->acl_node_lock);
			}
		}
		spin_unlock_irq(&tpg->acl_node_lock);
	}

	return lun;
}

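/*
 * Usage sketch (hypothetical fabric caller, not part of this file): a
 * fabric module's configfs LUN link/unlink paths are expected to pair the
 * two helpers; tpg, dev and unpacked_lun come from the fabric:
 *
 *	struct se_lun *lun = core_dev_add_lun(tpg, dev, unpacked_lun);
 *
 *	if (IS_ERR(lun))
 *		return PTR_ERR(lun);
 *	...
 *	core_dev_del_lun(tpg, unpacked_lun);
 */
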
/* core_dev_del_lun():
 *
 * Deactivate and release a LUN previously activated with core_dev_add_lun().
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return PTR_ERR(lun);

	core_tpg_post_dellun(tpg, lun);

	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	return 0;
}

struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG-1:"
			" %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		pr_err("%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

/* core_dev_get_lun():
 *
 * Look up a LUN that is already ACTIVE on the TPG; used when mapping
 * initiator node LUN ACLs onto an exported LUN.
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG-1:"
			" %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

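/*
 * A quick contrast of the two lookups above (sketch; error handling as the
 * callers in this file do it): core_get_lun_from_tpg() demands a FREE slot,
 * i.e. a LUN not yet exported, while core_dev_get_lun() demands ACTIVE:
 *
 *	struct se_lun *lun = core_dev_get_lun(tpg, unpacked_lun);
 *
 *	if (!lun)
 *		return -EINVAL;	// not active on this TPG
 */
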
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u32 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
		 nacl->initiatorname);

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!lun) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic();
	spin_unlock(&lun->lun_acl_lock);

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL.
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
	return 0;
}

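/*
 * Usage sketch (hypothetical caller, not part of this file): a configfs
 * mapped-LUN store handler allocates the ACL and then links it, freeing it
 * again if the link step fails:
 *
 *	struct se_lun_acl *lacl;
 *	int ret;
 *
 *	lacl = core_dev_init_initiator_node_lun_acl(tpg, nacl,
 *						mapped_lun, &ret);
 *	if (!lacl)
 *		return ret;
 *	ret = core_dev_add_initiator_node_lun_acl(tpg, lacl, unpacked_lun,
 *						TRANSPORT_LUNFLAGS_READ_WRITE);
 *	if (ret < 0)
 *		core_dev_free_initiator_node_lun_acl(tpg, lacl);
 */
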
/* core_dev_del_initiator_node_lun_acl():
 *
 * Unlink a LUN ACL from its LUN and disable the mapped LUN for the
 * initiator node.
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic();
	spin_unlock(&lun->lun_acl_lock);

	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

	lacl->se_lun = NULL;

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s\n", scsi_device_type(device_type));
}

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->transport->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
	dev->se_hba = hba;
	dev->transport = hba->transport;
	dev->prot_length = sizeof(struct se_dif_v1_tuple);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	INIT_LIST_HEAD(&dev->g_dev_node);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	atomic_set(&dev->dev_ordered_id, 0);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
	dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
	dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;

	xcopy_lun = &dev->xcopy_lun;
	xcopy_lun->lun_se_dev = dev;
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_acl_list);
	spin_lock_init(&xcopy_lun->lun_acl_lock);
	spin_lock_init(&xcopy_lun->lun_sep_lock);
	init_completion(&xcopy_lun->lun_ref_comp);

	return dev;
}

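/*
 * Lifecycle sketch: devices come up via alloc -> set_configfs_dev_params ->
 * configure and go down via target_free_device();
 * core_dev_setup_virtual_lun0() below is the in-tree instance of the
 * pattern ("buf" here stands for any backend parameter string):
 *
 *	dev = target_alloc_device(hba, name);
 *	if (!dev)
 *		return -ENOMEM;
 *	hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));
 *	ret = target_configure_device(dev);
 *	if (ret)
 *		target_free_device(dev);
 */
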
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_device already configured for storage"
			" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	dev->dev_flags |= DF_CONFIGURED;

	/*
	 * XXX: there is not much point to have two different values here.
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Start up the TMR workqueue for this struct se_device
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	mutex_lock(&g_device_mutex);
	list_add_tail(&dev->g_dev_node, &g_device_list);
	mutex_unlock(&g_device_mutex);

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

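/*
 * Teardown below mirrors target_configure_device(): the tmr workqueue and
 * the g_device_list entry exist only once DF_CONFIGURED is set, while ALUA,
 * LBA map, PR and VPD state are released unconditionally before the
 * backend's free_device() releases the struct se_device itself.
 */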
void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		mutex_lock(&g_device_mutex);
		list_del(&dev->g_dev_node);
		mutex_unlock(&g_device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}