/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			rcu_read_unlock();
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
ref_dev:
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
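
/*
 * Note: the percpu se_lun->lun_ref taken in transport_lookup_cmd_lun() is
 * dropped at command completion time; see transport_lun_remove_cmd() in
 * target_core_transport.c, which keys off the se_cmd->lun_ref_active flag
 * set above.
 */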

int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		se_tmr->tmr_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}
	/*
	 * XXX: Add percpu se_lun->lun_ref reference count for TMR
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}
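
/*
 * A caller that gets a non-NULL return from core_get_se_deve_from_rtpi()
 * holds a deve->pr_kref reference and must drop it when finished, e.g.:
 *
 *	kref_put(&deve->pr_kref, target_pr_kref_release);
 *
 * The release callback completes deve->pr_comp, which
 * core_disable_device_list_for_node() waits on before freeing the entry.
 */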

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->lun_entry_mutex held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}
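
/*
 * Background for the 0x3F/ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED pair
 * queued above: it is the SPC "REPORTED LUNS DATA HAS CHANGED" additional
 * sense code, so each affected I_T nexus sees a unit attention on a later
 * command and knows to re-issue REPORT LUNS.
 */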

int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	atomic_set(&new->ua_count, 0);
	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
			       " for dynamic -> explicit NodeACL conversion:"
			       " %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		BUG_ON(orig->se_lun_acl != NULL);

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

/*
 * Called with se_node_acl->lun_entry_mutex held.
 */
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/* core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->export_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code			Description
	 * 0h			Reserved
	 * 1h			Relative port 1, historically known as port A
	 * 2h			Relative port 2, historically known as port B
	 * 3h to FFFFh		Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}
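
/*
 * Illustration of the wrap handling above: the counter is 16-bit, so after
 * RTPI 0xffff it wraps to 0, the reserved value, and the !lun->lun_rtpi
 * check forces another increment; the dev_sep_list walk then retries until
 * a value not already held by another exported LUN is found.
 */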

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}
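
/*
 * Worked example, assuming PAGE_SIZE == 4096 and a 512 byte block_size:
 * alignment = max(1, 4096 / 512) = 8, so a reported hw_max_sectors of
 * 1023 would be rounded down to 1016 sectors per I/O.
 */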

int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/* core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	INIT_LIST_HEAD(&dev->g_dev_node);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
		DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_ref_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	return dev;
}

/*
 * Check if the underlying struct block_device request_queue supports
 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
 * in ATA and we need to set TPE=1
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct request_queue *q, int block_size)
{
	if (!blk_queue_discard(q))
		return false;

	attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
								block_size;
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
								block_size;
	attrib->unmap_zeroes_data = q->limits.discard_zeroes_data;
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
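
/*
 * Worked example for the unit conversions above, assuming a queue with
 * max_discard_sectors = 8192 (512 byte sectors) and a 4096 byte
 * block_size: max_unmap_lba_count = (8192 << 9) / 4096 = 1024 logical
 * blocks per UNMAP descriptor.
 */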

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);
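
/*
 * Worked example: with a 4096 byte exported block_size, initiator LBA 16
 * maps to Linux sector 16 << 3 = 128; with a 512 byte block_size the LBA
 * is already in 512 byte units and is returned unchanged.
 */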

int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
		       " object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
		       dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	mutex_lock(&g_device_mutex);
	list_add_tail(&dev->g_dev_node, &g_device_list);
	mutex_unlock(&g_device_mutex);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		mutex_lock(&g_device_mutex);
		list_del(&dev->g_dev_node);
		mutex_unlock(&g_device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}
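
/*
 * For reference, the "rd_pages=8,rd_nullio=1" string above is parsed by
 * the rd_mcp backend's set_configfs_dev_params(): a small ramdisk shape
 * with null I/O enabled, so virtual LUN 0 can answer REPORT_LUNS and
 * friends without allocating real backing pages.
 */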


void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;

	/*
	 * Clear a lun set in the cdb if the initiator talking to us spoke
	 * an old standards version, as we can't assume the underlying device
	 * won't choke up on it.
	 */
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case 0x8e: /* WRITE_VERIFY_16 */
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case 0x0c: /* WRITE_VERIFY_32 */
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);
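
/*
 * Typical backend usage (a sketch based on the pSCSI backend; the exec_cmd
 * callback is whatever function the backend uses to submit the CDB):
 *
 *	static sense_reason_t pscsi_parse_cdb(struct se_cmd *cmd)
 *	{
 *		if (cmd->se_cmd_flags & SCF_BIDI)
 *			return TCM_UNSUPPORTED_SCSI_OPCODE;
 *		return passthrough_parse_cdb(cmd, pscsi_execute_cmd);
 *	}
 */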