/*******************************************************************************
 * Filename:  target_core_configfs.c
 *
 * This file contains ConfigFS logic for the Generic Target Engine project.
 *
 * (c) Copyright 2008-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * based on configfs Copyright (C) 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
#include <linux/spinlock.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_rd.h"
#include "target_core_xcopy.h"

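/*
 * Helpers to populate the per-backend config_item_type instances
 * (tb->tb_<name>_cit) with the given item/group operations and attribute
 * array; the _DRV variant pulls the attribute array from the backend's
 * own ops (tb->ops->tb_<name>_attrs) instead of taking it as a parameter.
 */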
#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = _attrs;						\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}

#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops)			\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = tb->ops->tb_##_name##_attrs;			\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}

extern struct t10_alua_lu_gp *default_lu_gp;

static LIST_HEAD(g_tf_list);
static DEFINE_MUTEX(g_tf_lock);

struct target_core_configfs_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(void *, char *);
	ssize_t (*store)(void *, const char *, size_t);
};

static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;

static inline struct se_hba *
item_to_hba(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_hba, hba_group);
}

/*
 * Attributes for /sys/kernel/config/target/
 */
static ssize_t target_core_attr_show(struct config_item *item,
				     struct configfs_attribute *attr,
				     char *page)
{
	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
		" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION,
		utsname()->sysname, utsname()->machine);
}

static struct configfs_item_operations target_core_fabric_item_ops = {
	.show_attribute = target_core_attr_show,
};

static struct configfs_attribute target_core_item_attr_version = {
	.ca_owner	= THIS_MODULE,
	.ca_name	= "version",
	.ca_mode	= S_IRUGO,
};

static struct target_fabric_configfs *target_core_get_fabric(
	const char *name)
{
	struct target_fabric_configfs *tf;

	if (!name)
		return NULL;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(tf, &g_tf_list, tf_list) {
		if (!strcmp(tf->tf_ops->name, name)) {
			atomic_inc(&tf->tf_access_cnt);
			mutex_unlock(&g_tf_lock);
			return tf;
		}
	}
	mutex_unlock(&g_tf_lock);

	return NULL;
}

/*
 * Called from struct target_core_group_ops->make_group()
 */
static struct config_group *target_core_register_fabric(
	struct config_group *group,
	const char *name)
{
	struct target_fabric_configfs *tf;
	int ret;

	pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
			" %s\n", group, name);

	tf = target_core_get_fabric(name);
	if (!tf) {
		pr_debug("target_core_register_fabric() trying autoload for %s\n",
			 name);

		/*
		 * Below are some hardcoded request_module() calls to automatically
		 * load local fabric modules when the following is called:
		 *
		 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
		 *
		 * Note that this does not limit which TCM fabric module can be
		 * registered, but simply provides auto loading logic for mkdir(2)
		 * system calls with known TCM fabric module names.
		 */

		if (!strncmp(name, "iscsi", 5)) {
			/*
			 * Automatically load the LIO Target fabric module when the
			 * following is called:
			 *
			 * mkdir -p $CONFIGFS/target/iscsi
			 */
			ret = request_module("iscsi_target_mod");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					 " iscsi_target_mod.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		} else if (!strncmp(name, "loopback", 8)) {
			/*
			 * Automatically load the tcm_loop fabric module when the
			 * following is called:
			 *
			 * mkdir -p $CONFIGFS/target/loopback
			 */
			ret = request_module("tcm_loop");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					 " tcm_loop.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		}

		tf = target_core_get_fabric(name);
	}

	if (!tf) {
		pr_debug("target_core_get_fabric() failed for %s\n",
			 name);
		return ERR_PTR(-EINVAL);
	}
	pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
			" %s\n", tf->tf_ops->name);
	/*
	 * On a successful target_core_get_fabric() lookup, the returned
	 * struct target_fabric_configfs *tf will contain a usage reference.
	 */
	pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
			&tf->tf_wwn_cit);

	tf->tf_group.default_groups = tf->tf_default_groups;
	tf->tf_group.default_groups[0] = &tf->tf_disc_group;
	tf->tf_group.default_groups[1] = NULL;

	config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);
	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
			&tf->tf_discovery_cit);

	pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
			" %s\n", tf->tf_group.cg_item.ci_name);
	return &tf->tf_group;
}

/*
 * Called from struct target_core_group_ops->drop_item()
 */
static void target_core_deregister_fabric(
	struct config_group *group,
	struct config_item *item)
{
	struct target_fabric_configfs *tf = container_of(
		to_config_group(item), struct target_fabric_configfs, tf_group);
	struct config_group *tf_group;
	struct config_item *df_item;
	int i;

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
		" tf list\n", config_item_name(item));

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
			" %s\n", tf->tf_ops->name);
	atomic_dec(&tf->tf_access_cnt);

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
			" %s\n", config_item_name(item));

	tf_group = &tf->tf_group;
	for (i = 0; tf_group->default_groups[i]; i++) {
		df_item = &tf_group->default_groups[i]->cg_item;
		tf_group->default_groups[i] = NULL;
		config_item_put(df_item);
	}
	config_item_put(item);
}

static struct configfs_group_operations target_core_fabric_group_ops = {
	.make_group	= &target_core_register_fabric,
	.drop_item	= &target_core_deregister_fabric,
};

/*
 * All item attributes appearing in /sys/kernel/config/target/ appear here.
 */
static struct configfs_attribute *target_core_fabric_item_attrs[] = {
	&target_core_item_attr_version,
	NULL,
};

/*
 * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
 */
static struct config_item_type target_core_fabrics_item = {
	.ct_item_ops	= &target_core_fabric_item_ops,
	.ct_group_ops	= &target_core_fabric_group_ops,
	.ct_attrs	= target_core_fabric_item_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem target_core_fabrics = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "target",
			.ci_type = &target_core_fabrics_item,
		},
	},
};

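/*
 * target_depend_item()/target_undepend_item() below wrap
 * configfs_depend_item()/configfs_undepend_item(), letting target core code
 * pin a configfs item (for example a backend device that is currently
 * exported) so that rmdir(2) on it fails until the dependency is dropped.
 */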
int target_depend_item(struct config_item *item)
{
	return configfs_depend_item(&target_core_fabrics, item);
}
EXPORT_SYMBOL(target_depend_item);

void target_undepend_item(struct config_item *item)
{
	return configfs_undepend_item(&target_core_fabrics, item);
}
EXPORT_SYMBOL(target_undepend_item);

/*##############################################################################
// Start functions called by external Target Fabrics Modules
//############################################################################*/

static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
{
	if (!tfo->name) {
		pr_err("Missing tfo->name\n");
		return -EINVAL;
	}
	if (strlen(tfo->name) >= TARGET_FABRIC_NAME_SIZE) {
		pr_err("Passed name: %s exceeds TARGET_FABRIC"
			"_NAME_SIZE\n", tfo->name);
		return -EINVAL;
	}
	if (!tfo->get_fabric_name) {
		pr_err("Missing tfo->get_fabric_name()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_wwn) {
		pr_err("Missing tfo->tpg_get_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_tag) {
		pr_err("Missing tfo->tpg_get_tag()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode) {
		pr_err("Missing tfo->tpg_check_demo_mode()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode_cache) {
		pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode_write_protect) {
		pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_prod_mode_write_protect) {
		pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_inst_index) {
		pr_err("Missing tfo->tpg_get_inst_index()\n");
		return -EINVAL;
	}
	if (!tfo->release_cmd) {
		pr_err("Missing tfo->release_cmd()\n");
		return -EINVAL;
	}
	if (!tfo->shutdown_session) {
		pr_err("Missing tfo->shutdown_session()\n");
		return -EINVAL;
	}
	if (!tfo->close_session) {
		pr_err("Missing tfo->close_session()\n");
		return -EINVAL;
	}
	if (!tfo->sess_get_index) {
		pr_err("Missing tfo->sess_get_index()\n");
		return -EINVAL;
	}
	if (!tfo->write_pending) {
		pr_err("Missing tfo->write_pending()\n");
		return -EINVAL;
	}
	if (!tfo->write_pending_status) {
		pr_err("Missing tfo->write_pending_status()\n");
		return -EINVAL;
	}
	if (!tfo->set_default_node_attributes) {
		pr_err("Missing tfo->set_default_node_attributes()\n");
		return -EINVAL;
	}
	if (!tfo->get_cmd_state) {
		pr_err("Missing tfo->get_cmd_state()\n");
		return -EINVAL;
	}
	if (!tfo->queue_data_in) {
		pr_err("Missing tfo->queue_data_in()\n");
		return -EINVAL;
	}
	if (!tfo->queue_status) {
		pr_err("Missing tfo->queue_status()\n");
		return -EINVAL;
	}
	if (!tfo->queue_tm_rsp) {
		pr_err("Missing tfo->queue_tm_rsp()\n");
		return -EINVAL;
	}
	if (!tfo->aborted_task) {
		pr_err("Missing tfo->aborted_task()\n");
		return -EINVAL;
	}
	/*
	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
	 * target_core_fabric_configfs.c WWN+TPG group context code.
	 */
	if (!tfo->fabric_make_wwn) {
		pr_err("Missing tfo->fabric_make_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_drop_wwn) {
		pr_err("Missing tfo->fabric_drop_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_make_tpg) {
		pr_err("Missing tfo->fabric_make_tpg()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_drop_tpg) {
		pr_err("Missing tfo->fabric_drop_tpg()\n");
		return -EINVAL;
	}

	return 0;
}

int target_register_template(const struct target_core_fabric_ops *fo)
{
	struct target_fabric_configfs *tf;
	int ret;

	ret = target_fabric_tf_ops_check(fo);
	if (ret)
		return ret;

	tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
	if (!tf) {
		pr_err("%s: could not allocate memory!\n", __func__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&tf->tf_list);
	atomic_set(&tf->tf_access_cnt, 0);
	tf->tf_ops = fo;
	target_fabric_setup_cits(tf);

	mutex_lock(&g_tf_lock);
	list_add_tail(&tf->tf_list, &g_tf_list);
	mutex_unlock(&g_tf_lock);

	return 0;
}
EXPORT_SYMBOL(target_register_template);

void target_unregister_template(const struct target_core_fabric_ops *fo)
{
	struct target_fabric_configfs *t;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(t, &g_tf_list, tf_list) {
		if (!strcmp(t->tf_ops->name, fo->name)) {
			BUG_ON(atomic_read(&t->tf_access_cnt));
			list_del(&t->tf_list);
			kfree(t);
			break;
		}
	}
	mutex_unlock(&g_tf_lock);
}
EXPORT_SYMBOL(target_unregister_template);
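/*
 * Illustrative (not from this file) usage of the two exports above by a
 * fabric module:
 *
 *	static const struct target_core_fabric_ops my_fabric_ops = {
 *		.name = "my_fabric",
 *		... mandatory callbacks checked by target_fabric_tf_ops_check() ...
 *	};
 *
 *	module init:  ret = target_register_template(&my_fabric_ops);
 *	module exit:  target_unregister_template(&my_fabric_ops);
 *
 * Registration adds the fabric to g_tf_list, from which
 * target_core_register_fabric() resolves it when userspace issues mkdir(2)
 * under /sys/kernel/config/target/.
 */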

/*##############################################################################
// Stop functions called by external Target Fabrics Modules
//############################################################################*/

/* Start functions for struct config_item_type tb_dev_attrib_cit */
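/*
 * The DEF_TB_DEV_ATTRIB*() and TB_DEV_ATTR*() macros below generate, per
 * device attribute, a show routine that prints dev->dev_attrib.<name>, a
 * store routine that parses the value with kstrtoul() and applies it via
 * se_dev_set_<name>(), plus the configfs attribute structure wiring the two
 * up (read-only variants omit the store side).
 */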
#define DEF_TB_DEV_ATTRIB_SHOW(_backend, _name)				\
static ssize_t _backend##_dev_show_attr_##_name(			\
	struct se_dev_attrib *da,					\
	char *page)							\
{									\
	return snprintf(page, PAGE_SIZE, "%u\n",			\
		(u32)da->da_dev->dev_attrib._name);			\
}

#define DEF_TB_DEV_ATTRIB_STORE(_backend, _name)			\
static ssize_t _backend##_dev_store_attr_##_name(			\
	struct se_dev_attrib *da,					\
	const char *page,						\
	size_t count)							\
{									\
	unsigned long val;						\
	int ret;							\
									\
	ret = kstrtoul(page, 0, &val);					\
	if (ret < 0) {							\
		pr_err("kstrtoul() failed with ret: %d\n", ret);	\
		return -EINVAL;						\
	}								\
	ret = se_dev_set_##_name(da->da_dev, (u32)val);			\
									\
	return (!ret) ? count : -EINVAL;				\
}

#define DEF_TB_DEV_ATTRIB(_backend, _name)				\
DEF_TB_DEV_ATTRIB_SHOW(_backend, _name);				\
DEF_TB_DEV_ATTRIB_STORE(_backend, _name);

#define DEF_TB_DEV_ATTRIB_RO(_backend, name)				\
DEF_TB_DEV_ATTRIB_SHOW(_backend, name);

CONFIGFS_EATTR_STRUCT(target_backend_dev_attrib, se_dev_attrib);
#define TB_DEV_ATTR(_backend, _name, _mode)				\
static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
	__CONFIGFS_EATTR(_name, _mode,					\
	_backend##_dev_show_attr_##_name,				\
	_backend##_dev_store_attr_##_name);

#define TB_DEV_ATTR_RO(_backend, _name)					\
static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
	__CONFIGFS_EATTR_RO(_name,					\
	_backend##_dev_show_attr_##_name);

DEF_TB_DEV_ATTRIB(target_core, emulate_model_alias);
DEF_TB_DEV_ATTRIB(target_core, emulate_dpo);
DEF_TB_DEV_ATTRIB(target_core, emulate_fua_write);
DEF_TB_DEV_ATTRIB(target_core, emulate_fua_read);
DEF_TB_DEV_ATTRIB(target_core, emulate_write_cache);
DEF_TB_DEV_ATTRIB(target_core, emulate_ua_intlck_ctrl);
DEF_TB_DEV_ATTRIB(target_core, emulate_tas);
DEF_TB_DEV_ATTRIB(target_core, emulate_tpu);
DEF_TB_DEV_ATTRIB(target_core, emulate_tpws);
DEF_TB_DEV_ATTRIB(target_core, emulate_caw);
DEF_TB_DEV_ATTRIB(target_core, emulate_3pc);
DEF_TB_DEV_ATTRIB(target_core, pi_prot_type);
DEF_TB_DEV_ATTRIB_RO(target_core, hw_pi_prot_type);
DEF_TB_DEV_ATTRIB(target_core, pi_prot_format);
DEF_TB_DEV_ATTRIB(target_core, enforce_pr_isids);
DEF_TB_DEV_ATTRIB(target_core, is_nonrot);
DEF_TB_DEV_ATTRIB(target_core, emulate_rest_reord);
DEF_TB_DEV_ATTRIB(target_core, force_pr_aptpl);
DEF_TB_DEV_ATTRIB_RO(target_core, hw_block_size);
DEF_TB_DEV_ATTRIB(target_core, block_size);
DEF_TB_DEV_ATTRIB_RO(target_core, hw_max_sectors);
DEF_TB_DEV_ATTRIB(target_core, optimal_sectors);
DEF_TB_DEV_ATTRIB_RO(target_core, hw_queue_depth);
DEF_TB_DEV_ATTRIB(target_core, queue_depth);
DEF_TB_DEV_ATTRIB(target_core, max_unmap_lba_count);
DEF_TB_DEV_ATTRIB(target_core, max_unmap_block_desc_count);
DEF_TB_DEV_ATTRIB(target_core, unmap_granularity);
DEF_TB_DEV_ATTRIB(target_core, unmap_granularity_alignment);
DEF_TB_DEV_ATTRIB(target_core, max_write_same_len);

TB_DEV_ATTR(target_core, emulate_model_alias, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_dpo, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_fua_write, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_fua_read, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_write_cache, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_tas, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_tpu, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_tpws, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_caw, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_3pc, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, pi_prot_type, S_IRUGO | S_IWUSR);
TB_DEV_ATTR_RO(target_core, hw_pi_prot_type);
TB_DEV_ATTR(target_core, pi_prot_format, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, enforce_pr_isids, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, is_nonrot, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_rest_reord, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, force_pr_aptpl, S_IRUGO | S_IWUSR);
TB_DEV_ATTR_RO(target_core, hw_block_size);
TB_DEV_ATTR(target_core, block_size, S_IRUGO | S_IWUSR);
TB_DEV_ATTR_RO(target_core, hw_max_sectors);
TB_DEV_ATTR(target_core, optimal_sectors, S_IRUGO | S_IWUSR);
TB_DEV_ATTR_RO(target_core, hw_queue_depth);
TB_DEV_ATTR(target_core, queue_depth, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, max_unmap_lba_count, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, unmap_granularity, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, unmap_granularity_alignment, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, max_write_same_len, S_IRUGO | S_IWUSR);

CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);

/*
 * dev_attrib attributes for devices using the target core SBC/SPC
 * interpreter.  Any backend using spc_parse_cdb should be using
 * these.
 */
struct configfs_attribute *sbc_attrib_attrs[] = {
	&target_core_dev_attrib_emulate_model_alias.attr,
	&target_core_dev_attrib_emulate_dpo.attr,
	&target_core_dev_attrib_emulate_fua_write.attr,
	&target_core_dev_attrib_emulate_fua_read.attr,
	&target_core_dev_attrib_emulate_write_cache.attr,
	&target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
	&target_core_dev_attrib_emulate_tas.attr,
	&target_core_dev_attrib_emulate_tpu.attr,
	&target_core_dev_attrib_emulate_tpws.attr,
	&target_core_dev_attrib_emulate_caw.attr,
	&target_core_dev_attrib_emulate_3pc.attr,
	&target_core_dev_attrib_pi_prot_type.attr,
	&target_core_dev_attrib_hw_pi_prot_type.attr,
	&target_core_dev_attrib_pi_prot_format.attr,
	&target_core_dev_attrib_enforce_pr_isids.attr,
	&target_core_dev_attrib_is_nonrot.attr,
	&target_core_dev_attrib_emulate_rest_reord.attr,
	&target_core_dev_attrib_force_pr_aptpl.attr,
	&target_core_dev_attrib_hw_block_size.attr,
	&target_core_dev_attrib_block_size.attr,
	&target_core_dev_attrib_hw_max_sectors.attr,
	&target_core_dev_attrib_optimal_sectors.attr,
	&target_core_dev_attrib_hw_queue_depth.attr,
	&target_core_dev_attrib_queue_depth.attr,
	&target_core_dev_attrib_max_unmap_lba_count.attr,
	&target_core_dev_attrib_max_unmap_block_desc_count.attr,
	&target_core_dev_attrib_unmap_granularity.attr,
	&target_core_dev_attrib_unmap_granularity_alignment.attr,
	&target_core_dev_attrib_max_write_same_len.attr,
	NULL,
};
EXPORT_SYMBOL(sbc_attrib_attrs);

DEF_TB_DEV_ATTRIB_RO(target_pt, hw_pi_prot_type);
DEF_TB_DEV_ATTRIB_RO(target_pt, hw_block_size);
DEF_TB_DEV_ATTRIB_RO(target_pt, hw_max_sectors);
DEF_TB_DEV_ATTRIB_RO(target_pt, hw_queue_depth);

TB_DEV_ATTR_RO(target_pt, hw_pi_prot_type);
TB_DEV_ATTR_RO(target_pt, hw_block_size);
TB_DEV_ATTR_RO(target_pt, hw_max_sectors);
TB_DEV_ATTR_RO(target_pt, hw_queue_depth);

/*
 * Minimal dev_attrib attributes for devices passing through CDBs.
 * In this case we only provide a few read-only attributes for
 * backwards compatibility.
 */
struct configfs_attribute *passthrough_attrib_attrs[] = {
	&target_pt_dev_attrib_hw_pi_prot_type.attr,
	&target_pt_dev_attrib_hw_block_size.attr,
	&target_pt_dev_attrib_hw_max_sectors.attr,
	&target_pt_dev_attrib_hw_queue_depth.attr,
	NULL,
};
EXPORT_SYMBOL(passthrough_attrib_attrs);
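/*
 * A backend selects one of the two attribute arrays above through its
 * ->tb_dev_attrib_attrs pointer (sbc_attrib_attrs for full SBC/SPC
 * emulation, passthrough_attrib_attrs for CDB pass-through), which
 * TB_CIT_SETUP_DRV() below installs as ct_attrs of the dev_attrib group.
 */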
646
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800647static struct configfs_item_operations target_core_dev_attrib_ops = {
648 .show_attribute = target_core_dev_attrib_attr_show,
649 .store_attribute = target_core_dev_attrib_attr_store,
650};
651
Christoph Hellwig0a06d432015-05-10 18:14:56 +0200652TB_CIT_SETUP_DRV(dev_attrib, &target_core_dev_attrib_ops, NULL);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800653
Nicholas Bellingerf79a8972014-11-27 14:51:14 -0800654/* End functions for struct config_item_type tb_dev_attrib_cit */
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800655
Nicholas Bellingerf8d389c2014-11-27 15:01:12 -0800656/* Start functions for struct config_item_type tb_dev_wwn_cit */
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800657
658CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);
659#define SE_DEV_WWN_ATTR(_name, _mode) \
660static struct target_core_dev_wwn_attribute target_core_dev_wwn_##_name = \
661 __CONFIGFS_EATTR(_name, _mode, \
662 target_core_dev_wwn_show_attr_##_name, \
663 target_core_dev_wwn_store_attr_##_name);
664
665#define SE_DEV_WWN_ATTR_RO(_name); \
666do { \
667 static struct target_core_dev_wwn_attribute \
668 target_core_dev_wwn_##_name = \
669 __CONFIGFS_EATTR_RO(_name, \
670 target_core_dev_wwn_show_attr_##_name); \
671} while (0);
672
673/*
674 * VPD page 0x80 Unit serial
675 */
676static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
677 struct t10_wwn *t10_wwn,
678 char *page)
679{
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800680 return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
681 &t10_wwn->unit_serial[0]);
682}
683
684static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
685 struct t10_wwn *t10_wwn,
686 const char *page,
687 size_t count)
688{
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400689 struct se_device *dev = t10_wwn->t10_dev;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800690 unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
691
692 /*
693 * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
694 * from the struct scsi_device level firmware, do not allow
695 * VPD Unit Serial to be emulated.
696 *
697 * Note this struct scsi_device could also be emulating VPD
698 * information from its drivers/scsi LLD. But for now we assume
699 * it is doing 'the right thing' wrt a world wide unique
700 * VPD Unit Serial Number that OS dependent multipath can depend on.
701 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400702 if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
Andy Grover6708bb22011-06-08 10:36:43 -0700703 pr_err("Underlying SCSI device firmware provided VPD"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800704 " Unit Serial, ignoring request\n");
705 return -EOPNOTSUPP;
706 }
707
Dan Carpenter60d645a2011-06-15 10:03:05 -0700708 if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
Andy Grover6708bb22011-06-08 10:36:43 -0700709 pr_err("Emulated VPD Unit Serial exceeds"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800710 " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
711 return -EOVERFLOW;
712 }
713 /*
714 * Check to see if any active $FABRIC_MOD exports exist. If they
715 * do exist, fail here as changing this information on the fly
716 * (underneath the initiator side OS dependent multipath code)
717 * could cause negative effects.
718 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400719 if (dev->export_count) {
720 pr_err("Unable to set VPD Unit Serial while"
721 " active %d $FABRIC_MOD exports exist\n",
722 dev->export_count);
723 return -EINVAL;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800724 }
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400725
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800726 /*
727 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
728 *
729 * Also, strip any newline added from the userspace
730 * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
731 */
732 memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
733 snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400734 snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800735 "%s", strstrip(buf));
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400736 dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800737
Andy Grover6708bb22011-06-08 10:36:43 -0700738 pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400739 " %s\n", dev->t10_wwn.unit_serial);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800740
741 return count;
742}
743
744SE_DEV_WWN_ATTR(vpd_unit_serial, S_IRUGO | S_IWUSR);
745
746/*
747 * VPD page 0x83 Protocol Identifier
748 */
749static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
750 struct t10_wwn *t10_wwn,
751 char *page)
752{
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800753 struct t10_vpd *vpd;
754 unsigned char buf[VPD_TMP_BUF_SIZE];
755 ssize_t len = 0;
756
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800757 memset(buf, 0, VPD_TMP_BUF_SIZE);
758
759 spin_lock(&t10_wwn->t10_vpd_lock);
760 list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
Andy Grover6708bb22011-06-08 10:36:43 -0700761 if (!vpd->protocol_identifier_set)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800762 continue;
763
764 transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
765
Andy Grover6708bb22011-06-08 10:36:43 -0700766 if (len + strlen(buf) >= PAGE_SIZE)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800767 break;
768
769 len += sprintf(page+len, "%s", buf);
770 }
771 spin_unlock(&t10_wwn->t10_vpd_lock);
772
773 return len;
774}
775
776static ssize_t target_core_dev_wwn_store_attr_vpd_protocol_identifier(
777 struct t10_wwn *t10_wwn,
778 const char *page,
779 size_t count)
780{
781 return -ENOSYS;
782}
783
784SE_DEV_WWN_ATTR(vpd_protocol_identifier, S_IRUGO | S_IWUSR);
785
786/*
787 * Generic wrapper for dumping VPD identifiers by association.
788 */
789#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc) \
790static ssize_t target_core_dev_wwn_show_attr_##_name( \
791 struct t10_wwn *t10_wwn, \
792 char *page) \
793{ \
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800794 struct t10_vpd *vpd; \
795 unsigned char buf[VPD_TMP_BUF_SIZE]; \
796 ssize_t len = 0; \
797 \
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800798 spin_lock(&t10_wwn->t10_vpd_lock); \
799 list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \
800 if (vpd->association != _assoc) \
801 continue; \
802 \
803 memset(buf, 0, VPD_TMP_BUF_SIZE); \
804 transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \
Andy Grover6708bb22011-06-08 10:36:43 -0700805 if (len + strlen(buf) >= PAGE_SIZE) \
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800806 break; \
807 len += sprintf(page+len, "%s", buf); \
808 \
809 memset(buf, 0, VPD_TMP_BUF_SIZE); \
810 transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
Andy Grover6708bb22011-06-08 10:36:43 -0700811 if (len + strlen(buf) >= PAGE_SIZE) \
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800812 break; \
813 len += sprintf(page+len, "%s", buf); \
814 \
815 memset(buf, 0, VPD_TMP_BUF_SIZE); \
816 transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
Andy Grover6708bb22011-06-08 10:36:43 -0700817 if (len + strlen(buf) >= PAGE_SIZE) \
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800818 break; \
819 len += sprintf(page+len, "%s", buf); \
820 } \
821 spin_unlock(&t10_wwn->t10_vpd_lock); \
822 \
823 return len; \
824}
825
826/*
Andy Shevchenko163cd5f2011-07-18 22:17:43 -0700827 * VPD page 0x83 Association: Logical Unit
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800828 */
829DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
830
831static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit(
832 struct t10_wwn *t10_wwn,
833 const char *page,
834 size_t count)
835{
836 return -ENOSYS;
837}
838
839SE_DEV_WWN_ATTR(vpd_assoc_logical_unit, S_IRUGO | S_IWUSR);
840
841/*
842 * VPD page 0x83 Association: Target Port
843 */
844DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
845
846static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port(
847 struct t10_wwn *t10_wwn,
848 const char *page,
849 size_t count)
850{
851 return -ENOSYS;
852}
853
854SE_DEV_WWN_ATTR(vpd_assoc_target_port, S_IRUGO | S_IWUSR);
855
856/*
857 * VPD page 0x83 Association: SCSI Target Device
858 */
859DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
860
861static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device(
862 struct t10_wwn *t10_wwn,
863 const char *page,
864 size_t count)
865{
866 return -ENOSYS;
867}
868
869SE_DEV_WWN_ATTR(vpd_assoc_scsi_target_device, S_IRUGO | S_IWUSR);
870
871CONFIGFS_EATTR_OPS(target_core_dev_wwn, t10_wwn, t10_wwn_group);
872
873static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
874 &target_core_dev_wwn_vpd_unit_serial.attr,
875 &target_core_dev_wwn_vpd_protocol_identifier.attr,
876 &target_core_dev_wwn_vpd_assoc_logical_unit.attr,
877 &target_core_dev_wwn_vpd_assoc_target_port.attr,
878 &target_core_dev_wwn_vpd_assoc_scsi_target_device.attr,
879 NULL,
880};
881
882static struct configfs_item_operations target_core_dev_wwn_ops = {
883 .show_attribute = target_core_dev_wwn_attr_show,
884 .store_attribute = target_core_dev_wwn_attr_store,
885};
886
Nicholas Bellingerf8d389c2014-11-27 15:01:12 -0800887TB_CIT_SETUP(dev_wwn, &target_core_dev_wwn_ops, NULL, target_core_dev_wwn_attrs);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800888
Nicholas Bellingerf8d389c2014-11-27 15:01:12 -0800889/* End functions for struct config_item_type tb_dev_wwn_cit */
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800890
Nicholas Bellinger91e2e392014-11-27 14:57:01 -0800891/* Start functions for struct config_item_type tb_dev_pr_cit */
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800892
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400893CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800894#define SE_DEV_PR_ATTR(_name, _mode) \
895static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
896 __CONFIGFS_EATTR(_name, _mode, \
897 target_core_dev_pr_show_attr_##_name, \
898 target_core_dev_pr_store_attr_##_name);
899
900#define SE_DEV_PR_ATTR_RO(_name); \
901static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
902 __CONFIGFS_EATTR_RO(_name, \
903 target_core_dev_pr_show_attr_##_name);
904
Christoph Hellwigd977f432012-10-10 17:37:15 -0400905static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
906 char *page)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800907{
908 struct se_node_acl *se_nacl;
909 struct t10_pr_registration *pr_reg;
910 char i_buf[PR_REG_ISID_ID_LEN];
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800911
912 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
913
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800914 pr_reg = dev->dev_pr_res_holder;
Christoph Hellwigd977f432012-10-10 17:37:15 -0400915 if (!pr_reg)
916 return sprintf(page, "No SPC-3 Reservation holder\n");
917
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800918 se_nacl = pr_reg->pr_reg_nacl;
Andy Groverd2843c12013-05-16 10:40:55 -0700919 core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800920
Christoph Hellwigd977f432012-10-10 17:37:15 -0400921 return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
Andy Grovere3d6f902011-07-19 08:55:10 +0000922 se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
Andy Groverd2843c12013-05-16 10:40:55 -0700923 se_nacl->initiatorname, i_buf);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800924}
925
Christoph Hellwigd977f432012-10-10 17:37:15 -0400926static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
927 char *page)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800928{
929 struct se_node_acl *se_nacl;
Christoph Hellwigd977f432012-10-10 17:37:15 -0400930 ssize_t len;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800931
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800932 se_nacl = dev->dev_reserved_node_acl;
Christoph Hellwigd977f432012-10-10 17:37:15 -0400933 if (se_nacl) {
934 len = sprintf(page,
935 "SPC-2 Reservation: %s Initiator: %s\n",
936 se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
937 se_nacl->initiatorname);
938 } else {
939 len = sprintf(page, "No SPC-2 Reservation holder\n");
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800940 }
Christoph Hellwigd977f432012-10-10 17:37:15 -0400941 return len;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800942}
943
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400944static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev,
945 char *page)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800946{
Christoph Hellwigd977f432012-10-10 17:37:15 -0400947 int ret;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800948
Andy Grovera3541702015-05-19 14:44:41 -0700949 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
Christoph Hellwigd977f432012-10-10 17:37:15 -0400950 return sprintf(page, "Passthrough\n");
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800951
Christoph Hellwigd977f432012-10-10 17:37:15 -0400952 spin_lock(&dev->dev_reservation_lock);
953 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
954 ret = target_core_dev_pr_show_spc2_res(dev, page);
955 else
956 ret = target_core_dev_pr_show_spc3_res(dev, page);
957 spin_unlock(&dev->dev_reservation_lock);
958 return ret;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800959}
960
961SE_DEV_PR_ATTR_RO(res_holder);
962
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800963static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400964 struct se_device *dev, char *page)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800965{
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800966 ssize_t len = 0;
967
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800968 spin_lock(&dev->dev_reservation_lock);
Christoph Hellwigd977f432012-10-10 17:37:15 -0400969 if (!dev->dev_pr_res_holder) {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800970 len = sprintf(page, "No SPC-3 Reservation holder\n");
Christoph Hellwigd977f432012-10-10 17:37:15 -0400971 } else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800972 len = sprintf(page, "SPC-3 Reservation: All Target"
973 " Ports registration\n");
Christoph Hellwigd977f432012-10-10 17:37:15 -0400974 } else {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800975 len = sprintf(page, "SPC-3 Reservation: Single"
976 " Target Port registration\n");
Christoph Hellwigd977f432012-10-10 17:37:15 -0400977 }
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800978
Christoph Hellwigd977f432012-10-10 17:37:15 -0400979 spin_unlock(&dev->dev_reservation_lock);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800980 return len;
981}
982
983SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
984
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800985static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400986 struct se_device *dev, char *page)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800987{
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400988 return sprintf(page, "0x%08x\n", dev->t10_pr.pr_generation);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800989}
990
991SE_DEV_PR_ATTR_RO(res_pr_generation);
992
993/*
994 * res_pr_holder_tg_port
995 */
996static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400997 struct se_device *dev, char *page)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800998{
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800999 struct se_node_acl *se_nacl;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001000 struct se_portal_group *se_tpg;
1001 struct t10_pr_registration *pr_reg;
Christoph Hellwig9ac89282015-04-08 20:01:35 +02001002 const struct target_core_fabric_ops *tfo;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001003 ssize_t len = 0;
1004
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001005 spin_lock(&dev->dev_reservation_lock);
1006 pr_reg = dev->dev_pr_res_holder;
Andy Grover6708bb22011-06-08 10:36:43 -07001007 if (!pr_reg) {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001008 len = sprintf(page, "No SPC-3 Reservation holder\n");
Christoph Hellwigd977f432012-10-10 17:37:15 -04001009 goto out_unlock;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001010 }
Christoph Hellwigd977f432012-10-10 17:37:15 -04001011
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001012 se_nacl = pr_reg->pr_reg_nacl;
1013 se_tpg = se_nacl->se_tpg;
Andy Grovere3d6f902011-07-19 08:55:10 +00001014 tfo = se_tpg->se_tpg_tfo;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001015
1016 len += sprintf(page+len, "SPC-3 Reservation: %s"
1017 " Target Node Endpoint: %s\n", tfo->get_fabric_name(),
1018 tfo->tpg_get_wwn(se_tpg));
1019 len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
Masanari Iida35d1efe2012-08-16 22:43:13 +09001020 " Identifier Tag: %hu %s Portal Group Tag: %hu"
Nicholas Bellinger79dc9c92015-03-27 04:51:03 +00001021 " %s Logical Unit: %u\n", pr_reg->tg_pt_sep_rtpi,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001022 tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
Nicholas Bellinger79dc9c92015-03-27 04:51:03 +00001023 tfo->get_fabric_name(), pr_reg->pr_aptpl_target_lun);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001024
Christoph Hellwigd977f432012-10-10 17:37:15 -04001025out_unlock:
1026 spin_unlock(&dev->dev_reservation_lock);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001027 return len;
1028}
1029
1030SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
1031
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001032static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001033 struct se_device *dev, char *page)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001034{
Christoph Hellwig9ac89282015-04-08 20:01:35 +02001035 const struct target_core_fabric_ops *tfo;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001036 struct t10_pr_registration *pr_reg;
1037 unsigned char buf[384];
1038 char i_buf[PR_REG_ISID_ID_LEN];
1039 ssize_t len = 0;
Andy Groverd2843c12013-05-16 10:40:55 -07001040 int reg_count = 0;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001041
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001042 len += sprintf(page+len, "SPC-3 PR Registrations:\n");
1043
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001044 spin_lock(&dev->t10_pr.registration_lock);
1045 list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001046 pr_reg_list) {
1047
1048 memset(buf, 0, 384);
1049 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
1050 tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
Andy Groverd2843c12013-05-16 10:40:55 -07001051 core_pr_dump_initiator_port(pr_reg, i_buf,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001052 PR_REG_ISID_ID_LEN);
1053 sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
1054 tfo->get_fabric_name(),
Andy Groverd2843c12013-05-16 10:40:55 -07001055 pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001056 pr_reg->pr_res_generation);
1057
Andy Grover6708bb22011-06-08 10:36:43 -07001058 if (len + strlen(buf) >= PAGE_SIZE)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001059 break;
1060
1061 len += sprintf(page+len, "%s", buf);
1062 reg_count++;
1063 }
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001064 spin_unlock(&dev->t10_pr.registration_lock);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001065
Andy Grover6708bb22011-06-08 10:36:43 -07001066 if (!reg_count)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001067 len += sprintf(page+len, "None\n");
1068
1069 return len;
1070}
1071
1072SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
1073
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001074static ssize_t target_core_dev_pr_show_attr_res_pr_type(
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001075 struct se_device *dev, char *page)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001076{
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001077 struct t10_pr_registration *pr_reg;
1078 ssize_t len = 0;
1079
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001080 spin_lock(&dev->dev_reservation_lock);
1081 pr_reg = dev->dev_pr_res_holder;
Christoph Hellwigd977f432012-10-10 17:37:15 -04001082 if (pr_reg) {
1083 len = sprintf(page, "SPC-3 Reservation Type: %s\n",
1084 core_scsi3_pr_dump_type(pr_reg->pr_res_type));
1085 } else {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001086 len = sprintf(page, "No SPC-3 Reservation holder\n");
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001087 }
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001088
Christoph Hellwigd977f432012-10-10 17:37:15 -04001089 spin_unlock(&dev->dev_reservation_lock);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001090 return len;
1091}
1092
1093SE_DEV_PR_ATTR_RO(res_pr_type);
1094
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001095static ssize_t target_core_dev_pr_show_attr_res_type(
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001096 struct se_device *dev, char *page)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001097{
Andy Grovera3541702015-05-19 14:44:41 -07001098 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
Christoph Hellwigd977f432012-10-10 17:37:15 -04001099 return sprintf(page, "SPC_PASSTHROUGH\n");
1100 else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
1101 return sprintf(page, "SPC2_RESERVATIONS\n");
1102 else
1103 return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001104}
1105
1106SE_DEV_PR_ATTR_RO(res_type);
1107
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001108static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001109 struct se_device *dev, char *page)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001110{
Andy Grovera3541702015-05-19 14:44:41 -07001111 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001112 return 0;
1113
1114 return sprintf(page, "APTPL Bit Status: %s\n",
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001115 (dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001116}
1117
1118SE_DEV_PR_ATTR_RO(res_aptpl_active);
1119
1120/*
1121 * res_aptpl_metadata
1122 */
1123static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001124 struct se_device *dev, char *page)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001125{
Andy Grovera3541702015-05-19 14:44:41 -07001126 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001127 return 0;
1128
1129 return sprintf(page, "Ready to process PR APTPL metadata..\n");
1130}
1131
1132enum {
1133 Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
1134 Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
1135 Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
1136 Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
1137};
1138
1139static match_table_t tokens = {
1140 {Opt_initiator_fabric, "initiator_fabric=%s"},
1141 {Opt_initiator_node, "initiator_node=%s"},
1142 {Opt_initiator_sid, "initiator_sid=%s"},
1143 {Opt_sa_res_key, "sa_res_key=%s"},
1144 {Opt_res_holder, "res_holder=%d"},
1145 {Opt_res_type, "res_type=%d"},
1146 {Opt_res_scope, "res_scope=%d"},
1147 {Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
1148 {Opt_mapped_lun, "mapped_lun=%d"},
1149 {Opt_target_fabric, "target_fabric=%s"},
1150 {Opt_target_node, "target_node=%s"},
1151 {Opt_tpgt, "tpgt=%d"},
1152 {Opt_port_rtpi, "port_rtpi=%d"},
1153 {Opt_target_lun, "target_lun=%d"},
1154 {Opt_err, NULL}
1155};
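/*
 * res_aptpl_metadata writes are parsed below as ','/'\n' separated
 * key=value pairs matching the tokens above, e.g. (illustrative, not a
 * verbatim example):
 *
 *	initiator_fabric=iSCSI,initiator_node=iqn.1994-05.com.example:client,
 *	sa_res_key=0x1234,res_holder=1,res_type=3,mapped_lun=0,
 *	target_fabric=iSCSI,target_node=iqn.2003-01.org.example:target,tpgt=1,target_lun=0
 */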
1156
1157static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001158 struct se_device *dev,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001159 const char *page,
1160 size_t count)
1161{
Jesper Juhl6d180252011-03-14 04:05:56 -07001162 unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
1163 unsigned char *t_fabric = NULL, *t_port = NULL;
Joern Engel8d213552014-09-02 17:49:56 -04001164 char *orig, *ptr, *opts;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001165 substring_t args[MAX_OPT_ARGS];
1166 unsigned long long tmp_ll;
1167 u64 sa_res_key = 0;
1168 u32 mapped_lun = 0, target_lun = 0;
1169 int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
Bart Van Assche45fb94c2015-04-14 13:00:58 +02001170 u16 tpgt = 0;
1171 u8 type = 0;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001172
Andy Grovera3541702015-05-19 14:44:41 -07001173 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
Christoph Hellwigd977f432012-10-10 17:37:15 -04001174 return 0;
1175 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001176 return 0;
1177
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001178 if (dev->export_count) {
Andy Grover6708bb22011-06-08 10:36:43 -07001179 pr_debug("Unable to process APTPL metadata while"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001180 " active fabric exports exist\n");
1181 return -EINVAL;
1182 }
1183
1184 opts = kstrdup(page, GFP_KERNEL);
1185 if (!opts)
1186 return -ENOMEM;
1187
1188 orig = opts;
Sebastian Andrzej Siewior90c161b2011-11-23 20:53:17 +01001189 while ((ptr = strsep(&opts, ",\n")) != NULL) {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001190 if (!*ptr)
1191 continue;
1192
1193 token = match_token(ptr, tokens, args);
1194 switch (token) {
1195 case Opt_initiator_fabric:
Joern Engel8d213552014-09-02 17:49:56 -04001196 i_fabric = match_strdup(args);
Jesper Juhl6d180252011-03-14 04:05:56 -07001197 if (!i_fabric) {
1198 ret = -ENOMEM;
1199 goto out;
1200 }
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001201 break;
1202 case Opt_initiator_node:
Joern Engel8d213552014-09-02 17:49:56 -04001203 i_port = match_strdup(args);
Jesper Juhl6d180252011-03-14 04:05:56 -07001204 if (!i_port) {
1205 ret = -ENOMEM;
1206 goto out;
1207 }
Dan Carpenter60d645a2011-06-15 10:03:05 -07001208 if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
Andy Grover6708bb22011-06-08 10:36:43 -07001209 pr_err("APTPL metadata initiator_node="
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001210 " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
1211 PR_APTPL_MAX_IPORT_LEN);
1212 ret = -EINVAL;
1213 break;
1214 }
1215 break;
1216 case Opt_initiator_sid:
Joern Engel8d213552014-09-02 17:49:56 -04001217 isid = match_strdup(args);
Jesper Juhl6d180252011-03-14 04:05:56 -07001218 if (!isid) {
1219 ret = -ENOMEM;
1220 goto out;
1221 }
Dan Carpenter60d645a2011-06-15 10:03:05 -07001222 if (strlen(isid) >= PR_REG_ISID_LEN) {
Andy Grover6708bb22011-06-08 10:36:43 -07001223 pr_err("APTPL metadata initiator_isid"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001224 "= exceeds PR_REG_ISID_LEN: %d\n",
1225 PR_REG_ISID_LEN);
1226 ret = -EINVAL;
1227 break;
1228 }
1229 break;
1230 case Opt_sa_res_key:
Joern Engel8d213552014-09-02 17:49:56 -04001231 ret = kstrtoull(args->from, 0, &tmp_ll);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001232 if (ret < 0) {
Joern Engel8d213552014-09-02 17:49:56 -04001233 pr_err("kstrtoull() failed for sa_res_key=\n");
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001234 goto out;
1235 }
1236 sa_res_key = (u64)tmp_ll;
1237 break;
1238 /*
1239 * PR APTPL Metadata for Reservation
1240 */
1241 case Opt_res_holder:
1242 match_int(args, &arg);
1243 res_holder = arg;
1244 break;
1245 case Opt_res_type:
1246 match_int(args, &arg);
1247 type = (u8)arg;
1248 break;
1249 case Opt_res_scope:
1250 match_int(args, &arg);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001251 break;
1252 case Opt_res_all_tg_pt:
1253 match_int(args, &arg);
1254 all_tg_pt = (int)arg;
1255 break;
1256 case Opt_mapped_lun:
1257 match_int(args, &arg);
1258 mapped_lun = (u32)arg;
1259 break;
1260 /*
1261 * PR APTPL Metadata for Target Port
1262 */
1263 case Opt_target_fabric:
Joern Engel8d213552014-09-02 17:49:56 -04001264 t_fabric = match_strdup(args);
Jesper Juhl6d180252011-03-14 04:05:56 -07001265 if (!t_fabric) {
1266 ret = -ENOMEM;
1267 goto out;
1268 }
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001269 break;
1270 case Opt_target_node:
Joern Engel8d213552014-09-02 17:49:56 -04001271 t_port = match_strdup(args);
Jesper Juhl6d180252011-03-14 04:05:56 -07001272 if (!t_port) {
1273 ret = -ENOMEM;
1274 goto out;
1275 }
Dan Carpenter60d645a2011-06-15 10:03:05 -07001276 if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
Andy Grover6708bb22011-06-08 10:36:43 -07001277 pr_err("APTPL metadata target_node="
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001278 " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
1279 PR_APTPL_MAX_TPORT_LEN);
1280 ret = -EINVAL;
1281 break;
1282 }
1283 break;
1284 case Opt_tpgt:
1285 match_int(args, &arg);
1286 tpgt = (u16)arg;
1287 break;
1288 case Opt_port_rtpi:
1289 match_int(args, &arg);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001290 break;
1291 case Opt_target_lun:
1292 match_int(args, &arg);
1293 target_lun = (u32)arg;
1294 break;
1295 default:
1296 break;
1297 }
1298 }
1299
Andy Grover6708bb22011-06-08 10:36:43 -07001300 if (!i_port || !t_port || !sa_res_key) {
1301 pr_err("Illegal parameters for APTPL registration\n");
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001302 ret = -EINVAL;
1303 goto out;
1304 }
1305
1306 if (res_holder && !(type)) {
Andy Grover6708bb22011-06-08 10:36:43 -07001307 pr_err("Illegal PR type: 0x%02x for reservation"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001308 " holder\n", type);
1309 ret = -EINVAL;
1310 goto out;
1311 }
1312
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001313 ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001314 i_port, isid, mapped_lun, t_port, tpgt, target_lun,
1315 res_holder, all_tg_pt, type);
1316out:
Jesper Juhl6d180252011-03-14 04:05:56 -07001317 kfree(i_fabric);
1318 kfree(i_port);
1319 kfree(isid);
1320 kfree(t_fabric);
1321 kfree(t_port);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001322 kfree(orig);
1323 return (ret == 0) ? count : ret;
1324}
1325
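/*
 * Illustrative input for res_aptpl_metadata: a comma- or newline-separated
 * key=value list matching the Opt_* tokens parsed above (key spellings follow
 * the token names, values are made up):
 *
 *   initiator_fabric=iSCSI,initiator_node=iqn.1994-05.com.example:i1,
 *   initiator_sid=10,sa_res_key=0x1234abcd,res_holder=1,res_type=3,
 *   mapped_lun=0,target_fabric=iSCSI,target_node=iqn.2003-01.com.example:t1,
 *   tpgt=1,port_rtpi=1,target_lun=0
 *
 * initiator_node, target_node and a non-zero sa_res_key are mandatory.
 */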
1326SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
1327
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001328CONFIGFS_EATTR_OPS(target_core_dev_pr, se_device, dev_pr_group);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001329
1330static struct configfs_attribute *target_core_dev_pr_attrs[] = {
1331 &target_core_dev_pr_res_holder.attr,
1332 &target_core_dev_pr_res_pr_all_tgt_pts.attr,
1333 &target_core_dev_pr_res_pr_generation.attr,
1334 &target_core_dev_pr_res_pr_holder_tg_port.attr,
1335 &target_core_dev_pr_res_pr_registered_i_pts.attr,
1336 &target_core_dev_pr_res_pr_type.attr,
1337 &target_core_dev_pr_res_type.attr,
1338 &target_core_dev_pr_res_aptpl_active.attr,
1339 &target_core_dev_pr_res_aptpl_metadata.attr,
1340 NULL,
1341};
1342
1343static struct configfs_item_operations target_core_dev_pr_ops = {
1344 .show_attribute = target_core_dev_pr_attr_show,
1345 .store_attribute = target_core_dev_pr_attr_store,
1346};
1347
Nicholas Bellinger91e2e392014-11-27 14:57:01 -08001348TB_CIT_SETUP(dev_pr, &target_core_dev_pr_ops, NULL, target_core_dev_pr_attrs);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001349
Nicholas Bellinger91e2e392014-11-27 14:57:01 -08001350/* End functions for struct config_item_type tb_dev_pr_cit */
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001351
Nicholas Bellinger73112ed2014-11-27 13:59:20 -08001352/* Start functions for struct config_item_type tb_dev_cit */
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001353
1354static ssize_t target_core_show_dev_info(void *p, char *page)
1355{
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001356 struct se_device *dev = p;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001357 int bl = 0;
1358 ssize_t read_bytes = 0;
1359
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001360 transport_dump_dev_state(dev, page, &bl);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001361 read_bytes += bl;
Christoph Hellwig0a06d432015-05-10 18:14:56 +02001362 read_bytes += dev->transport->show_configfs_dev_params(dev,
1363 page+read_bytes);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001364 return read_bytes;
1365}
1366
1367static struct target_core_configfs_attribute target_core_attr_dev_info = {
1368 .attr = { .ca_owner = THIS_MODULE,
1369 .ca_name = "info",
1370 .ca_mode = S_IRUGO },
1371 .show = target_core_show_dev_info,
1372 .store = NULL,
1373};
1374
1375static ssize_t target_core_store_dev_control(
1376 void *p,
1377 const char *page,
1378 size_t count)
1379{
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001380 struct se_device *dev = p;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001381
Christoph Hellwig0a06d432015-05-10 18:14:56 +02001382 return dev->transport->set_configfs_dev_params(dev, page, count);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001383}
1384
1385static struct target_core_configfs_attribute target_core_attr_dev_control = {
1386 .attr = { .ca_owner = THIS_MODULE,
1387 .ca_name = "control",
1388 .ca_mode = S_IWUSR },
1389 .show = NULL,
1390 .store = target_core_store_dev_control,
1391};
1392
1393static ssize_t target_core_show_dev_alias(void *p, char *page)
1394{
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001395 struct se_device *dev = p;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001396
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001397 if (!(dev->dev_flags & DF_USING_ALIAS))
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001398 return 0;
1399
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001400 return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001401}
1402
1403static ssize_t target_core_store_dev_alias(
1404 void *p,
1405 const char *page,
1406 size_t count)
1407{
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001408 struct se_device *dev = p;
1409 struct se_hba *hba = dev->se_hba;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001410 ssize_t read_bytes;
1411
1412 if (count > (SE_DEV_ALIAS_LEN-1)) {
Andy Grover6708bb22011-06-08 10:36:43 -07001413 pr_err("alias count: %d exceeds"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001414 " SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
1415 SE_DEV_ALIAS_LEN-1);
1416 return -EINVAL;
1417 }
1418
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001419 read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
Dan Carpenter30116842012-01-27 15:50:55 +03001420 if (!read_bytes)
1421 return -EINVAL;
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001422 if (dev->dev_alias[read_bytes - 1] == '\n')
1423 dev->dev_alias[read_bytes - 1] = '\0';
Sebastian Andrzej Siewior0877eafd2011-11-28 13:57:25 +01001424
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001425 dev->dev_flags |= DF_USING_ALIAS;
Dan Carpenter30116842012-01-27 15:50:55 +03001426
Andy Grover6708bb22011-06-08 10:36:43 -07001427 pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001428 config_item_name(&hba->hba_group.cg_item),
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001429 config_item_name(&dev->dev_group.cg_item),
1430 dev->dev_alias);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001431
1432 return read_bytes;
1433}
1434
1435static struct target_core_configfs_attribute target_core_attr_dev_alias = {
1436 .attr = { .ca_owner = THIS_MODULE,
1437 .ca_name = "alias",
1438 .ca_mode = S_IRUGO | S_IWUSR },
1439 .show = target_core_show_dev_alias,
1440 .store = target_core_store_dev_alias,
1441};
1442
1443static ssize_t target_core_show_dev_udev_path(void *p, char *page)
1444{
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001445 struct se_device *dev = p;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001446
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001447 if (!(dev->dev_flags & DF_USING_UDEV_PATH))
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001448 return 0;
1449
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001450 return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001451}
1452
1453static ssize_t target_core_store_dev_udev_path(
1454 void *p,
1455 const char *page,
1456 size_t count)
1457{
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001458 struct se_device *dev = p;
1459 struct se_hba *hba = dev->se_hba;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001460 ssize_t read_bytes;
1461
1462 if (count > (SE_UDEV_PATH_LEN-1)) {
Andy Grover6708bb22011-06-08 10:36:43 -07001463 pr_err("udev_path count: %d exceeds"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001464 " SE_UDEV_PATH_LEN-1: %u\n", (int)count,
1465 SE_UDEV_PATH_LEN-1);
1466 return -EINVAL;
1467 }
1468
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001469 read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001470 "%s", page);
Dan Carpenter30116842012-01-27 15:50:55 +03001471 if (!read_bytes)
1472 return -EINVAL;
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001473 if (dev->udev_path[read_bytes - 1] == '\n')
1474 dev->udev_path[read_bytes - 1] = '\0';
Sebastian Andrzej Siewior0877eafd2011-11-28 13:57:25 +01001475
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001476 dev->dev_flags |= DF_USING_UDEV_PATH;
Dan Carpenter30116842012-01-27 15:50:55 +03001477
Andy Grover6708bb22011-06-08 10:36:43 -07001478 pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001479 config_item_name(&hba->hba_group.cg_item),
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001480 config_item_name(&dev->dev_group.cg_item),
1481 dev->udev_path);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001482
1483 return read_bytes;
1484}
1485
1486static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
1487 .attr = { .ca_owner = THIS_MODULE,
1488 .ca_name = "udev_path",
1489 .ca_mode = S_IRUGO | S_IWUSR },
1490 .show = target_core_show_dev_udev_path,
1491 .store = target_core_store_dev_udev_path,
1492};
1493
Andy Grover64146db2013-04-30 11:59:15 -07001494static ssize_t target_core_show_dev_enable(void *p, char *page)
1495{
1496 struct se_device *dev = p;
1497
1498 return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED));
1499}
1500
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001501static ssize_t target_core_store_dev_enable(
1502 void *p,
1503 const char *page,
1504 size_t count)
1505{
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001506 struct se_device *dev = p;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001507 char *ptr;
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001508 int ret;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001509
1510 ptr = strstr(page, "1");
Andy Grover6708bb22011-06-08 10:36:43 -07001511 if (!ptr) {
 1512		pr_err("For dev_enable ops, the only valid value"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001513 " is \"1\"\n");
1514 return -EINVAL;
1515 }
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001516
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001517 ret = target_configure_device(dev);
1518 if (ret)
1519 return ret;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001520 return count;
1521}
1522
1523static struct target_core_configfs_attribute target_core_attr_dev_enable = {
1524 .attr = { .ca_owner = THIS_MODULE,
1525 .ca_name = "enable",
Andy Grover64146db2013-04-30 11:59:15 -07001526 .ca_mode = S_IRUGO | S_IWUSR },
1527 .show = target_core_show_dev_enable,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001528 .store = target_core_store_dev_enable,
1529};
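/*
 * Illustrative usage: writing "1" (the only accepted value) to
 * /sys/kernel/config/target/core/$HBA/$DEV/enable calls
 * target_configure_device() for the underlying se_device.
 */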
1530
1531static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
1532{
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001533 struct se_device *dev = p;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001534 struct config_item *lu_ci;
1535 struct t10_alua_lu_gp *lu_gp;
1536 struct t10_alua_lu_gp_member *lu_gp_mem;
1537 ssize_t len = 0;
1538
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001539 lu_gp_mem = dev->dev_alua_lu_gp_mem;
Christoph Hellwigc87fbd52012-10-10 17:37:16 -04001540 if (!lu_gp_mem)
1541 return 0;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001542
1543 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1544 lu_gp = lu_gp_mem->lu_gp;
Andy Grover6708bb22011-06-08 10:36:43 -07001545 if (lu_gp) {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001546 lu_ci = &lu_gp->lu_gp_group.cg_item;
1547 len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
1548 config_item_name(lu_ci), lu_gp->lu_gp_id);
1549 }
1550 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1551
1552 return len;
1553}
1554
1555static ssize_t target_core_store_alua_lu_gp(
1556 void *p,
1557 const char *page,
1558 size_t count)
1559{
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001560 struct se_device *dev = p;
1561 struct se_hba *hba = dev->se_hba;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001562 struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
1563 struct t10_alua_lu_gp_member *lu_gp_mem;
1564 unsigned char buf[LU_GROUP_NAME_BUF];
1565 int move = 0;
1566
Christoph Hellwigc87fbd52012-10-10 17:37:16 -04001567 lu_gp_mem = dev->dev_alua_lu_gp_mem;
1568 if (!lu_gp_mem)
1569 return 0;
1570
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001571 if (count > LU_GROUP_NAME_BUF) {
Andy Grover6708bb22011-06-08 10:36:43 -07001572 pr_err("ALUA LU Group Alias too large!\n");
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001573 return -EINVAL;
1574 }
1575 memset(buf, 0, LU_GROUP_NAME_BUF);
1576 memcpy(buf, page, count);
1577 /*
1578 * Any ALUA logical unit alias besides "NULL" means we will be
1579 * making a new group association.
1580 */
1581 if (strcmp(strstrip(buf), "NULL")) {
1582 /*
1583 * core_alua_get_lu_gp_by_name() will increment reference to
1584 * struct t10_alua_lu_gp. This reference is released with
 1585		 * core_alua_put_lu_gp_from_name() below.
1586 */
1587 lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
Andy Grover6708bb22011-06-08 10:36:43 -07001588 if (!lu_gp_new)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001589 return -ENODEV;
1590 }
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001591
1592 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1593 lu_gp = lu_gp_mem->lu_gp;
Andy Grover6708bb22011-06-08 10:36:43 -07001594 if (lu_gp) {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001595 /*
1596 * Clearing an existing lu_gp association, and replacing
1597 * with NULL
1598 */
Andy Grover6708bb22011-06-08 10:36:43 -07001599 if (!lu_gp_new) {
1600 pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001601 " from ALUA LU Group: core/alua/lu_gps/%s, ID:"
1602 " %hu\n",
1603 config_item_name(&hba->hba_group.cg_item),
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001604 config_item_name(&dev->dev_group.cg_item),
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001605 config_item_name(&lu_gp->lu_gp_group.cg_item),
1606 lu_gp->lu_gp_id);
1607
1608 __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
1609 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1610
1611 return count;
1612 }
1613 /*
1614 * Removing existing association of lu_gp_mem with lu_gp
1615 */
1616 __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
1617 move = 1;
1618 }
1619 /*
1620 * Associate lu_gp_mem with lu_gp_new.
1621 */
1622 __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
1623 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1624
Andy Grover6708bb22011-06-08 10:36:43 -07001625 pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001626 " core/alua/lu_gps/%s, ID: %hu\n",
1627 (move) ? "Moving" : "Adding",
1628 config_item_name(&hba->hba_group.cg_item),
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001629 config_item_name(&dev->dev_group.cg_item),
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001630 config_item_name(&lu_gp_new->lu_gp_group.cg_item),
1631 lu_gp_new->lu_gp_id);
1632
1633 core_alua_put_lu_gp_from_name(lu_gp_new);
1634 return count;
1635}
1636
1637static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
1638 .attr = { .ca_owner = THIS_MODULE,
1639 .ca_name = "alua_lu_gp",
1640 .ca_mode = S_IRUGO | S_IWUSR },
1641 .show = target_core_show_alua_lu_gp,
1642 .store = target_core_store_alua_lu_gp,
1643};
1644
Hannes Reinecke229d4f12013-12-17 09:18:50 +01001645static ssize_t target_core_show_dev_lba_map(void *p, char *page)
1646{
1647 struct se_device *dev = p;
1648 struct t10_alua_lba_map *map;
1649 struct t10_alua_lba_map_member *mem;
1650 char *b = page;
1651 int bl = 0;
1652 char state;
1653
1654 spin_lock(&dev->t10_alua.lba_map_lock);
1655 if (!list_empty(&dev->t10_alua.lba_map_list))
1656 bl += sprintf(b + bl, "%u %u\n",
1657 dev->t10_alua.lba_map_segment_size,
1658 dev->t10_alua.lba_map_segment_multiplier);
1659 list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
1660 bl += sprintf(b + bl, "%llu %llu",
1661 map->lba_map_first_lba, map->lba_map_last_lba);
1662 list_for_each_entry(mem, &map->lba_map_mem_list,
1663 lba_map_mem_list) {
1664 switch (mem->lba_map_mem_alua_state) {
1665 case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
1666 state = 'O';
1667 break;
1668 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
1669 state = 'A';
1670 break;
1671 case ALUA_ACCESS_STATE_STANDBY:
1672 state = 'S';
1673 break;
1674 case ALUA_ACCESS_STATE_UNAVAILABLE:
1675 state = 'U';
1676 break;
1677 default:
1678 state = '.';
1679 break;
1680 }
1681 bl += sprintf(b + bl, " %d:%c",
1682 mem->lba_map_mem_alua_pg_id, state);
1683 }
1684 bl += sprintf(b + bl, "\n");
1685 }
1686 spin_unlock(&dev->t10_alua.lba_map_lock);
1687 return bl;
1688}
1689
1690static ssize_t target_core_store_dev_lba_map(
1691 void *p,
1692 const char *page,
1693 size_t count)
1694{
1695 struct se_device *dev = p;
1696 struct t10_alua_lba_map *lba_map = NULL;
1697 struct list_head lba_list;
1698 char *map_entries, *ptr;
1699 char state;
1700 int pg_num = -1, pg;
1701 int ret = 0, num = 0, pg_id, alua_state;
1702 unsigned long start_lba = -1, end_lba = -1;
1703 unsigned long segment_size = -1, segment_mult = -1;
1704
1705 map_entries = kstrdup(page, GFP_KERNEL);
1706 if (!map_entries)
1707 return -ENOMEM;
1708
1709 INIT_LIST_HEAD(&lba_list);
1710 while ((ptr = strsep(&map_entries, "\n")) != NULL) {
1711 if (!*ptr)
1712 continue;
1713
1714 if (num == 0) {
1715 if (sscanf(ptr, "%lu %lu\n",
1716 &segment_size, &segment_mult) != 2) {
1717 pr_err("Invalid line %d\n", num);
1718 ret = -EINVAL;
1719 break;
1720 }
1721 num++;
1722 continue;
1723 }
1724 if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
1725 pr_err("Invalid line %d\n", num);
1726 ret = -EINVAL;
1727 break;
1728 }
1729 ptr = strchr(ptr, ' ');
1730 if (!ptr) {
1731 pr_err("Invalid line %d, missing end lba\n", num);
1732 ret = -EINVAL;
1733 break;
1734 }
1735 ptr++;
1736 ptr = strchr(ptr, ' ');
1737 if (!ptr) {
1738 pr_err("Invalid line %d, missing state definitions\n",
1739 num);
1740 ret = -EINVAL;
1741 break;
1742 }
1743 ptr++;
1744 lba_map = core_alua_allocate_lba_map(&lba_list,
1745 start_lba, end_lba);
1746 if (IS_ERR(lba_map)) {
1747 ret = PTR_ERR(lba_map);
1748 break;
1749 }
1750 pg = 0;
1751 while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
1752 switch (state) {
1753 case 'O':
1754 alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
1755 break;
1756 case 'A':
1757 alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
1758 break;
1759 case 'S':
1760 alua_state = ALUA_ACCESS_STATE_STANDBY;
1761 break;
1762 case 'U':
1763 alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
1764 break;
1765 default:
1766 pr_err("Invalid ALUA state '%c'\n", state);
1767 ret = -EINVAL;
1768 goto out;
1769 }
1770
1771 ret = core_alua_allocate_lba_map_mem(lba_map,
1772 pg_id, alua_state);
1773 if (ret) {
1774 pr_err("Invalid target descriptor %d:%c "
1775 "at line %d\n",
1776 pg_id, state, num);
1777 break;
1778 }
1779 pg++;
1780 ptr = strchr(ptr, ' ');
1781 if (ptr)
1782 ptr++;
1783 else
1784 break;
1785 }
1786 if (pg_num == -1)
1787 pg_num = pg;
1788 else if (pg != pg_num) {
 1789			pr_err("Only %d of %d port group definitions "
1790 "at line %d\n", pg, pg_num, num);
1791 ret = -EINVAL;
1792 break;
1793 }
1794 num++;
1795 }
1796out:
1797 if (ret) {
1798 core_alua_free_lba_map(&lba_list);
1799 count = ret;
1800 } else
1801 core_alua_set_lba_map(dev, &lba_list,
1802 segment_size, segment_mult);
1803 kfree(map_entries);
1804 return count;
1805}
1806
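/*
 * lba_map text format, as produced and parsed above (illustrative):
 *
 *   <segment_size> <segment_multiplier>
 *   <first_lba> <last_lba> <pg_id>:<state> [<pg_id>:<state> ...]
 *   ...
 *
 * where <state> is O (active/optimized), A (active/non-optimized),
 * S (standby) or U (unavailable), and every map line must carry the same
 * number of port group descriptors.
 */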
1807static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {
1808 .attr = { .ca_owner = THIS_MODULE,
1809 .ca_name = "lba_map",
1810 .ca_mode = S_IRUGO | S_IWUSR },
1811 .show = target_core_show_dev_lba_map,
1812 .store = target_core_store_dev_lba_map,
1813};
1814
Nicholas Bellinger73112ed2014-11-27 13:59:20 -08001815static struct configfs_attribute *target_core_dev_attrs[] = {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001816 &target_core_attr_dev_info.attr,
1817 &target_core_attr_dev_control.attr,
1818 &target_core_attr_dev_alias.attr,
1819 &target_core_attr_dev_udev_path.attr,
1820 &target_core_attr_dev_enable.attr,
1821 &target_core_attr_dev_alua_lu_gp.attr,
Hannes Reinecke229d4f12013-12-17 09:18:50 +01001822 &target_core_attr_dev_lba_map.attr,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001823 NULL,
1824};
1825
1826static void target_core_dev_release(struct config_item *item)
1827{
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001828 struct config_group *dev_cg = to_config_group(item);
1829 struct se_device *dev =
1830 container_of(dev_cg, struct se_device, dev_group);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001831
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001832 kfree(dev_cg->default_groups);
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001833 target_free_device(dev);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001834}
1835
1836static ssize_t target_core_dev_show(struct config_item *item,
1837 struct configfs_attribute *attr,
1838 char *page)
1839{
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001840 struct config_group *dev_cg = to_config_group(item);
1841 struct se_device *dev =
1842 container_of(dev_cg, struct se_device, dev_group);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001843 struct target_core_configfs_attribute *tc_attr = container_of(
1844 attr, struct target_core_configfs_attribute, attr);
1845
Andy Grover6708bb22011-06-08 10:36:43 -07001846 if (!tc_attr->show)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001847 return -EINVAL;
1848
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001849 return tc_attr->show(dev, page);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001850}
1851
1852static ssize_t target_core_dev_store(struct config_item *item,
1853 struct configfs_attribute *attr,
1854 const char *page, size_t count)
1855{
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001856 struct config_group *dev_cg = to_config_group(item);
1857 struct se_device *dev =
1858 container_of(dev_cg, struct se_device, dev_group);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001859 struct target_core_configfs_attribute *tc_attr = container_of(
1860 attr, struct target_core_configfs_attribute, attr);
1861
Andy Grover6708bb22011-06-08 10:36:43 -07001862 if (!tc_attr->store)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001863 return -EINVAL;
1864
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001865 return tc_attr->store(dev, page, count);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001866}
1867
1868static struct configfs_item_operations target_core_dev_item_ops = {
1869 .release = target_core_dev_release,
1870 .show_attribute = target_core_dev_show,
1871 .store_attribute = target_core_dev_store,
1872};
1873
Nicholas Bellinger73112ed2014-11-27 13:59:20 -08001874TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001875
Nicholas Bellinger73112ed2014-11-27 13:59:20 -08001876/* End functions for struct config_item_type tb_dev_cit */
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001877
1878/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
1879
1880CONFIGFS_EATTR_STRUCT(target_core_alua_lu_gp, t10_alua_lu_gp);
1881#define SE_DEV_ALUA_LU_ATTR(_name, _mode) \
1882static struct target_core_alua_lu_gp_attribute \
1883 target_core_alua_lu_gp_##_name = \
1884 __CONFIGFS_EATTR(_name, _mode, \
1885 target_core_alua_lu_gp_show_attr_##_name, \
1886 target_core_alua_lu_gp_store_attr_##_name);
1887
1888#define SE_DEV_ALUA_LU_ATTR_RO(_name) \
1889static struct target_core_alua_lu_gp_attribute \
1890 target_core_alua_lu_gp_##_name = \
1891 __CONFIGFS_EATTR_RO(_name, \
1892 target_core_alua_lu_gp_show_attr_##_name);
1893
1894/*
1895 * lu_gp_id
1896 */
1897static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id(
1898 struct t10_alua_lu_gp *lu_gp,
1899 char *page)
1900{
Andy Grover6708bb22011-06-08 10:36:43 -07001901 if (!lu_gp->lu_gp_valid_id)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001902 return 0;
1903
1904 return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
1905}
1906
1907static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
1908 struct t10_alua_lu_gp *lu_gp,
1909 const char *page,
1910 size_t count)
1911{
1912 struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
1913 unsigned long lu_gp_id;
1914 int ret;
1915
Jingoo Han57103d72013-07-19 16:22:19 +09001916 ret = kstrtoul(page, 0, &lu_gp_id);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001917 if (ret < 0) {
Jingoo Han57103d72013-07-19 16:22:19 +09001918 pr_err("kstrtoul() returned %d for"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001919 " lu_gp_id\n", ret);
Jingoo Han57103d72013-07-19 16:22:19 +09001920 return ret;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001921 }
1922 if (lu_gp_id > 0x0000ffff) {
Andy Grover6708bb22011-06-08 10:36:43 -07001923 pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001924 " 0x0000ffff\n", lu_gp_id);
1925 return -EINVAL;
1926 }
1927
1928 ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
1929 if (ret < 0)
1930 return -EINVAL;
1931
Andy Grover6708bb22011-06-08 10:36:43 -07001932 pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001933 " Group: core/alua/lu_gps/%s to ID: %hu\n",
1934 config_item_name(&alua_lu_gp_cg->cg_item),
1935 lu_gp->lu_gp_id);
1936
1937 return count;
1938}
1939
1940SE_DEV_ALUA_LU_ATTR(lu_gp_id, S_IRUGO | S_IWUSR);
1941
1942/*
1943 * members
1944 */
1945static ssize_t target_core_alua_lu_gp_show_attr_members(
1946 struct t10_alua_lu_gp *lu_gp,
1947 char *page)
1948{
1949 struct se_device *dev;
1950 struct se_hba *hba;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001951 struct t10_alua_lu_gp_member *lu_gp_mem;
1952 ssize_t len = 0, cur_len;
1953 unsigned char buf[LU_GROUP_NAME_BUF];
1954
1955 memset(buf, 0, LU_GROUP_NAME_BUF);
1956
1957 spin_lock(&lu_gp->lu_gp_lock);
1958 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
1959 dev = lu_gp_mem->lu_gp_mem_dev;
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001960 hba = dev->se_hba;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001961
1962 cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
1963 config_item_name(&hba->hba_group.cg_item),
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001964 config_item_name(&dev->dev_group.cg_item));
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001965 cur_len++; /* Extra byte for NULL terminator */
1966
1967 if ((cur_len + len) > PAGE_SIZE) {
Andy Grover6708bb22011-06-08 10:36:43 -07001968 pr_warn("Ran out of lu_gp_show_attr"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001969 "_members buffer\n");
1970 break;
1971 }
1972 memcpy(page+len, buf, cur_len);
1973 len += cur_len;
1974 }
1975 spin_unlock(&lu_gp->lu_gp_lock);
1976
1977 return len;
1978}
1979
1980SE_DEV_ALUA_LU_ATTR_RO(members);
1981
1982CONFIGFS_EATTR_OPS(target_core_alua_lu_gp, t10_alua_lu_gp, lu_gp_group);
1983
1984static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
1985 &target_core_alua_lu_gp_lu_gp_id.attr,
1986 &target_core_alua_lu_gp_members.attr,
1987 NULL,
1988};
1989
Nicholas Bellinger1f6fe7c2011-02-09 15:34:54 -08001990static void target_core_alua_lu_gp_release(struct config_item *item)
1991{
1992 struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
1993 struct t10_alua_lu_gp, lu_gp_group);
1994
1995 core_alua_free_lu_gp(lu_gp);
1996}
1997
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001998static struct configfs_item_operations target_core_alua_lu_gp_ops = {
Nicholas Bellinger1f6fe7c2011-02-09 15:34:54 -08001999 .release = target_core_alua_lu_gp_release,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002000 .show_attribute = target_core_alua_lu_gp_attr_show,
2001 .store_attribute = target_core_alua_lu_gp_attr_store,
2002};
2003
2004static struct config_item_type target_core_alua_lu_gp_cit = {
2005 .ct_item_ops = &target_core_alua_lu_gp_ops,
2006 .ct_attrs = target_core_alua_lu_gp_attrs,
2007 .ct_owner = THIS_MODULE,
2008};
2009
2010/* End functions for struct config_item_type target_core_alua_lu_gp_cit */
2011
2012/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */
2013
2014static struct config_group *target_core_alua_create_lu_gp(
2015 struct config_group *group,
2016 const char *name)
2017{
2018 struct t10_alua_lu_gp *lu_gp;
2019 struct config_group *alua_lu_gp_cg = NULL;
2020 struct config_item *alua_lu_gp_ci = NULL;
2021
2022 lu_gp = core_alua_allocate_lu_gp(name, 0);
2023 if (IS_ERR(lu_gp))
2024 return NULL;
2025
2026 alua_lu_gp_cg = &lu_gp->lu_gp_group;
2027 alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;
2028
2029 config_group_init_type_name(alua_lu_gp_cg, name,
2030 &target_core_alua_lu_gp_cit);
2031
Andy Grover6708bb22011-06-08 10:36:43 -07002032 pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002033 " Group: core/alua/lu_gps/%s\n",
2034 config_item_name(alua_lu_gp_ci));
2035
2036 return alua_lu_gp_cg;
2037
2038}
2039
2040static void target_core_alua_drop_lu_gp(
2041 struct config_group *group,
2042 struct config_item *item)
2043{
2044 struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
2045 struct t10_alua_lu_gp, lu_gp_group);
2046
Andy Grover6708bb22011-06-08 10:36:43 -07002047 pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002048 " Group: core/alua/lu_gps/%s, ID: %hu\n",
2049 config_item_name(item), lu_gp->lu_gp_id);
Nicholas Bellinger1f6fe7c2011-02-09 15:34:54 -08002050 /*
2051 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
2052 * -> target_core_alua_lu_gp_release()
2053 */
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002054 config_item_put(item);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002055}
2056
2057static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
2058 .make_group = &target_core_alua_create_lu_gp,
2059 .drop_item = &target_core_alua_drop_lu_gp,
2060};
2061
2062static struct config_item_type target_core_alua_lu_gps_cit = {
2063 .ct_item_ops = NULL,
2064 .ct_group_ops = &target_core_alua_lu_gps_group_ops,
2065 .ct_owner = THIS_MODULE,
2066};
2067
2068/* End functions for struct config_item_type target_core_alua_lu_gps_cit */
2069
2070/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
2071
2072CONFIGFS_EATTR_STRUCT(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp);
2073#define SE_DEV_ALUA_TG_PT_ATTR(_name, _mode) \
2074static struct target_core_alua_tg_pt_gp_attribute \
2075 target_core_alua_tg_pt_gp_##_name = \
2076 __CONFIGFS_EATTR(_name, _mode, \
2077 target_core_alua_tg_pt_gp_show_attr_##_name, \
2078 target_core_alua_tg_pt_gp_store_attr_##_name);
2079
2080#define SE_DEV_ALUA_TG_PT_ATTR_RO(_name) \
2081static struct target_core_alua_tg_pt_gp_attribute \
2082 target_core_alua_tg_pt_gp_##_name = \
2083 __CONFIGFS_EATTR_RO(_name, \
2084 target_core_alua_tg_pt_gp_show_attr_##_name);
2085
2086/*
2087 * alua_access_state
2088 */
2089static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state(
2090 struct t10_alua_tg_pt_gp *tg_pt_gp,
2091 char *page)
2092{
2093 return sprintf(page, "%d\n",
2094 atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state));
2095}
2096
2097static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
2098 struct t10_alua_tg_pt_gp *tg_pt_gp,
2099 const char *page,
2100 size_t count)
2101{
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04002102 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002103 unsigned long tmp;
2104 int new_state, ret;
2105
Andy Grover6708bb22011-06-08 10:36:43 -07002106 if (!tg_pt_gp->tg_pt_gp_valid_id) {
Hannes Reinecke125d0112013-11-19 09:07:46 +01002107		pr_err("Unable to do implicit ALUA transition on non-valid"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002108 " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
2109 return -EINVAL;
2110 }
Nicholas Bellingerf1453772014-06-06 00:52:57 -07002111 if (!(dev->dev_flags & DF_CONFIGURED)) {
2112 pr_err("Unable to set alua_access_state while device is"
2113 " not configured\n");
2114 return -ENODEV;
2115 }
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002116
Jingoo Han57103d72013-07-19 16:22:19 +09002117 ret = kstrtoul(page, 0, &tmp);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002118 if (ret < 0) {
Andy Grover6708bb22011-06-08 10:36:43 -07002119 pr_err("Unable to extract new ALUA access state from"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002120 " %s\n", page);
Jingoo Han57103d72013-07-19 16:22:19 +09002121 return ret;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002122 }
2123 new_state = (int)tmp;
2124
Hannes Reinecke125d0112013-11-19 09:07:46 +01002125 if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
2126 pr_err("Unable to process implicit configfs ALUA"
2127 " transition while TPGS_IMPLICIT_ALUA is disabled\n");
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002128 return -EINVAL;
2129 }
Hannes Reineckec66094b2013-12-17 09:18:49 +01002130 if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
2131 new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
2132 /* LBA DEPENDENT is only allowed with implicit ALUA */
2133 pr_err("Unable to process implicit configfs ALUA transition"
2134 " while explicit ALUA management is enabled\n");
2135 return -EINVAL;
2136 }
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002137
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04002138 ret = core_alua_do_port_transition(tg_pt_gp, dev,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002139 NULL, NULL, new_state, 0);
2140 return (!ret) ? count : -EINVAL;
2141}
2142
2143SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR);
2144
2145/*
2146 * alua_access_status
2147 */
2148static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status(
2149 struct t10_alua_tg_pt_gp *tg_pt_gp,
2150 char *page)
2151{
2152 return sprintf(page, "%s\n",
2153 core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
2154}
2155
2156static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
2157 struct t10_alua_tg_pt_gp *tg_pt_gp,
2158 const char *page,
2159 size_t count)
2160{
2161 unsigned long tmp;
2162 int new_status, ret;
2163
Andy Grover6708bb22011-06-08 10:36:43 -07002164 if (!tg_pt_gp->tg_pt_gp_valid_id) {
 2165		pr_err("Unable to set ALUA access status on non"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002166 " valid tg_pt_gp ID: %hu\n",
2167 tg_pt_gp->tg_pt_gp_valid_id);
2168 return -EINVAL;
2169 }
2170
Jingoo Han57103d72013-07-19 16:22:19 +09002171 ret = kstrtoul(page, 0, &tmp);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002172 if (ret < 0) {
Andy Grover6708bb22011-06-08 10:36:43 -07002173 pr_err("Unable to extract new ALUA access status"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002174 " from %s\n", page);
Jingoo Han57103d72013-07-19 16:22:19 +09002175 return ret;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002176 }
2177 new_status = (int)tmp;
2178
2179 if ((new_status != ALUA_STATUS_NONE) &&
Hannes Reinecke125d0112013-11-19 09:07:46 +01002180 (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
2181 (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
Andy Grover6708bb22011-06-08 10:36:43 -07002182 pr_err("Illegal ALUA access status: 0x%02x\n",
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002183 new_status);
2184 return -EINVAL;
2185 }
2186
2187 tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
2188 return count;
2189}
2190
2191SE_DEV_ALUA_TG_PT_ATTR(alua_access_status, S_IRUGO | S_IWUSR);
2192
2193/*
2194 * alua_access_type
2195 */
2196static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type(
2197 struct t10_alua_tg_pt_gp *tg_pt_gp,
2198 char *page)
2199{
2200 return core_alua_show_access_type(tg_pt_gp, page);
2201}
2202
2203static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
2204 struct t10_alua_tg_pt_gp *tg_pt_gp,
2205 const char *page,
2206 size_t count)
2207{
2208 return core_alua_store_access_type(tg_pt_gp, page, count);
2209}
2210
2211SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);
2212
2213/*
Hannes Reinecke6be526c2013-11-19 09:07:50 +01002214 * alua_supported_states
2215 */
Hannes Reineckeb0a382c2013-11-19 09:07:51 +01002216
2217#define SE_DEV_ALUA_SUPPORT_STATE_SHOW(_name, _var, _bit) \
2218static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_##_name( \
2219 struct t10_alua_tg_pt_gp *t, char *p) \
2220{ \
2221 return sprintf(p, "%d\n", !!(t->_var & _bit)); \
Hannes Reinecke6be526c2013-11-19 09:07:50 +01002222}
2223
Hannes Reineckeb0a382c2013-11-19 09:07:51 +01002224#define SE_DEV_ALUA_SUPPORT_STATE_STORE(_name, _var, _bit) \
2225static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_##_name(\
2226 struct t10_alua_tg_pt_gp *t, const char *p, size_t c) \
2227{ \
2228 unsigned long tmp; \
2229 int ret; \
2230 \
2231 if (!t->tg_pt_gp_valid_id) { \
2232 pr_err("Unable to do set ##_name ALUA state on non" \
2233 " valid tg_pt_gp ID: %hu\n", \
2234 t->tg_pt_gp_valid_id); \
2235 return -EINVAL; \
2236 } \
2237 \
2238 ret = kstrtoul(p, 0, &tmp); \
2239 if (ret < 0) { \
2240 pr_err("Invalid value '%s', must be '0' or '1'\n", p); \
2241 return -EINVAL; \
2242 } \
2243 if (tmp > 1) { \
2244 pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
2245 return -EINVAL; \
2246 } \
Sebastian Herbszt1f0b0302014-09-01 00:17:53 +02002247 if (tmp) \
Hannes Reineckeb0a382c2013-11-19 09:07:51 +01002248 t->_var |= _bit; \
2249 else \
2250 t->_var &= ~_bit; \
2251 \
2252 return c; \
Hannes Reinecke6be526c2013-11-19 09:07:50 +01002253}
2254
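/*
 * Each SHOW/STORE pair below generates a configfs attribute that reports,
 * and for the writable attributes toggles, one bit of
 * tg_pt_gp_alua_supported_states using the values "0" and "1".
 */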
Hannes Reineckeb0a382c2013-11-19 09:07:51 +01002255SE_DEV_ALUA_SUPPORT_STATE_SHOW(transitioning,
2256 tg_pt_gp_alua_supported_states, ALUA_T_SUP);
2257SE_DEV_ALUA_SUPPORT_STATE_STORE(transitioning,
2258 tg_pt_gp_alua_supported_states, ALUA_T_SUP);
2259SE_DEV_ALUA_TG_PT_ATTR(alua_support_transitioning, S_IRUGO | S_IWUSR);
2260
2261SE_DEV_ALUA_SUPPORT_STATE_SHOW(offline,
2262 tg_pt_gp_alua_supported_states, ALUA_O_SUP);
2263SE_DEV_ALUA_SUPPORT_STATE_STORE(offline,
2264 tg_pt_gp_alua_supported_states, ALUA_O_SUP);
2265SE_DEV_ALUA_TG_PT_ATTR(alua_support_offline, S_IRUGO | S_IWUSR);
2266
2267SE_DEV_ALUA_SUPPORT_STATE_SHOW(lba_dependent,
2268 tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
2269SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent,
2270 tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
Hannes Reineckec66094b2013-12-17 09:18:49 +01002271SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO);
Hannes Reineckeb0a382c2013-11-19 09:07:51 +01002272
2273SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable,
2274 tg_pt_gp_alua_supported_states, ALUA_U_SUP);
2275SE_DEV_ALUA_SUPPORT_STATE_STORE(unavailable,
2276 tg_pt_gp_alua_supported_states, ALUA_U_SUP);
2277SE_DEV_ALUA_TG_PT_ATTR(alua_support_unavailable, S_IRUGO | S_IWUSR);
2278
2279SE_DEV_ALUA_SUPPORT_STATE_SHOW(standby,
2280 tg_pt_gp_alua_supported_states, ALUA_S_SUP);
2281SE_DEV_ALUA_SUPPORT_STATE_STORE(standby,
2282 tg_pt_gp_alua_supported_states, ALUA_S_SUP);
2283SE_DEV_ALUA_TG_PT_ATTR(alua_support_standby, S_IRUGO | S_IWUSR);
2284
2285SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_optimized,
2286 tg_pt_gp_alua_supported_states, ALUA_AO_SUP);
2287SE_DEV_ALUA_SUPPORT_STATE_STORE(active_optimized,
2288 tg_pt_gp_alua_supported_states, ALUA_AO_SUP);
2289SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_optimized, S_IRUGO | S_IWUSR);
2290
2291SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_nonoptimized,
2292 tg_pt_gp_alua_supported_states, ALUA_AN_SUP);
2293SE_DEV_ALUA_SUPPORT_STATE_STORE(active_nonoptimized,
2294 tg_pt_gp_alua_supported_states, ALUA_AN_SUP);
2295SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_nonoptimized, S_IRUGO | S_IWUSR);
Hannes Reinecke6be526c2013-11-19 09:07:50 +01002296
2297/*
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002298 * alua_write_metadata
2299 */
2300static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(
2301 struct t10_alua_tg_pt_gp *tg_pt_gp,
2302 char *page)
2303{
2304 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata);
2305}
2306
2307static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
2308 struct t10_alua_tg_pt_gp *tg_pt_gp,
2309 const char *page,
2310 size_t count)
2311{
2312 unsigned long tmp;
2313 int ret;
2314
Jingoo Han57103d72013-07-19 16:22:19 +09002315 ret = kstrtoul(page, 0, &tmp);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002316 if (ret < 0) {
Andy Grover6708bb22011-06-08 10:36:43 -07002317 pr_err("Unable to extract alua_write_metadata\n");
Jingoo Han57103d72013-07-19 16:22:19 +09002318 return ret;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002319 }
2320
2321 if ((tmp != 0) && (tmp != 1)) {
Andy Grover6708bb22011-06-08 10:36:43 -07002322 pr_err("Illegal value for alua_write_metadata:"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002323 " %lu\n", tmp);
2324 return -EINVAL;
2325 }
2326 tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;
2327
2328 return count;
2329}
2330
2331SE_DEV_ALUA_TG_PT_ATTR(alua_write_metadata, S_IRUGO | S_IWUSR);
2332
2335/*
2336 * nonop_delay_msecs
2337 */
2338static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs(
2339 struct t10_alua_tg_pt_gp *tg_pt_gp,
2340 char *page)
2341{
2342 return core_alua_show_nonop_delay_msecs(tg_pt_gp, page);
2344}
2345
2346static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs(
2347 struct t10_alua_tg_pt_gp *tg_pt_gp,
2348 const char *page,
2349 size_t count)
2350{
2351 return core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count);
2352}
2353
2354SE_DEV_ALUA_TG_PT_ATTR(nonop_delay_msecs, S_IRUGO | S_IWUSR);
2355
2356/*
2357 * trans_delay_msecs
2358 */
2359static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs(
2360 struct t10_alua_tg_pt_gp *tg_pt_gp,
2361 char *page)
2362{
2363 return core_alua_show_trans_delay_msecs(tg_pt_gp, page);
2364}
2365
2366static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
2367 struct t10_alua_tg_pt_gp *tg_pt_gp,
2368 const char *page,
2369 size_t count)
2370{
2371 return core_alua_store_trans_delay_msecs(tg_pt_gp, page, count);
2372}
2373
2374SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
2375
2376/*
Hannes Reinecke125d0112013-11-19 09:07:46 +01002377 * implicit_trans_secs
Nicholas Bellinger5b9a4d72012-05-16 22:02:34 -07002378 */
Hannes Reinecke125d0112013-11-19 09:07:46 +01002379static ssize_t target_core_alua_tg_pt_gp_show_attr_implicit_trans_secs(
Nicholas Bellinger5b9a4d72012-05-16 22:02:34 -07002380 struct t10_alua_tg_pt_gp *tg_pt_gp,
2381 char *page)
2382{
Hannes Reinecke125d0112013-11-19 09:07:46 +01002383 return core_alua_show_implicit_trans_secs(tg_pt_gp, page);
Nicholas Bellinger5b9a4d72012-05-16 22:02:34 -07002384}
2385
Hannes Reinecke125d0112013-11-19 09:07:46 +01002386static ssize_t target_core_alua_tg_pt_gp_store_attr_implicit_trans_secs(
Nicholas Bellinger5b9a4d72012-05-16 22:02:34 -07002387 struct t10_alua_tg_pt_gp *tg_pt_gp,
2388 const char *page,
2389 size_t count)
2390{
Hannes Reinecke125d0112013-11-19 09:07:46 +01002391 return core_alua_store_implicit_trans_secs(tg_pt_gp, page, count);
Nicholas Bellinger5b9a4d72012-05-16 22:02:34 -07002392}
2393
Hannes Reinecke125d0112013-11-19 09:07:46 +01002394SE_DEV_ALUA_TG_PT_ATTR(implicit_trans_secs, S_IRUGO | S_IWUSR);
Nicholas Bellinger5b9a4d72012-05-16 22:02:34 -07002395
2396/*
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002397 * preferred
2398 */
2399
2400static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred(
2401 struct t10_alua_tg_pt_gp *tg_pt_gp,
2402 char *page)
2403{
2404 return core_alua_show_preferred_bit(tg_pt_gp, page);
2405}
2406
2407static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred(
2408 struct t10_alua_tg_pt_gp *tg_pt_gp,
2409 const char *page,
2410 size_t count)
2411{
2412 return core_alua_store_preferred_bit(tg_pt_gp, page, count);
2413}
2414
2415SE_DEV_ALUA_TG_PT_ATTR(preferred, S_IRUGO | S_IWUSR);
2416
2417/*
2418 * tg_pt_gp_id
2419 */
2420static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(
2421 struct t10_alua_tg_pt_gp *tg_pt_gp,
2422 char *page)
2423{
Andy Grover6708bb22011-06-08 10:36:43 -07002424 if (!tg_pt_gp->tg_pt_gp_valid_id)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002425 return 0;
2426
2427 return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
2428}
2429
2430static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
2431 struct t10_alua_tg_pt_gp *tg_pt_gp,
2432 const char *page,
2433 size_t count)
2434{
2435 struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
2436 unsigned long tg_pt_gp_id;
2437 int ret;
2438
Jingoo Han57103d72013-07-19 16:22:19 +09002439 ret = kstrtoul(page, 0, &tg_pt_gp_id);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002440 if (ret < 0) {
Jingoo Han57103d72013-07-19 16:22:19 +09002441 pr_err("kstrtoul() returned %d for"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002442 " tg_pt_gp_id\n", ret);
Jingoo Han57103d72013-07-19 16:22:19 +09002443 return ret;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002444 }
2445 if (tg_pt_gp_id > 0x0000ffff) {
Andy Grover6708bb22011-06-08 10:36:43 -07002446 pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002447 " 0x0000ffff\n", tg_pt_gp_id);
2448 return -EINVAL;
2449 }
2450
2451 ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
2452 if (ret < 0)
2453 return -EINVAL;
2454
Andy Grover6708bb22011-06-08 10:36:43 -07002455 pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002456 "core/alua/tg_pt_gps/%s to ID: %hu\n",
2457 config_item_name(&alua_tg_pt_gp_cg->cg_item),
2458 tg_pt_gp->tg_pt_gp_id);
2459
2460 return count;
2461}
2462
2463SE_DEV_ALUA_TG_PT_ATTR(tg_pt_gp_id, S_IRUGO | S_IWUSR);
2464
2465/*
2466 * members
2467 */
2468static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
2469 struct t10_alua_tg_pt_gp *tg_pt_gp,
2470 char *page)
2471{
2472 struct se_port *port;
2473 struct se_portal_group *tpg;
2474 struct se_lun *lun;
2475 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
2476 ssize_t len = 0, cur_len;
2477 unsigned char buf[TG_PT_GROUP_NAME_BUF];
2478
2479 memset(buf, 0, TG_PT_GROUP_NAME_BUF);
2480
2481 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
2482 list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
2483 tg_pt_gp_mem_list) {
2484 port = tg_pt_gp_mem->tg_pt;
2485 tpg = port->sep_tpg;
2486 lun = port->sep_lun;
2487
2488 cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
Andy Grovere3d6f902011-07-19 08:55:10 +00002489 "/%s\n", tpg->se_tpg_tfo->get_fabric_name(),
2490 tpg->se_tpg_tfo->tpg_get_wwn(tpg),
2491 tpg->se_tpg_tfo->tpg_get_tag(tpg),
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002492 config_item_name(&lun->lun_group.cg_item));
2493 cur_len++; /* Extra byte for NULL terminator */
2494
2495 if ((cur_len + len) > PAGE_SIZE) {
Andy Grover6708bb22011-06-08 10:36:43 -07002496			pr_warn("Ran out of tg_pt_gp_show_attr"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002497 "_members buffer\n");
2498 break;
2499 }
2500 memcpy(page+len, buf, cur_len);
2501 len += cur_len;
2502 }
2503 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
2504
2505 return len;
2506}
2507
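/*
 * "members" lists each bound port as
 * <fabric>/<target wwn>/tpgt_<tag>/<lun>, one entry per line, following the
 * snprintf() format above.
 */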
2508SE_DEV_ALUA_TG_PT_ATTR_RO(members);
2509
2510CONFIGFS_EATTR_OPS(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp,
2511 tg_pt_gp_group);
2512
2513static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
2514 &target_core_alua_tg_pt_gp_alua_access_state.attr,
2515 &target_core_alua_tg_pt_gp_alua_access_status.attr,
2516 &target_core_alua_tg_pt_gp_alua_access_type.attr,
Hannes Reineckeb0a382c2013-11-19 09:07:51 +01002517 &target_core_alua_tg_pt_gp_alua_support_transitioning.attr,
2518 &target_core_alua_tg_pt_gp_alua_support_offline.attr,
2519 &target_core_alua_tg_pt_gp_alua_support_lba_dependent.attr,
2520 &target_core_alua_tg_pt_gp_alua_support_unavailable.attr,
2521 &target_core_alua_tg_pt_gp_alua_support_standby.attr,
2522 &target_core_alua_tg_pt_gp_alua_support_active_nonoptimized.attr,
2523 &target_core_alua_tg_pt_gp_alua_support_active_optimized.attr,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002524 &target_core_alua_tg_pt_gp_alua_write_metadata.attr,
2525 &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
2526 &target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
Hannes Reinecke125d0112013-11-19 09:07:46 +01002527 &target_core_alua_tg_pt_gp_implicit_trans_secs.attr,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002528 &target_core_alua_tg_pt_gp_preferred.attr,
2529 &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
2530 &target_core_alua_tg_pt_gp_members.attr,
2531 NULL,
2532};
2533
Nicholas Bellinger1f6fe7c2011-02-09 15:34:54 -08002534static void target_core_alua_tg_pt_gp_release(struct config_item *item)
2535{
2536 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
2537 struct t10_alua_tg_pt_gp, tg_pt_gp_group);
2538
2539 core_alua_free_tg_pt_gp(tg_pt_gp);
2540}
2541
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002542static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
Nicholas Bellinger1f6fe7c2011-02-09 15:34:54 -08002543 .release = target_core_alua_tg_pt_gp_release,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002544 .show_attribute = target_core_alua_tg_pt_gp_attr_show,
2545 .store_attribute = target_core_alua_tg_pt_gp_attr_store,
2546};
2547
2548static struct config_item_type target_core_alua_tg_pt_gp_cit = {
2549 .ct_item_ops = &target_core_alua_tg_pt_gp_ops,
2550 .ct_attrs = target_core_alua_tg_pt_gp_attrs,
2551 .ct_owner = THIS_MODULE,
2552};
2553
2554/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
2555
Nicholas Bellinger72aca572014-11-27 15:06:23 -08002556/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002557
2558static struct config_group *target_core_alua_create_tg_pt_gp(
2559 struct config_group *group,
2560 const char *name)
2561{
2562 struct t10_alua *alua = container_of(group, struct t10_alua,
2563 alua_tg_pt_gps_group);
2564 struct t10_alua_tg_pt_gp *tg_pt_gp;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002565 struct config_group *alua_tg_pt_gp_cg = NULL;
2566 struct config_item *alua_tg_pt_gp_ci = NULL;
2567
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04002568 tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
Andy Grover6708bb22011-06-08 10:36:43 -07002569 if (!tg_pt_gp)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002570 return NULL;
2571
2572 alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
2573 alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;
2574
2575 config_group_init_type_name(alua_tg_pt_gp_cg, name,
2576 &target_core_alua_tg_pt_gp_cit);
2577
Andy Grover6708bb22011-06-08 10:36:43 -07002578 pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002579 " Group: alua/tg_pt_gps/%s\n",
2580 config_item_name(alua_tg_pt_gp_ci));
2581
2582 return alua_tg_pt_gp_cg;
2583}
2584
2585static void target_core_alua_drop_tg_pt_gp(
2586 struct config_group *group,
2587 struct config_item *item)
2588{
2589 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
2590 struct t10_alua_tg_pt_gp, tg_pt_gp_group);
2591
Andy Grover6708bb22011-06-08 10:36:43 -07002592 pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002593 " Group: alua/tg_pt_gps/%s, ID: %hu\n",
2594 config_item_name(item), tg_pt_gp->tg_pt_gp_id);
Nicholas Bellinger1f6fe7c2011-02-09 15:34:54 -08002595 /*
2596 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
2597 * -> target_core_alua_tg_pt_gp_release().
2598 */
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002599 config_item_put(item);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002600}
2601
2602static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
2603 .make_group = &target_core_alua_create_tg_pt_gp,
2604 .drop_item = &target_core_alua_drop_tg_pt_gp,
2605};
2606
Nicholas Bellinger72aca572014-11-27 15:06:23 -08002607TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002608
Nicholas Bellinger72aca572014-11-27 15:06:23 -08002609/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002610
2611/* Start functions for struct config_item_type target_core_alua_cit */
2612
2613/*
2614 * target_core_alua_cit is a ConfigFS group that lives under
2615 * /sys/kernel/config/target/core/alua. There are default groups
2616 * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
2617 * target_core_alua_cit in target_core_init_configfs() below.
2618 */
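/*
 * Resulting layout (illustrative):
 *
 *   /sys/kernel/config/target/core/alua/
 *       lu_gps/       ALUA logical unit groups
 *       tg_pt_gps/    ALUA target port groups
 */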
2619static struct config_item_type target_core_alua_cit = {
2620 .ct_item_ops = NULL,
2621 .ct_attrs = NULL,
2622 .ct_owner = THIS_MODULE,
2623};
2624
2625/* End functions for struct config_item_type target_core_alua_cit */
2626
Nicholas Bellingerd23ab572014-11-27 15:09:32 -08002627/* Start functions for struct config_item_type tb_dev_stat_cit */
Nicholas Bellinger12d2338422011-03-14 04:06:11 -07002628
2629static struct config_group *target_core_stat_mkdir(
2630 struct config_group *group,
2631 const char *name)
2632{
2633 return ERR_PTR(-ENOSYS);
2634}
2635
2636static void target_core_stat_rmdir(
2637 struct config_group *group,
2638 struct config_item *item)
2639{
2640 return;
2641}
2642
2643static struct configfs_group_operations target_core_stat_group_ops = {
2644 .make_group = &target_core_stat_mkdir,
2645 .drop_item = &target_core_stat_rmdir,
2646};

TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);

/* End functions for struct config_item_type tb_dev_stat_cit */

/* Start functions for struct config_item_type target_core_hba_cit */

static struct config_group *target_core_make_subdev(
	struct config_group *group,
	const char *name)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_item *hba_ci = &group->cg_item;
	struct se_hba *hba = item_to_hba(hba_ci);
	struct target_backend *tb = hba->backend;
	struct se_device *dev;
	struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
	struct config_group *dev_stat_grp = NULL;
	int errno = -ENOMEM, ret;

	ret = mutex_lock_interruptible(&hba->hba_access_mutex);
	if (ret)
		return ERR_PTR(ret);

	dev = target_alloc_device(hba, name);
	if (!dev)
		goto out_unlock;

	dev_cg = &dev->dev_group;

	dev_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
			GFP_KERNEL);
	if (!dev_cg->default_groups)
		goto out_free_device;

	config_group_init_type_name(dev_cg, name, &tb->tb_dev_cit);
	config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
			&tb->tb_dev_attrib_cit);
	config_group_init_type_name(&dev->dev_pr_group, "pr",
			&tb->tb_dev_pr_cit);
	config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
			&tb->tb_dev_wwn_cit);
	config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
			"alua", &tb->tb_dev_alua_tg_pt_gps_cit);
	config_group_init_type_name(&dev->dev_stat_grps.stat_group,
			"statistics", &tb->tb_dev_stat_cit);

	dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
	dev_cg->default_groups[1] = &dev->dev_pr_group;
	dev_cg->default_groups[2] = &dev->t10_wwn.t10_wwn_group;
	dev_cg->default_groups[3] = &dev->t10_alua.alua_tg_pt_gps_group;
	dev_cg->default_groups[4] = &dev->dev_stat_grps.stat_group;
	dev_cg->default_groups[5] = NULL;
	/*
	 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
	 */
	tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
	if (!tg_pt_gp)
		goto out_free_dev_cg_default_groups;
	dev->t10_alua.default_tg_pt_gp = tg_pt_gp;

	tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
	tg_pt_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
			GFP_KERNEL);
	if (!tg_pt_gp_cg->default_groups) {
		pr_err("Unable to allocate tg_pt_gp_cg->"
				"default_groups\n");
		goto out_free_tg_pt_gp;
	}

	config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
			"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
	tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
	tg_pt_gp_cg->default_groups[1] = NULL;
	/*
	 * Add core/$HBA/$DEV/statistics/ default groups
	 */
	dev_stat_grp = &dev->dev_stat_grps.stat_group;
	dev_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 4,
			GFP_KERNEL);
	if (!dev_stat_grp->default_groups) {
		pr_err("Unable to allocate dev_stat_grp->default_groups\n");
		goto out_free_tg_pt_gp_cg_default_groups;
	}
	target_stat_setup_dev_default_groups(dev);

	mutex_unlock(&hba->hba_access_mutex);
	return dev_cg;

out_free_tg_pt_gp_cg_default_groups:
	kfree(tg_pt_gp_cg->default_groups);
out_free_tg_pt_gp:
	core_alua_free_tg_pt_gp(tg_pt_gp);
out_free_dev_cg_default_groups:
	kfree(dev_cg->default_groups);
out_free_device:
	target_free_device(dev);
out_unlock:
	mutex_unlock(&hba->hba_access_mutex);
	return ERR_PTR(errno);
}
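
/*
 * Illustrative layout created by target_core_make_subdev() when userspace
 * makes a backend device directory (names below are examples only), e.g.
 * "mkdir /sys/kernel/config/target/core/iblock_0/disk1":
 *
 *   core/iblock_0/disk1/
 *   |-- attrib/
 *   |-- pr/
 *   |-- wwn/
 *   |-- alua/
 *   |   `-- default_tg_pt_gp/
 *   `-- statistics/
 */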

static void target_core_drop_subdev(
	struct config_group *group,
	struct config_item *item)
{
	struct config_group *dev_cg = to_config_group(item);
	struct se_device *dev =
		container_of(dev_cg, struct se_device, dev_group);
	struct se_hba *hba;
	struct config_item *df_item;
	struct config_group *tg_pt_gp_cg, *dev_stat_grp;
	int i;

	hba = item_to_hba(&dev->se_hba->hba_group.cg_item);

	mutex_lock(&hba->hba_access_mutex);

	dev_stat_grp = &dev->dev_stat_grps.stat_group;
	for (i = 0; dev_stat_grp->default_groups[i]; i++) {
		df_item = &dev_stat_grp->default_groups[i]->cg_item;
		dev_stat_grp->default_groups[i] = NULL;
		config_item_put(df_item);
	}
	kfree(dev_stat_grp->default_groups);

	tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
	for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
		df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
		tg_pt_gp_cg->default_groups[i] = NULL;
		config_item_put(df_item);
	}
	kfree(tg_pt_gp_cg->default_groups);
	/*
	 * core_alua_free_tg_pt_gp() is called on ->default_tg_pt_gp
	 * directly from target_core_alua_tg_pt_gp_release().
	 */
	dev->t10_alua.default_tg_pt_gp = NULL;

	for (i = 0; dev_cg->default_groups[i]; i++) {
		df_item = &dev_cg->default_groups[i]->cg_item;
		dev_cg->default_groups[i] = NULL;
		config_item_put(df_item);
	}
	/*
	 * se_dev is released from target_core_dev_item_ops->release()
	 */
	config_item_put(item);
	mutex_unlock(&hba->hba_access_mutex);
}

static struct configfs_group_operations target_core_hba_group_ops = {
	.make_group		= target_core_make_subdev,
	.drop_item		= target_core_drop_subdev,
};

CONFIGFS_EATTR_STRUCT(target_core_hba, se_hba);
#define SE_HBA_ATTR(_name, _mode)				\
static struct target_core_hba_attribute			\
		target_core_hba_##_name =			\
		__CONFIGFS_EATTR(_name, _mode,			\
		target_core_hba_show_attr_##_name,		\
		target_core_hba_store_attr_##_name);

#define SE_HBA_ATTR_RO(_name)					\
static struct target_core_hba_attribute			\
		target_core_hba_##_name =			\
		__CONFIGFS_EATTR_RO(_name,			\
		target_core_hba_show_attr_##_name);

static ssize_t target_core_hba_show_attr_hba_info(
	struct se_hba *hba,
	char *page)
{
	return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
			hba->hba_id, hba->backend->ops->name,
			TARGET_CORE_CONFIGFS_VERSION);
}

SE_HBA_ATTR_RO(hba_info);

static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba,
				char *page)
{
	int hba_mode = 0;

	if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
		hba_mode = 1;

	return sprintf(page, "%d\n", hba_mode);
}

static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
				const char *page, size_t count)
{
	unsigned long mode_flag;
	int ret;

	if (hba->backend->ops->pmode_enable_hba == NULL)
		return -EINVAL;

	ret = kstrtoul(page, 0, &mode_flag);
	if (ret < 0) {
		pr_err("Unable to extract hba mode flag: %d\n", ret);
		return ret;
	}

	if (hba->dev_count) {
		pr_err("Unable to set hba_mode with active devices\n");
		return -EINVAL;
	}

	ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag);
	if (ret < 0)
		return -EINVAL;
	if (ret > 0)
		hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
	else if (ret == 0)
		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;

	return count;
}

SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
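
/*
 * Illustrative usage from userspace (HBA name is an example only):
 *
 *   cat /sys/kernel/config/target/core/pscsi_0/hba_info
 *   echo 1 > /sys/kernel/config/target/core/pscsi_0/hba_mode
 *
 * The hba_mode store only succeeds when the backend implements
 * ->pmode_enable_hba() and the HBA has no active devices.
 */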

CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);

static void target_core_hba_release(struct config_item *item)
{
	struct se_hba *hba = container_of(to_config_group(item),
				struct se_hba, hba_group);
	core_delete_hba(hba);
}

static struct configfs_attribute *target_core_hba_attrs[] = {
	&target_core_hba_hba_info.attr,
	&target_core_hba_hba_mode.attr,
	NULL,
};

static struct configfs_item_operations target_core_hba_item_ops = {
	.release		= target_core_hba_release,
	.show_attribute		= target_core_hba_attr_show,
	.store_attribute	= target_core_hba_attr_store,
};

static struct config_item_type target_core_hba_cit = {
	.ct_item_ops		= &target_core_hba_item_ops,
	.ct_group_ops		= &target_core_hba_group_ops,
	.ct_attrs		= target_core_hba_attrs,
	.ct_owner		= THIS_MODULE,
};

static struct config_group *target_core_call_addhbatotarget(
	struct config_group *group,
	const char *name)
{
	char *se_plugin_str, *str, *str2;
	struct se_hba *hba;
	char buf[TARGET_CORE_NAME_MAX_LEN];
	unsigned long plugin_dep_id = 0;
	int ret;

	memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
	if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
		pr_err("Passed *name strlen(): %d exceeds"
			" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
			TARGET_CORE_NAME_MAX_LEN);
		return ERR_PTR(-ENAMETOOLONG);
	}
	snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);

	str = strstr(buf, "_");
	if (!str) {
		pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
		return ERR_PTR(-EINVAL);
	}
	se_plugin_str = buf;
	/*
	 * Special case for subsystem plugins that have "_" in their names,
	 * namely rd_direct and rd_mcp.
	 */
	str2 = strstr(str+1, "_");
	if (str2) {
		*str2 = '\0'; /* Terminate for *se_plugin_str */
		str2++; /* Skip to start of plugin dependent ID */
		str = str2;
	} else {
		*str = '\0'; /* Terminate for *se_plugin_str */
		str++; /* Skip to start of plugin dependent ID */
	}

	ret = kstrtoul(str, 0, &plugin_dep_id);
	if (ret < 0) {
		pr_err("kstrtoul() returned %d for"
				" plugin_dep_id\n", ret);
		return ERR_PTR(ret);
	}
	/*
	 * Load up TCM subsystem plugins if they have not already been loaded.
	 */
	transport_subsystem_check_init();

	hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
	if (IS_ERR(hba))
		return ERR_CAST(hba);

	config_group_init_type_name(&hba->hba_group, name,
			&target_core_hba_cit);

	return &hba->hba_group;
}
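
/*
 * HBA directory names follow $SUBSYSTEM_PLUGIN_$HOST_ID. Illustrative
 * examples (names chosen arbitrarily):
 *
 *   mkdir /sys/kernel/config/target/core/iblock_0   -> plugin "iblock", id 0
 *   mkdir /sys/kernel/config/target/core/rd_mcp_1   -> plugin "rd_mcp", id 1
 */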

static void target_core_call_delhbafromtarget(
	struct config_group *group,
	struct config_item *item)
{
	/*
	 * core_delete_hba() is called from target_core_hba_item_ops->release()
	 * -> target_core_hba_release()
	 */
	config_item_put(item);
}

static struct configfs_group_operations target_core_group_ops = {
	.make_group	= target_core_call_addhbatotarget,
	.drop_item	= target_core_call_delhbafromtarget,
};

static struct config_item_type target_core_cit = {
	.ct_item_ops	= NULL,
	.ct_group_ops	= &target_core_group_ops,
	.ct_attrs	= NULL,
	.ct_owner	= THIS_MODULE,
};

/* Stop functions for struct config_item_type target_core_hba_cit */

void target_setup_backend_cits(struct target_backend *tb)
{
	target_core_setup_dev_cit(tb);
	target_core_setup_dev_attrib_cit(tb);
	target_core_setup_dev_pr_cit(tb);
	target_core_setup_dev_wwn_cit(tb);
	target_core_setup_dev_alua_tg_pt_gps_cit(tb);
	target_core_setup_dev_stat_cit(tb);
}

static int __init target_core_init_configfs(void)
{
	struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
	struct config_group *lu_gp_cg = NULL;
	struct configfs_subsystem *subsys = &target_core_fabrics;
	struct t10_alua_lu_gp *lu_gp;
	int ret;

	pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
		" Engine: %s on %s/%s on "UTS_RELEASE"\n",
		TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);

	config_group_init(&subsys->su_group);
	mutex_init(&subsys->su_mutex);

	ret = init_se_kmem_caches();
	if (ret < 0)
		return ret;
	/*
	 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
	 * and ALUA Logical Unit Group and Target Port Group infrastructure.
	 */
	target_cg = &subsys->su_group;
	target_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
			GFP_KERNEL);
	if (!target_cg->default_groups) {
		pr_err("Unable to allocate target_cg->default_groups\n");
		ret = -ENOMEM;
		goto out_global;
	}

	config_group_init_type_name(&target_core_hbagroup,
			"core", &target_core_cit);
	target_cg->default_groups[0] = &target_core_hbagroup;
	target_cg->default_groups[1] = NULL;
	/*
	 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
	 */
	hba_cg = &target_core_hbagroup;
	hba_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
			GFP_KERNEL);
	if (!hba_cg->default_groups) {
		pr_err("Unable to allocate hba_cg->default_groups\n");
		ret = -ENOMEM;
		goto out_global;
	}
	config_group_init_type_name(&alua_group,
			"alua", &target_core_alua_cit);
	hba_cg->default_groups[0] = &alua_group;
	hba_cg->default_groups[1] = NULL;
	/*
	 * Add ALUA Logical Unit Group and Target Port Group ConfigFS
	 * groups under /sys/kernel/config/target/core/alua/
	 */
	alua_cg = &alua_group;
	alua_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
			GFP_KERNEL);
	if (!alua_cg->default_groups) {
		pr_err("Unable to allocate alua_cg->default_groups\n");
		ret = -ENOMEM;
		goto out_global;
	}

	config_group_init_type_name(&alua_lu_gps_group,
			"lu_gps", &target_core_alua_lu_gps_cit);
	alua_cg->default_groups[0] = &alua_lu_gps_group;
	alua_cg->default_groups[1] = NULL;
	/*
	 * Add core/alua/lu_gps/default_lu_gp
	 */
	lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
	if (IS_ERR(lu_gp)) {
		ret = -ENOMEM;
		goto out_global;
	}

	lu_gp_cg = &alua_lu_gps_group;
	lu_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
			GFP_KERNEL);
	if (!lu_gp_cg->default_groups) {
		pr_err("Unable to allocate lu_gp_cg->default_groups\n");
		ret = -ENOMEM;
		goto out_global;
	}

	config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
			&target_core_alua_lu_gp_cit);
	lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
	lu_gp_cg->default_groups[1] = NULL;
	default_lu_gp = lu_gp;
	/*
	 * Register the target_core_mod subsystem with configfs.
	 */
	ret = configfs_register_subsystem(subsys);
	if (ret < 0) {
		pr_err("Error %d while registering subsystem %s\n",
			ret, subsys->su_group.cg_item.ci_namebuf);
		goto out_global;
	}
	pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
		" Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
		" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
	/*
	 * Register built-in RAMDISK subsystem logic for virtual LUN 0
	 */
	ret = rd_module_init();
	if (ret < 0)
		goto out;

	ret = core_dev_setup_virtual_lun0();
	if (ret < 0)
		goto out;

	ret = target_xcopy_setup_pt();
	if (ret < 0)
		goto out;

	return 0;

out:
	configfs_unregister_subsystem(subsys);
	core_dev_release_virtual_lun0();
	rd_module_exit();
out_global:
	if (default_lu_gp) {
		core_alua_free_lu_gp(default_lu_gp);
		default_lu_gp = NULL;
	}
	if (lu_gp_cg)
		kfree(lu_gp_cg->default_groups);
	if (alua_cg)
		kfree(alua_cg->default_groups);
	if (hba_cg)
		kfree(hba_cg->default_groups);
	kfree(target_cg->default_groups);
	release_se_kmem_caches();
	return ret;
}
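
/*
 * Sketch of the static configfs layout registered by
 * target_core_init_configfs() (illustrative):
 *
 *   /sys/kernel/config/target/
 *   `-- core/
 *       `-- alua/
 *           `-- lu_gps/
 *               `-- default_lu_gp/
 *
 * HBA and device directories are created on demand beneath core/ via
 * target_core_call_addhbatotarget() and target_core_make_subdev() above.
 */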

static void __exit target_core_exit_configfs(void)
{
	struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
	struct config_item *item;
	int i;

	lu_gp_cg = &alua_lu_gps_group;
	for (i = 0; lu_gp_cg->default_groups[i]; i++) {
		item = &lu_gp_cg->default_groups[i]->cg_item;
		lu_gp_cg->default_groups[i] = NULL;
		config_item_put(item);
	}
	kfree(lu_gp_cg->default_groups);
	lu_gp_cg->default_groups = NULL;

	alua_cg = &alua_group;
	for (i = 0; alua_cg->default_groups[i]; i++) {
		item = &alua_cg->default_groups[i]->cg_item;
		alua_cg->default_groups[i] = NULL;
		config_item_put(item);
	}
	kfree(alua_cg->default_groups);
	alua_cg->default_groups = NULL;

	hba_cg = &target_core_hbagroup;
	for (i = 0; hba_cg->default_groups[i]; i++) {
		item = &hba_cg->default_groups[i]->cg_item;
		hba_cg->default_groups[i] = NULL;
		config_item_put(item);
	}
	kfree(hba_cg->default_groups);
	hba_cg->default_groups = NULL;
	/*
	 * We expect subsys->su_group.default_groups to be released
	 * by the configfs subsystem provider logic.
	 */
	configfs_unregister_subsystem(&target_core_fabrics);
	kfree(target_core_fabrics.su_group.default_groups);

	core_alua_free_lu_gp(default_lu_gp);
	default_lu_gp = NULL;

	pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
		" Infrastructure\n");

	core_dev_release_virtual_lun0();
	rd_module_exit();
	target_xcopy_release_pt();
	release_se_kmem_caches();
}

MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(target_core_init_configfs);
module_exit(target_core_exit_configfs);