#ifndef TARGET_CORE_BASE_H
#define TARGET_CORE_BASE_H

#include <linux/in.h>
#include <linux/configfs.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <net/sock.h>
#include <net/tcp.h>

#define TARGET_CORE_MOD_VERSION "v4.1.0-rc1-ml"
#define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION

/* Maximum Number of LUNs per Target Portal Group */
/* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
#define TRANSPORT_MAX_LUNS_PER_TPG 256
/*
 * By default we use 32-byte CDBs in TCM Core and subsystem plugin code.
 *
 * Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and
 * include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use
 * 16-byte CDBs by default and require an extra allocation for
 * 32-byte CDBs because of legacy issues.
 *
 * Within TCM Core there are no such legacy limitations, so we go ahead
 * and use 32-byte CDBs by default, using include/scsi/scsi.h:scsi_command_size()
 * within all TCM Core and subsystem plugin code.
 */
#define TCM_MAX_COMMAND_SIZE 32
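/*
 * Illustrative sketch (an assumption, not part of the original header): the
 * intended use is that a received CDB is sized with scsi_command_size() and
 * either stored in the embedded 32-byte buffer or separately allocated when
 * larger, roughly:
 *
 *	if (scsi_command_size(cdb) > TCM_MAX_COMMAND_SIZE)
 *		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
 *	else
 *		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
 *	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
 */
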
/*
 * From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently
 * defined 96, but the real limit is 252 (or 260 including the header)
 */
#define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE
/* Used by transport_send_check_condition_and_sense() */
#define SPC_SENSE_KEY_OFFSET 2
#define SPC_ASC_KEY_OFFSET 12
#define SPC_ASCQ_KEY_OFFSET 13
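/*
 * Illustrative sketch (an assumption, not part of the original header): the
 * offsets above index into a fixed format sense buffer, for example when
 * building a CHECK CONDITION payload for an unsupported opcode:
 *
 *	buffer[0] = 0x70;				current, fixed format
 *	buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
 *	buffer[SPC_ASC_KEY_OFFSET] = 0x20;		INVALID COMMAND OPERATION CODE
 *	buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;
 */
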
#define TRANSPORT_IQN_LEN 224
/* Used by target_core_store_alua_lu_gp() and target_core_alua_lu_gp_show_attr_members() */
#define LU_GROUP_NAME_BUF 256
/* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */
#define TG_PT_GROUP_NAME_BUF 256
/* Used to parse VPD into struct t10_vpd */
#define VPD_TMP_BUF_SIZE 128
/* Used by transport_generic_cmd_sequencer() */
#define READ_BLOCK_LEN 6
#define READ_CAP_LEN 8
#define READ_POSITION_LEN 20
#define INQUIRY_LEN 36
/* Used by transport_get_inquiry_vpd_serial() */
#define INQUIRY_VPD_SERIAL_LEN 254
/* Used by transport_get_inquiry_vpd_device_ident() */
#define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN 254

/* Attempts before moving from SHORT to LONG */
#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD 3
#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3 /* In milliseconds */
#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG 10 /* In milliseconds */
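/*
 * Illustrative sketch (an assumption, not part of the original header): a
 * caller that finds the device queue window closed backs off with the SHORT
 * wait until the THRESHOLD is reached, then falls back to the LONG wait:
 *
 *	if (dev->dev_tcq_window_closed++ < PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD)
 *		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
 *	else
 *		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
 */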

#define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */

/*
 * struct se_subsystem_dev->su_dev_flags
 */
#define SDF_FIRMWARE_VPD_UNIT_SERIAL 0x00000001
#define SDF_EMULATED_VPD_UNIT_SERIAL 0x00000002
#define SDF_USING_UDEV_PATH 0x00000004
#define SDF_USING_ALIAS 0x00000008

/*
 * struct se_device->dev_flags
 */
#define DF_READ_ONLY 0x00000001
#define DF_SPC2_RESERVATIONS 0x00000002
#define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000004

/* struct se_dev_attrib sanity values */
/* Default max_unmap_lba_count */
#define DA_MAX_UNMAP_LBA_COUNT 0
/* Default max_unmap_block_desc_count */
#define DA_MAX_UNMAP_BLOCK_DESC_COUNT 0
/* Default unmap_granularity */
#define DA_UNMAP_GRANULARITY_DEFAULT 0
/* Default unmap_granularity_alignment */
#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
/* Emulation for Disable Page Out (DPO) */
#define DA_EMULATE_DPO 0
/* Emulation for Force Unit Access WRITEs */
#define DA_EMULATE_FUA_WRITE 1
/* Emulation for Force Unit Access READs */
#define DA_EMULATE_FUA_READ 0
/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
#define DA_EMULATE_WRITE_CACHE 0
/* Emulation for UNIT ATTENTION Interlock Control */
#define DA_EMULATE_UA_INTLLCK_CTRL 0
/* Emulation for TASK_ABORTED status (TAS) by default */
#define DA_EMULATE_TAS 1
/* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
#define DA_EMULATE_TPU 0
/*
 * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using
 * block/blk-lib.c:blkdev_issue_discard()
 */
#define DA_EMULATE_TPWS 0
/* No Emulation for PSCSI by default */
#define DA_EMULATE_RESERVATIONS 0
/* No Emulation for PSCSI by default */
#define DA_EMULATE_ALUA 0
/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
#define DA_ENFORCE_PR_ISIDS 1
#define DA_STATUS_MAX_SECTORS_MIN 16
#define DA_STATUS_MAX_SECTORS_MAX 8192
/* By default don't report non-rotating (solid state) medium */
#define DA_IS_NONROT 0
/* Queue Algorithm Modifier default for restricted reordering in control mode page */
#define DA_EMULATE_REST_REORD 0

#define SE_MODE_PAGE_BUF 512


/* struct se_hba->hba_flags */
enum hba_flags_table {
	HBA_FLAGS_INTERNAL_USE = 0x01,
	HBA_FLAGS_PSCSI_MODE = 0x02,
};

/* struct se_lun->lun_status */
enum transport_lun_status_table {
	TRANSPORT_LUN_STATUS_FREE = 0,
	TRANSPORT_LUN_STATUS_ACTIVE = 1,
};

/* struct se_portal_group->se_tpg_type */
enum transport_tpg_type_table {
	TRANSPORT_TPG_TYPE_NORMAL = 0,
	TRANSPORT_TPG_TYPE_DISCOVERY = 1,
};

/* struct se_task->task_flags */
enum se_task_flags {
	TF_ACTIVE = (1 << 0),
	TF_SENT = (1 << 1),
	TF_REQUEST_STOP = (1 << 2),
};

/* Special transport agnostic struct se_cmd->t_state */
enum transport_state_table {
	TRANSPORT_NO_STATE = 0,
	TRANSPORT_NEW_CMD = 1,
	TRANSPORT_WRITE_PENDING = 3,
	TRANSPORT_PROCESS_WRITE = 4,
	TRANSPORT_PROCESSING = 5,
	TRANSPORT_COMPLETE = 6,
	TRANSPORT_PROCESS_TMR = 9,
	TRANSPORT_ISTATE_PROCESSING = 11,
	TRANSPORT_NEW_CMD_MAP = 16,
	TRANSPORT_COMPLETE_QF_WP = 18,
	TRANSPORT_COMPLETE_QF_OK = 19,
};

/* Used for struct se_cmd->se_cmd_flags */
enum se_cmd_flags_table {
	SCF_SUPPORTED_SAM_OPCODE = 0x00000001,
	SCF_TRANSPORT_TASK_SENSE = 0x00000002,
	SCF_EMULATED_TASK_SENSE = 0x00000004,
	SCF_SCSI_DATA_SG_IO_CDB = 0x00000008,
	SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010,
	SCF_SCSI_NON_DATA_CDB = 0x00000040,
	SCF_SCSI_CDB_EXCEPTION = 0x00000080,
	SCF_SCSI_RESERVATION_CONFLICT = 0x00000100,
	SCF_FUA = 0x00000200,
	SCF_SE_LUN_CMD = 0x00000800,
	SCF_SE_ALLOW_EOO = 0x00001000,
	SCF_BIDI = 0x00002000,
	SCF_SENT_CHECK_CONDITION = 0x00004000,
	SCF_OVERFLOW_BIT = 0x00008000,
	SCF_UNDERFLOW_BIT = 0x00010000,
	SCF_SENT_DELAYED_TAS = 0x00020000,
	SCF_ALUA_NON_OPTIMIZED = 0x00040000,
	SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000,
	SCF_UNUSED = 0x00100000,
	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
};

/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
enum transport_lunflags_table {
	TRANSPORT_LUNFLAGS_NO_ACCESS = 0x00,
	TRANSPORT_LUNFLAGS_INITIATOR_ACCESS = 0x01,
	TRANSPORT_LUNFLAGS_READ_ONLY = 0x02,
	TRANSPORT_LUNFLAGS_READ_WRITE = 0x04,
};

/* struct se_device->dev_status */
enum transport_device_status_table {
	TRANSPORT_DEVICE_ACTIVATED = 0x01,
	TRANSPORT_DEVICE_DEACTIVATED = 0x02,
	TRANSPORT_DEVICE_QUEUE_FULL = 0x04,
	TRANSPORT_DEVICE_SHUTDOWN = 0x08,
	TRANSPORT_DEVICE_OFFLINE_ACTIVATED = 0x10,
	TRANSPORT_DEVICE_OFFLINE_DEACTIVATED = 0x20,
};

/*
 * Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason
 * to signal which ASC/ASCQ sense payload should be built.
 */
enum tcm_sense_reason_table {
	TCM_NON_EXISTENT_LUN = 0x01,
	TCM_UNSUPPORTED_SCSI_OPCODE = 0x02,
	TCM_INCORRECT_AMOUNT_OF_DATA = 0x03,
	TCM_UNEXPECTED_UNSOLICITED_DATA = 0x04,
	TCM_SERVICE_CRC_ERROR = 0x05,
	TCM_SNACK_REJECTED = 0x06,
	TCM_SECTOR_COUNT_TOO_MANY = 0x07,
	TCM_INVALID_CDB_FIELD = 0x08,
	TCM_INVALID_PARAMETER_LIST = 0x09,
	TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE = 0x0a,
	TCM_UNKNOWN_MODE_PAGE = 0x0b,
	TCM_WRITE_PROTECTED = 0x0c,
	TCM_CHECK_CONDITION_ABORT_CMD = 0x0d,
	TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e,
	TCM_CHECK_CONDITION_NOT_READY = 0x0f,
	TCM_RESERVATION_CONFLICT = 0x10,
};
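/*
 * Illustrative sketch (an assumption, not part of the original header): a
 * backend or the CDB sequencer typically reports one of the reasons above by
 * flagging an exception on the command before the sense payload is queued:
 *
 *	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 *	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
 *	return -EINVAL;
 */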

/* fabric independent task management function values */
enum tcm_tmreq_table {
	TMR_ABORT_TASK = 1,
	TMR_ABORT_TASK_SET = 2,
	TMR_CLEAR_ACA = 3,
	TMR_CLEAR_TASK_SET = 4,
	TMR_LUN_RESET = 5,
	TMR_TARGET_WARM_RESET = 6,
	TMR_TARGET_COLD_RESET = 7,
	TMR_FABRIC_TMR = 255,
};

/* fabric independent task management response values */
enum tcm_tmrsp_table {
	TMR_FUNCTION_COMPLETE = 0,
	TMR_TASK_DOES_NOT_EXIST = 1,
	TMR_LUN_DOES_NOT_EXIST = 2,
	TMR_TASK_STILL_ALLEGIANT = 3,
	TMR_TASK_FAILOVER_NOT_SUPPORTED = 4,
	TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED = 5,
	TMR_FUNCTION_AUTHORIZATION_FAILED = 6,
	TMR_FUNCTION_REJECTED = 255,
};

struct se_obj {
	atomic_t obj_access_count;
} ____cacheline_aligned;

/*
 * Used by TCM Core internally to signal if ALUA emulation is enabled or
 * disabled, or running with TCM/pSCSI passthrough mode
 */
typedef enum {
	SPC_ALUA_PASSTHROUGH,
	SPC2_ALUA_DISABLED,
	SPC3_ALUA_EMULATED
} t10_alua_index_t;

/*
 * Used by TCM Core internally to signal if SAM Task Attribute emulation
 * is enabled or disabled, or running with TCM/pSCSI passthrough mode
 */
typedef enum {
	SAM_TASK_ATTR_PASSTHROUGH,
	SAM_TASK_ATTR_UNTAGGED,
	SAM_TASK_ATTR_EMULATED
} t10_task_attr_index_t;

/*
 * Used for target SCSI statistics
 */
typedef enum {
	SCSI_INST_INDEX,
	SCSI_DEVICE_INDEX,
	SCSI_AUTH_INTR_INDEX,
	SCSI_INDEX_TYPE_MAX
} scsi_index_t;

struct se_cmd;

struct t10_alua {
	t10_alua_index_t alua_type;
	/* ALUA Target Port Group ID */
	u16 alua_tg_pt_gps_counter;
	u32 alua_tg_pt_gps_count;
	spinlock_t tg_pt_gps_lock;
	struct se_subsystem_dev *t10_sub_dev;
	/* Used for default ALUA Target Port Group */
	struct t10_alua_tg_pt_gp *default_tg_pt_gp;
	/* Used for default ALUA Target Port Group ConfigFS group */
	struct config_group alua_tg_pt_gps_group;
	int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *);
	struct list_head tg_pt_gps_list;
} ____cacheline_aligned;

struct t10_alua_lu_gp {
	u16 lu_gp_id;
	int lu_gp_valid_id;
	u32 lu_gp_members;
	atomic_t lu_gp_ref_cnt;
	spinlock_t lu_gp_lock;
	struct config_group lu_gp_group;
	struct list_head lu_gp_node;
	struct list_head lu_gp_mem_list;
} ____cacheline_aligned;

struct t10_alua_lu_gp_member {
	bool lu_gp_assoc;
	atomic_t lu_gp_mem_ref_cnt;
	spinlock_t lu_gp_mem_lock;
	struct t10_alua_lu_gp *lu_gp;
	struct se_device *lu_gp_mem_dev;
	struct list_head lu_gp_mem_list;
} ____cacheline_aligned;

struct t10_alua_tg_pt_gp {
	u16 tg_pt_gp_id;
	int tg_pt_gp_valid_id;
	int tg_pt_gp_alua_access_status;
	int tg_pt_gp_alua_access_type;
	int tg_pt_gp_nonop_delay_msecs;
	int tg_pt_gp_trans_delay_msecs;
	int tg_pt_gp_pref;
	int tg_pt_gp_write_metadata;
	/* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
#define ALUA_MD_BUF_LEN 1024
	u32 tg_pt_gp_md_buf_len;
	u32 tg_pt_gp_members;
	atomic_t tg_pt_gp_alua_access_state;
	atomic_t tg_pt_gp_ref_cnt;
	spinlock_t tg_pt_gp_lock;
	struct mutex tg_pt_gp_md_mutex;
	struct se_subsystem_dev *tg_pt_gp_su_dev;
	struct config_group tg_pt_gp_group;
	struct list_head tg_pt_gp_list;
	struct list_head tg_pt_gp_mem_list;
} ____cacheline_aligned;

struct t10_alua_tg_pt_gp_member {
	bool tg_pt_gp_assoc;
	atomic_t tg_pt_gp_mem_ref_cnt;
	spinlock_t tg_pt_gp_mem_lock;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct se_port *tg_pt;
	struct list_head tg_pt_gp_mem_list;
} ____cacheline_aligned;

struct t10_vpd {
	unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
	int protocol_identifier_set;
	u32 protocol_identifier;
	u32 device_identifier_code_set;
	u32 association;
	u32 device_identifier_type;
	struct list_head vpd_list;
} ____cacheline_aligned;

struct t10_wwn {
	char vendor[8];
	char model[16];
	char revision[4];
	char unit_serial[INQUIRY_VPD_SERIAL_LEN];
	spinlock_t t10_vpd_lock;
	struct se_subsystem_dev *t10_sub_dev;
	struct config_group t10_wwn_group;
	struct list_head t10_vpd_list;
} ____cacheline_aligned;


/*
 * Used by TCM Core internally to signal if >= SPC-3 persistent reservations
 * emulation is enabled or disabled, or running with TCM/pSCSI passthrough
 * mode
 */
typedef enum {
	SPC_PASSTHROUGH,
	SPC2_RESERVATIONS,
	SPC3_PERSISTENT_RESERVATIONS
} t10_reservations_index_t;

struct t10_pr_registration {
	/* Used for fabrics that contain WWN+ISID */
#define PR_REG_ISID_LEN 16
	/* PR_REG_ISID_LEN + ',i,0x' */
#define PR_REG_ISID_ID_LEN (PR_REG_ISID_LEN + 5)
	char pr_reg_isid[PR_REG_ISID_LEN];
	/* Used during APTPL metadata reading */
#define PR_APTPL_MAX_IPORT_LEN 256
	unsigned char pr_iport[PR_APTPL_MAX_IPORT_LEN];
	/* Used during APTPL metadata reading */
#define PR_APTPL_MAX_TPORT_LEN 256
	unsigned char pr_tport[PR_APTPL_MAX_TPORT_LEN];
	/* For writing out live meta data */
	unsigned char *pr_aptpl_buf;
	u16 pr_aptpl_rpti;
	u16 pr_reg_tpgt;
	/* Reservation affects all target ports */
	int pr_reg_all_tg_pt;
	/* Activate Persistence across Target Power Loss */
	int pr_reg_aptpl;
	int pr_res_holder;
	int pr_res_type;
	int pr_res_scope;
	/* Used for fabric initiator WWPNs using an ISID */
	bool isid_present_at_reg;
	u32 pr_res_mapped_lun;
	u32 pr_aptpl_target_lun;
	u32 pr_res_generation;
	u64 pr_reg_bin_isid;
	u64 pr_res_key;
	atomic_t pr_res_holders;
	struct se_node_acl *pr_reg_nacl;
	struct se_dev_entry *pr_reg_deve;
	struct se_lun *pr_reg_tg_pt_lun;
	struct list_head pr_reg_list;
	struct list_head pr_reg_abort_list;
	struct list_head pr_reg_aptpl_list;
	struct list_head pr_reg_atp_list;
	struct list_head pr_reg_atp_mem_list;
} ____cacheline_aligned;

/*
 * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS,
 * SPC2_RESERVATIONS or SPC_PASSTHROUGH in drivers/target/target_core_pr.c:
 * core_setup_reservations()
 */
struct t10_reservation_ops {
	int (*t10_reservation_check)(struct se_cmd *, u32 *);
	int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
	int (*t10_pr_register)(struct se_cmd *);
	int (*t10_pr_clear)(struct se_cmd *);
};

struct t10_reservation {
	/* Reservation affects all target ports */
	int pr_all_tg_pt;
	/* Activate Persistence across Target Power Loss enabled
	 * for SCSI device */
	int pr_aptpl_active;
	/* Used by struct t10_reservation->pr_aptpl_buf_len */
#define PR_APTPL_BUF_LEN 8192
	u32 pr_aptpl_buf_len;
	u32 pr_generation;
	t10_reservations_index_t res_type;
	spinlock_t registration_lock;
	spinlock_t aptpl_reg_lock;
	/*
	 * This will always be set by one individual I_T Nexus.
	 * However with all_tg_pt=1, other I_T Nexus from the
	 * same initiator can access PR reg/res info on a different
	 * target port.
	 *
	 * There is also the 'All Registrants' case, where there is
	 * a single *pr_res_holder of the reservation, but all
	 * registrations are considered reservation holders.
	 */
	struct se_node_acl *pr_res_holder;
	struct list_head registration_list;
	struct list_head aptpl_reg_list;
	struct t10_reservation_ops pr_ops;
} ____cacheline_aligned;

struct se_queue_req {
	int state;
	struct se_cmd *cmd;
	struct list_head qr_list;
} ____cacheline_aligned;

struct se_queue_obj {
	atomic_t queue_cnt;
	spinlock_t cmd_queue_lock;
	struct list_head qobj_list;
	wait_queue_head_t thread_wq;
} ____cacheline_aligned;

struct se_task {
	unsigned long long task_lba;
	u32 task_sectors;
	u32 task_size;
	struct se_cmd *task_se_cmd;
	struct scatterlist *task_sg;
	u32 task_sg_nents;
	u16 task_flags;
	u8 task_sense;
	u8 task_scsi_status;
	int task_error_status;
	enum dma_data_direction task_data_direction;
	atomic_t task_state_active;
	struct list_head t_list;
	struct list_head t_execute_list;
	struct list_head t_state_list;
	struct completion task_stop_comp;
} ____cacheline_aligned;

struct se_cmd {
	/* SAM response code being sent to initiator */
	u8 scsi_status;
	u8 scsi_asc;
	u8 scsi_ascq;
	u8 scsi_sense_reason;
	u16 scsi_sense_length;
	/* Delay for ALUA Active/NonOptimized state access in milliseconds */
	int alua_nonop_delay;
	/* See include/linux/dma-mapping.h */
	enum dma_data_direction data_direction;
	/* For SAM Task Attribute */
	int sam_task_attr;
	/* Transport protocol dependent state, see transport_state_table */
	enum transport_state_table t_state;
	/* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
	unsigned check_release:1;
	unsigned cmd_wait_set:1;
	/* See se_cmd_flags_table */
	u32 se_cmd_flags;
	u32 se_ordered_id;
	/* Total size in bytes associated with command */
	u32 data_length;
	/* SCSI Presented Data Transfer Length */
	u32 cmd_spdtl;
	u32 residual_count;
	u32 orig_fe_lun;
	/* Persistent Reservation key */
	u64 pr_res_key;
	/* Used for sense data */
	void *sense_buffer;
	struct list_head se_delayed_node;
	struct list_head se_lun_node;
	struct list_head se_qf_node;
	struct se_device *se_dev;
	struct se_dev_entry *se_deve;
	struct se_lun *se_lun;
	/* Only used for internal passthrough and legacy TCM fabric modules */
	struct se_session *se_sess;
	struct se_tmr_req *se_tmr_req;
	struct list_head se_queue_node;
	struct list_head se_cmd_list;
	struct completion cmd_wait_comp;
	struct target_core_fabric_ops *se_tfo;
	int (*execute_task)(struct se_task *);
	void (*transport_complete_callback)(struct se_cmd *);

	unsigned char *t_task_cdb;
	unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
	unsigned long long t_task_lba;
	int t_tasks_failed;
	u32 t_tasks_sg_chained_no;
	atomic_t t_fe_count;
	atomic_t t_se_count;
	atomic_t t_task_cdbs_left;
	atomic_t t_task_cdbs_ex_left;
	atomic_t t_task_cdbs_sent;
	atomic_t t_transport_aborted;
	atomic_t t_transport_active;
	atomic_t t_transport_complete;
	atomic_t t_transport_queue_active;
	atomic_t t_transport_sent;
	atomic_t t_transport_stop;
	atomic_t transport_dev_active;
	atomic_t transport_lun_active;
	atomic_t transport_lun_fe_stop;
	atomic_t transport_lun_stop;
	spinlock_t t_state_lock;
	struct completion t_transport_stop_comp;
	struct completion transport_lun_fe_stop_comp;
	struct completion transport_lun_stop_comp;
	struct scatterlist *t_tasks_sg_chained;

	struct work_struct work;

	struct scatterlist *t_data_sg;
	unsigned int t_data_nents;
	struct scatterlist *t_bidi_data_sg;
	unsigned int t_bidi_data_nents;

	/* Used for BIDI READ */
	struct list_head t_task_list;
	u32 t_task_list_num;

} ____cacheline_aligned;

struct se_tmr_req {
	/* Task Management function to be performed */
	u8 function;
	/* Task Management response to send */
	u8 response;
	int call_transport;
	/* Reference to ITT that Task Mgmt should be performed upon */
	u32 ref_task_tag;
	/* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */
	u64 ref_task_lun;
	void *fabric_tmr_ptr;
	struct se_cmd *task_cmd;
	struct se_cmd *ref_cmd;
	struct se_device *tmr_dev;
	struct se_lun *tmr_lun;
	struct list_head tmr_list;
} ____cacheline_aligned;

struct se_ua {
	u8 ua_asc;
	u8 ua_ascq;
	struct se_node_acl *ua_nacl;
	struct list_head ua_dev_list;
	struct list_head ua_nacl_list;
} ____cacheline_aligned;

struct se_node_acl {
	char initiatorname[TRANSPORT_IQN_LEN];
	/* Used to signal demo mode created ACL, disabled by default */
	bool dynamic_node_acl;
	u32 queue_depth;
	u32 acl_index;
	u64 num_cmds;
	u64 read_bytes;
	u64 write_bytes;
	spinlock_t stats_lock;
	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
	atomic_t acl_pr_ref_count;
	struct se_dev_entry *device_list;
	struct se_session *nacl_sess;
	struct se_portal_group *se_tpg;
	spinlock_t device_list_lock;
	spinlock_t nacl_sess_lock;
	struct config_group acl_group;
	struct config_group acl_attrib_group;
	struct config_group acl_auth_group;
	struct config_group acl_param_group;
	struct config_group acl_fabric_stat_group;
	struct config_group *acl_default_groups[5];
	struct list_head acl_list;
	struct list_head acl_sess_list;
} ____cacheline_aligned;

struct se_session {
	unsigned sess_tearing_down:1;
	u64 sess_bin_isid;
	struct se_node_acl *se_node_acl;
	struct se_portal_group *se_tpg;
	void *fabric_sess_ptr;
	struct list_head sess_list;
	struct list_head sess_acl_list;
	struct list_head sess_cmd_list;
	struct list_head sess_wait_list;
	spinlock_t sess_cmd_lock;
} ____cacheline_aligned;

struct se_device;
struct se_transform_info;
struct scatterlist;

struct se_ml_stat_grps {
	struct config_group stat_group;
	struct config_group scsi_auth_intr_group;
	struct config_group scsi_att_intr_port_group;
};

struct se_lun_acl {
	char initiatorname[TRANSPORT_IQN_LEN];
	u32 mapped_lun;
	struct se_node_acl *se_lun_nacl;
	struct se_lun *se_lun;
	struct list_head lacl_list;
	struct config_group se_lun_group;
	struct se_ml_stat_grps ml_stat_grps;
} ____cacheline_aligned;

struct se_dev_entry {
	bool def_pr_registered;
	/* See transport_lunflags_table */
	u32 lun_flags;
	u32 deve_cmds;
	u32 mapped_lun;
	u32 average_bytes;
	u32 last_byte_count;
	u32 total_cmds;
	u32 total_bytes;
	u64 pr_res_key;
	u64 creation_time;
	u32 attach_count;
	u64 read_bytes;
	u64 write_bytes;
	atomic_t ua_count;
	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
	atomic_t pr_ref_count;
	struct se_lun_acl *se_lun_acl;
	spinlock_t ua_lock;
	struct se_lun *se_lun;
	struct list_head alua_port_list;
	struct list_head ua_list;
} ____cacheline_aligned;

struct se_dev_limits {
	/* Max supported HW queue depth */
	u32 hw_queue_depth;
	/* Max supported virtual queue depth */
	u32 queue_depth;
	/* From include/linux/blkdev.h for the other HW/SW limits. */
	struct queue_limits limits;
} ____cacheline_aligned;

struct se_dev_attrib {
	int emulate_dpo;
	int emulate_fua_write;
	int emulate_fua_read;
	int emulate_write_cache;
	int emulate_ua_intlck_ctrl;
	int emulate_tas;
	int emulate_tpu;
	int emulate_tpws;
	int emulate_reservations;
	int emulate_alua;
	int enforce_pr_isids;
	int is_nonrot;
	int emulate_rest_reord;
	u32 hw_block_size;
	u32 block_size;
	u32 hw_max_sectors;
	u32 max_sectors;
	u32 optimal_sectors;
	u32 hw_queue_depth;
	u32 queue_depth;
	u32 max_unmap_lba_count;
	u32 max_unmap_block_desc_count;
	u32 unmap_granularity;
	u32 unmap_granularity_alignment;
	struct se_subsystem_dev *da_sub_dev;
	struct config_group da_group;
} ____cacheline_aligned;

struct se_dev_stat_grps {
	struct config_group stat_group;
	struct config_group scsi_dev_group;
	struct config_group scsi_tgt_dev_group;
	struct config_group scsi_lu_group;
};

struct se_subsystem_dev {
/* Used for struct se_subsystem_dev->se_dev_alias, must be less than PAGE_SIZE */
#define SE_DEV_ALIAS_LEN 512
	unsigned char se_dev_alias[SE_DEV_ALIAS_LEN];
/* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */
#define SE_UDEV_PATH_LEN 512
	unsigned char se_dev_udev_path[SE_UDEV_PATH_LEN];
	u32 su_dev_flags;
	struct se_hba *se_dev_hba;
	struct se_device *se_dev_ptr;
	struct se_dev_attrib se_dev_attrib;
	/* T10 Asymmetric Logical Unit Assignment for Target Ports */
	struct t10_alua t10_alua;
	/* T10 Inquiry and VPD WWN Information */
	struct t10_wwn t10_wwn;
	/* T10 SPC-2 + SPC-3 Reservations */
	struct t10_reservation t10_pr;
	spinlock_t se_dev_lock;
	void *se_dev_su_ptr;
	struct config_group se_dev_group;
	/* For T10 Reservations */
	struct config_group se_dev_pr_group;
	/* For target_core_stat.c groups */
	struct se_dev_stat_grps dev_stat_grps;
} ____cacheline_aligned;

struct se_device {
	/* RELATIVE TARGET PORT IDENTIFIER Counter */
	u16 dev_rpti_counter;
	/* Used for SAM Task Attribute ordering */
	u32 dev_cur_ordered_id;
	u32 dev_flags;
	u32 dev_port_count;
	/* See transport_device_status_table */
	u32 dev_status;
	u32 dev_tcq_window_closed;
	/* Physical device queue depth */
	u32 queue_depth;
	/* Used to enforce ISIDs for SPC-2 reservations */
	u64 dev_res_bin_isid;
	t10_task_attr_index_t dev_task_attr_type;
	/* Pointer to transport specific device structure */
	void *dev_ptr;
	u32 dev_index;
	u64 creation_time;
	u32 num_resets;
	u64 num_cmds;
	u64 read_bytes;
	u64 write_bytes;
	spinlock_t stats_lock;
	/* Active commands on this virtual SE device */
	atomic_t simple_cmds;
	atomic_t depth_left;
	atomic_t dev_ordered_id;
	atomic_t execute_tasks;
	atomic_t dev_ordered_sync;
	atomic_t dev_qf_count;
	struct se_obj dev_obj;
	struct se_obj dev_access_obj;
	struct se_obj dev_export_obj;
	struct se_queue_obj dev_queue_obj;
	spinlock_t delayed_cmd_lock;
	spinlock_t execute_task_lock;
	spinlock_t dev_reservation_lock;
	spinlock_t dev_status_lock;
	spinlock_t se_port_lock;
	spinlock_t se_tmr_lock;
	spinlock_t qf_cmd_lock;
	/* Used for legacy SPC-2 reservations */
	struct se_node_acl *dev_reserved_node_acl;
	/* Used for ALUA Logical Unit Group membership */
	struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem;
	/* Used for SPC-3 Persistent Reservations */
	struct t10_pr_registration *dev_pr_res_holder;
	struct list_head dev_sep_list;
	struct list_head dev_tmr_list;
	/* Pointer to descriptor for processing thread */
	struct task_struct *process_thread;
	struct work_struct qf_work_queue;
	struct list_head delayed_cmd_list;
	struct list_head execute_task_list;
	struct list_head state_task_list;
	struct list_head qf_cmd_list;
	/* Pointer to associated SE HBA */
	struct se_hba *se_hba;
	struct se_subsystem_dev *se_sub_dev;
	/* Pointer to template of function pointers for transport */
	struct se_subsystem_api *transport;
	/* Linked list for struct se_hba struct se_device list */
	struct list_head dev_list;
} ____cacheline_aligned;

struct se_hba {
	u16 hba_tpgt;
	u32 hba_id;
	/* See hba_flags_table */
	u32 hba_flags;
	/* Virtual iSCSI devices attached. */
	u32 dev_count;
	u32 hba_index;
	/* Pointer to transport specific host structure. */
	void *hba_ptr;
	/* Linked list for struct se_device */
	struct list_head hba_dev_list;
	struct list_head hba_node;
	spinlock_t device_lock;
	struct config_group hba_group;
	struct mutex hba_access_mutex;
	struct se_subsystem_api *transport;
} ____cacheline_aligned;

struct se_port_stat_grps {
	struct config_group stat_group;
	struct config_group scsi_port_group;
	struct config_group scsi_tgt_port_group;
	struct config_group scsi_transport_group;
};

struct se_lun {
	/* See transport_lun_status_table */
	enum transport_lun_status_table lun_status;
	u32 lun_access;
	u32 lun_flags;
	u32 unpacked_lun;
	atomic_t lun_acl_count;
	spinlock_t lun_acl_lock;
	spinlock_t lun_cmd_lock;
	spinlock_t lun_sep_lock;
	struct completion lun_shutdown_comp;
	struct list_head lun_cmd_list;
	struct list_head lun_acl_list;
	struct se_device *lun_se_dev;
	struct se_port *lun_sep;
	struct config_group lun_group;
	struct se_port_stat_grps port_stat_grps;
} ____cacheline_aligned;

struct scsi_port_stats {
	u64 cmd_pdus;
	u64 tx_data_octets;
	u64 rx_data_octets;
} ____cacheline_aligned;

struct se_port {
	/* RELATIVE TARGET PORT IDENTIFIER */
	u16 sep_rtpi;
	int sep_tg_pt_secondary_stat;
	int sep_tg_pt_secondary_write_md;
	u32 sep_index;
	struct scsi_port_stats sep_stats;
	/* Used for ALUA Target Port Groups membership */
	atomic_t sep_tg_pt_secondary_offline;
	/* Used for PR ALL_TG_PT=1 */
	atomic_t sep_tg_pt_ref_cnt;
	spinlock_t sep_alua_lock;
	struct mutex sep_tg_pt_md_mutex;
	struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem;
	struct se_lun *sep_lun;
	struct se_portal_group *sep_tpg;
	struct list_head sep_alua_list;
	struct list_head sep_list;
} ____cacheline_aligned;

struct se_tpg_np {
	struct se_portal_group *tpg_np_parent;
	struct config_group tpg_np_group;
} ____cacheline_aligned;

struct se_portal_group {
	/* Type of target portal group, see transport_tpg_type_table */
	enum transport_tpg_type_table se_tpg_type;
	/* Number of ACLed Initiator Nodes for this TPG */
	u32 num_node_acls;
	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
	atomic_t tpg_pr_ref_count;
	/* Spinlock for adding/removing ACLed Nodes */
	spinlock_t acl_node_lock;
	/* Spinlock for adding/removing sessions */
	spinlock_t session_lock;
	spinlock_t tpg_lun_lock;
	/* Pointer to $FABRIC_MOD portal group */
	void *se_tpg_fabric_ptr;
	struct list_head se_tpg_node;
	/* linked list for initiator ACL list */
	struct list_head acl_node_list;
	struct se_lun *tpg_lun_list;
	struct se_lun tpg_virt_lun0;
	/* List of TCM sessions associated with this TPG */
	struct list_head tpg_sess_list;
	/* Pointer to $FABRIC_MOD dependent code */
	struct target_core_fabric_ops *se_tpg_tfo;
	struct se_wwn *se_tpg_wwn;
	struct config_group tpg_group;
	struct config_group *tpg_default_groups[6];
	struct config_group tpg_lun_group;
	struct config_group tpg_np_group;
	struct config_group tpg_acl_group;
	struct config_group tpg_attrib_group;
	struct config_group tpg_param_group;
} ____cacheline_aligned;

struct se_wwn {
	struct target_fabric_configfs *wwn_tf;
	struct config_group wwn_group;
	struct config_group *wwn_default_groups[2];
	struct config_group fabric_stat_group;
} ____cacheline_aligned;

#endif /* TARGET_CORE_BASE_H */