#ifndef TARGET_CORE_BASE_H
#define TARGET_CORE_BASE_H

#include <linux/in.h>
#include <linux/configfs.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <net/sock.h>
#include <net/tcp.h>

#define TARGET_CORE_MOD_VERSION "v4.1.0-rc1-ml"
#define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION

/* Maximum Number of LUNs per Target Portal Group */
/* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
#define TRANSPORT_MAX_LUNS_PER_TPG 256
/*
 * By default we use 32-byte CDBs in TCM Core and subsystem plugin code.
 *
 * Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and
 * include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use
 * 16-byte CDBs by default and require an extra allocation for
 * 32-byte CDBs because of legacy issues.
 *
 * Within TCM Core there are no such legacy limitations, so we go ahead and
 * use 32-byte CDBs by default and use include/scsi/scsi.h:scsi_command_size()
 * within all TCM Core and subsystem plugin code.
 */
#define TCM_MAX_COMMAND_SIZE 32
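/*
 * Illustrative sketch (not part of the original header): the pattern the
 * comment above describes.  A CDB of up to TCM_MAX_COMMAND_SIZE bytes can be
 * copied into the fixed buffer embedded in a descriptor, while anything
 * larger (e.g. a VARIABLE LENGTH CDB) needs a separately allocated buffer.
 * The helper name is hypothetical and memcpy()/-EINVAL are assumed to be
 * available via the includes above.
 */
static inline int tcm_example_copy_cdb(unsigned char *dst,
				       const unsigned char *cdb,
				       unsigned int cdb_len)
{
	if (cdb_len > TCM_MAX_COMMAND_SIZE)
		return -EINVAL;	/* caller must allocate a larger buffer */
	memcpy(dst, cdb, cdb_len);
	return 0;
}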
/*
 * From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently
 * defined as 96, but the real limit is 252 (or 260 including the header)
 */
#define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE
/* Used by transport_send_check_condition_and_sense() */
#define SPC_SENSE_KEY_OFFSET 2
#define SPC_ADD_SENSE_LEN_OFFSET 7
#define SPC_ASC_KEY_OFFSET 12
#define SPC_ASCQ_KEY_OFFSET 13
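/*
 * Illustrative sketch (not part of the original header): how the offsets
 * above can be used to fill a fixed-format sense buffer for a CHECK
 * CONDITION, in the style of transport_send_check_condition_and_sense().
 * The 0x70 response code and the ILLEGAL REQUEST (0x05) sense key are SPC
 * values used here only as an example; the helper name is hypothetical.
 */
static inline void tcm_example_build_sense(unsigned char *buffer, u8 asc, u8 ascq)
{
	buffer[0] = 0x70;			/* CURRENT ERROR, fixed format */
	buffer[SPC_SENSE_KEY_OFFSET] = 0x05;	/* ILLEGAL REQUEST */
	buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;	/* additional sense length */
	buffer[SPC_ASC_KEY_OFFSET] = asc;	/* additional sense code */
	buffer[SPC_ASCQ_KEY_OFFSET] = ascq;	/* additional sense code qualifier */
}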
#define TRANSPORT_IQN_LEN 224
/* Used by target_core_store_alua_lu_gp() and target_core_alua_lu_gp_show_attr_members() */
#define LU_GROUP_NAME_BUF 256
/* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */
#define TG_PT_GROUP_NAME_BUF 256
/* Used to parse VPD into struct t10_vpd */
#define VPD_TMP_BUF_SIZE 128
/* Used by transport_generic_cmd_sequencer() */
#define READ_BLOCK_LEN 6
#define READ_CAP_LEN 8
#define READ_POSITION_LEN 20
#define INQUIRY_LEN 36
/* Used by transport_get_inquiry_vpd_serial() */
#define INQUIRY_VPD_SERIAL_LEN 254
/* Used by transport_get_inquiry_vpd_device_ident() */
#define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN 254

/* Attempts before moving from SHORT to LONG */
#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD 3
#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3 /* In milliseconds */
#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG 10 /* In milliseconds */

#define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */

/*
 * struct se_subsystem_dev->su_dev_flags
 */
#define SDF_FIRMWARE_VPD_UNIT_SERIAL 0x00000001
#define SDF_EMULATED_VPD_UNIT_SERIAL 0x00000002
#define SDF_USING_UDEV_PATH 0x00000004
#define SDF_USING_ALIAS 0x00000008

/*
 * struct se_device->dev_flags
 */
#define DF_READ_ONLY 0x00000001
#define DF_SPC2_RESERVATIONS 0x00000002
#define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000004

/* struct se_dev_attrib sanity values */
/* Default max_unmap_lba_count */
#define DA_MAX_UNMAP_LBA_COUNT 0
/* Default max_unmap_block_desc_count */
#define DA_MAX_UNMAP_BLOCK_DESC_COUNT 0
/* Default unmap_granularity */
#define DA_UNMAP_GRANULARITY_DEFAULT 0
/* Default unmap_granularity_alignment */
#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
/* Default max transfer length */
#define DA_FABRIC_MAX_SECTORS 8192
/* Emulation for Disable Page Out (DPO) */
#define DA_EMULATE_DPO 0
/* Emulation for Forced Unit Access WRITEs */
#define DA_EMULATE_FUA_WRITE 1
/* Emulation for Forced Unit Access READs */
#define DA_EMULATE_FUA_READ 0
/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
#define DA_EMULATE_WRITE_CACHE 0
/* Emulation for UNIT ATTENTION Interlock Control */
#define DA_EMULATE_UA_INTLLCK_CTRL 0
/* Emulation for TASK_ABORTED status (TAS) by default */
#define DA_EMULATE_TAS 1
/* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
#define DA_EMULATE_TPU 0
/*
 * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using
 * block/blk-lib.c:blkdev_issue_discard()
 */
#define DA_EMULATE_TPWS 0
/* No Emulation for PSCSI by default */
#define DA_EMULATE_RESERVATIONS 0
/* No Emulation for PSCSI by default */
#define DA_EMULATE_ALUA 0
/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
#define DA_ENFORCE_PR_ISIDS 1
#define DA_STATUS_MAX_SECTORS_MIN 16
#define DA_STATUS_MAX_SECTORS_MAX 8192
/* By default don't report non-rotating (solid state) medium */
#define DA_IS_NONROT 0
/* Queue Algorithm Modifier default for restricted reordering in control mode page */
#define DA_EMULATE_REST_REORD 0

#define SE_INQUIRY_BUF 512
#define SE_MODE_PAGE_BUF 512

/* struct se_hba->hba_flags */
enum hba_flags_table {
	HBA_FLAGS_INTERNAL_USE = 0x01,
	HBA_FLAGS_PSCSI_MODE = 0x02,
};

/* struct se_lun->lun_status */
enum transport_lun_status_table {
	TRANSPORT_LUN_STATUS_FREE = 0,
	TRANSPORT_LUN_STATUS_ACTIVE = 1,
};

/* struct se_portal_group->se_tpg_type */
enum transport_tpg_type_table {
	TRANSPORT_TPG_TYPE_NORMAL = 0,
	TRANSPORT_TPG_TYPE_DISCOVERY = 1,
};

/* struct se_task->task_flags */
enum se_task_flags {
	TF_ACTIVE = (1 << 0),
	TF_SENT = (1 << 1),
	TF_REQUEST_STOP = (1 << 2),
	TF_HAS_SENSE = (1 << 3),
};

/* Special transport agnostic struct se_cmd->t_states */
enum transport_state_table {
	TRANSPORT_NO_STATE = 0,
	TRANSPORT_NEW_CMD = 1,
	TRANSPORT_WRITE_PENDING = 3,
	TRANSPORT_PROCESS_WRITE = 4,
	TRANSPORT_PROCESSING = 5,
	TRANSPORT_COMPLETE = 6,
	TRANSPORT_PROCESS_TMR = 9,
	TRANSPORT_ISTATE_PROCESSING = 11,
	TRANSPORT_NEW_CMD_MAP = 16,
	TRANSPORT_COMPLETE_QF_WP = 18,
	TRANSPORT_COMPLETE_QF_OK = 19,
};

/* Used for struct se_cmd->se_cmd_flags */
enum se_cmd_flags_table {
	SCF_SUPPORTED_SAM_OPCODE = 0x00000001,
	SCF_TRANSPORT_TASK_SENSE = 0x00000002,
	SCF_EMULATED_TASK_SENSE = 0x00000004,
	SCF_SCSI_DATA_SG_IO_CDB = 0x00000008,
	SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010,
	SCF_SCSI_NON_DATA_CDB = 0x00000020,
	SCF_SCSI_TMR_CDB = 0x00000040,
	SCF_SCSI_CDB_EXCEPTION = 0x00000080,
	SCF_SCSI_RESERVATION_CONFLICT = 0x00000100,
	SCF_FUA = 0x00000200,
	SCF_SE_LUN_CMD = 0x00000800,
	SCF_SE_ALLOW_EOO = 0x00001000,
	SCF_BIDI = 0x00002000,
	SCF_SENT_CHECK_CONDITION = 0x00004000,
	SCF_OVERFLOW_BIT = 0x00008000,
	SCF_UNDERFLOW_BIT = 0x00010000,
	SCF_SENT_DELAYED_TAS = 0x00020000,
	SCF_ALUA_NON_OPTIMIZED = 0x00040000,
	SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000,
	SCF_UNUSED = 0x00100000,
	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00200000,
	SCF_ACK_KREF = 0x00400000,
};

/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
enum transport_lunflags_table {
	TRANSPORT_LUNFLAGS_NO_ACCESS = 0x00,
	TRANSPORT_LUNFLAGS_INITIATOR_ACCESS = 0x01,
	TRANSPORT_LUNFLAGS_READ_ONLY = 0x02,
	TRANSPORT_LUNFLAGS_READ_WRITE = 0x04,
};

/* struct se_device->dev_status */
enum transport_device_status_table {
	TRANSPORT_DEVICE_ACTIVATED = 0x01,
	TRANSPORT_DEVICE_DEACTIVATED = 0x02,
	TRANSPORT_DEVICE_QUEUE_FULL = 0x04,
	TRANSPORT_DEVICE_SHUTDOWN = 0x08,
	TRANSPORT_DEVICE_OFFLINE_ACTIVATED = 0x10,
	TRANSPORT_DEVICE_OFFLINE_DEACTIVATED = 0x20,
};

/*
 * Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason
 * to signal which ASC/ASCQ sense payload should be built.
 */
enum tcm_sense_reason_table {
	TCM_NON_EXISTENT_LUN = 0x01,
	TCM_UNSUPPORTED_SCSI_OPCODE = 0x02,
	TCM_INCORRECT_AMOUNT_OF_DATA = 0x03,
	TCM_UNEXPECTED_UNSOLICITED_DATA = 0x04,
	TCM_SERVICE_CRC_ERROR = 0x05,
	TCM_SNACK_REJECTED = 0x06,
	TCM_SECTOR_COUNT_TOO_MANY = 0x07,
	TCM_INVALID_CDB_FIELD = 0x08,
	TCM_INVALID_PARAMETER_LIST = 0x09,
	TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE = 0x0a,
	TCM_UNKNOWN_MODE_PAGE = 0x0b,
	TCM_WRITE_PROTECTED = 0x0c,
	TCM_CHECK_CONDITION_ABORT_CMD = 0x0d,
	TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e,
	TCM_CHECK_CONDITION_NOT_READY = 0x0f,
	TCM_RESERVATION_CONFLICT = 0x10,
};
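/*
 * Illustrative usage sketch (not part of the original header): CDB emulation
 * code typically records one of the reasons above and marks the command as
 * an exception before the core builds the sense payload, roughly:
 *
 *	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
 *	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 *	return -EINVAL;
 *
 * transport_send_check_condition_and_sense() then translates the reason into
 * the matching sense key/ASC/ASCQ.  The exact call sites are an assumption
 * based on how the reasons are described here.
 */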

enum target_sc_flags_table {
	TARGET_SCF_BIDI_OP = 0x01,
	TARGET_SCF_ACK_KREF = 0x02,
};
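/*
 * Illustrative usage sketch (not part of the original header): a fabric
 * driver passes these flags when handing a new command to the core, e.g.
 * OR-ing TARGET_SCF_ACK_KREF into the flags argument of target_submit_cmd()
 * so it keeps its own kref reference on the descriptor, and TARGET_SCF_BIDI_OP
 * for bidirectional commands.  The exact prototype lives in
 * target_core_fabric.h; this description is an assumption based on the flag
 * names.
 */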

/* fabric independent task management function values */
enum tcm_tmreq_table {
	TMR_ABORT_TASK = 1,
	TMR_ABORT_TASK_SET = 2,
	TMR_CLEAR_ACA = 3,
	TMR_CLEAR_TASK_SET = 4,
	TMR_LUN_RESET = 5,
	TMR_TARGET_WARM_RESET = 6,
	TMR_TARGET_COLD_RESET = 7,
	TMR_FABRIC_TMR = 255,
};

/* fabric independent task management response values */
enum tcm_tmrsp_table {
	TMR_FUNCTION_COMPLETE = 0,
	TMR_TASK_DOES_NOT_EXIST = 1,
	TMR_LUN_DOES_NOT_EXIST = 2,
	TMR_TASK_STILL_ALLEGIANT = 3,
	TMR_TASK_FAILOVER_NOT_SUPPORTED = 4,
	TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED = 5,
	TMR_FUNCTION_AUTHORIZATION_FAILED = 6,
	TMR_FUNCTION_REJECTED = 255,
};
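/*
 * Illustrative sketch (not part of the original header): a fabric module is
 * expected to translate its wire-level task management function codes into
 * the fabric independent TMR_* values above before invoking the core, along
 * the lines of (the FABRIC_TMF_* names are hypothetical):
 *
 *	switch (wire_tmf) {
 *	case FABRIC_TMF_ABORT_TASK:	return TMR_ABORT_TASK;
 *	case FABRIC_TMF_LUN_RESET:	return TMR_LUN_RESET;
 *	default:			return TMR_FABRIC_TMR;
 *	}
 *
 * The outcome is later reported back using the tcm_tmrsp_table values above.
 */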

struct se_obj {
	atomic_t obj_access_count;
};

/*
 * Used by TCM Core internally to signal if ALUA emulation is enabled or
 * disabled, or running in TCM/pSCSI passthrough mode
 */
typedef enum {
	SPC_ALUA_PASSTHROUGH,
	SPC2_ALUA_DISABLED,
	SPC3_ALUA_EMULATED
} t10_alua_index_t;

/*
 * Used by TCM Core internally to signal if SAM Task Attribute emulation
 * is enabled or disabled, or running in TCM/pSCSI passthrough mode
 */
typedef enum {
	SAM_TASK_ATTR_PASSTHROUGH,
	SAM_TASK_ATTR_UNTAGGED,
	SAM_TASK_ATTR_EMULATED
} t10_task_attr_index_t;

/*
 * Used for target SCSI statistics
 */
typedef enum {
	SCSI_INST_INDEX,
	SCSI_DEVICE_INDEX,
	SCSI_AUTH_INTR_INDEX,
	SCSI_INDEX_TYPE_MAX
} scsi_index_t;

struct se_cmd;

struct t10_alua {
	t10_alua_index_t alua_type;
	/* ALUA Target Port Group ID */
	u16 alua_tg_pt_gps_counter;
	u32 alua_tg_pt_gps_count;
	spinlock_t tg_pt_gps_lock;
	struct se_subsystem_dev *t10_sub_dev;
	/* Used for default ALUA Target Port Group */
	struct t10_alua_tg_pt_gp *default_tg_pt_gp;
	/* Used for default ALUA Target Port Group ConfigFS group */
	struct config_group alua_tg_pt_gps_group;
	int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *);
	struct list_head tg_pt_gps_list;
};

struct t10_alua_lu_gp {
	u16 lu_gp_id;
	int lu_gp_valid_id;
	u32 lu_gp_members;
	atomic_t lu_gp_ref_cnt;
	spinlock_t lu_gp_lock;
	struct config_group lu_gp_group;
	struct list_head lu_gp_node;
	struct list_head lu_gp_mem_list;
};

struct t10_alua_lu_gp_member {
	bool lu_gp_assoc;
	atomic_t lu_gp_mem_ref_cnt;
	spinlock_t lu_gp_mem_lock;
	struct t10_alua_lu_gp *lu_gp;
	struct se_device *lu_gp_mem_dev;
	struct list_head lu_gp_mem_list;
};

struct t10_alua_tg_pt_gp {
	u16 tg_pt_gp_id;
	int tg_pt_gp_valid_id;
	int tg_pt_gp_alua_access_status;
	int tg_pt_gp_alua_access_type;
	int tg_pt_gp_nonop_delay_msecs;
	int tg_pt_gp_trans_delay_msecs;
	int tg_pt_gp_pref;
	int tg_pt_gp_write_metadata;
	/* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
#define ALUA_MD_BUF_LEN 1024
	u32 tg_pt_gp_md_buf_len;
	u32 tg_pt_gp_members;
	atomic_t tg_pt_gp_alua_access_state;
	atomic_t tg_pt_gp_ref_cnt;
	spinlock_t tg_pt_gp_lock;
	struct mutex tg_pt_gp_md_mutex;
	struct se_subsystem_dev *tg_pt_gp_su_dev;
	struct config_group tg_pt_gp_group;
	struct list_head tg_pt_gp_list;
	struct list_head tg_pt_gp_mem_list;
};

struct t10_alua_tg_pt_gp_member {
	bool tg_pt_gp_assoc;
	atomic_t tg_pt_gp_mem_ref_cnt;
	spinlock_t tg_pt_gp_mem_lock;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct se_port *tg_pt;
	struct list_head tg_pt_gp_mem_list;
};

struct t10_vpd {
	unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
	int protocol_identifier_set;
	u32 protocol_identifier;
	u32 device_identifier_code_set;
	u32 association;
	u32 device_identifier_type;
	struct list_head vpd_list;
};

struct t10_wwn {
	char vendor[8];
	char model[16];
	char revision[4];
	char unit_serial[INQUIRY_VPD_SERIAL_LEN];
	spinlock_t t10_vpd_lock;
	struct se_subsystem_dev *t10_sub_dev;
	struct config_group t10_wwn_group;
	struct list_head t10_vpd_list;
};


/*
 * Used by TCM Core internally to signal if >= SPC-3 persistent reservations
 * emulation is enabled or disabled, or running in TCM/pSCSI passthrough
 * mode
 */
typedef enum {
	SPC_PASSTHROUGH,
	SPC2_RESERVATIONS,
	SPC3_PERSISTENT_RESERVATIONS
} t10_reservations_index_t;

struct t10_pr_registration {
	/* Used for fabrics that contain WWN+ISID */
#define PR_REG_ISID_LEN 16
	/* PR_REG_ISID_LEN + ',i,0x' */
#define PR_REG_ISID_ID_LEN (PR_REG_ISID_LEN + 5)
	char pr_reg_isid[PR_REG_ISID_LEN];
	/* Used during APTPL metadata reading */
#define PR_APTPL_MAX_IPORT_LEN 256
	unsigned char pr_iport[PR_APTPL_MAX_IPORT_LEN];
	/* Used during APTPL metadata reading */
#define PR_APTPL_MAX_TPORT_LEN 256
	unsigned char pr_tport[PR_APTPL_MAX_TPORT_LEN];
	/* For writing out live meta data */
	unsigned char *pr_aptpl_buf;
	u16 pr_aptpl_rpti;
	u16 pr_reg_tpgt;
	/* Reservation affects all target ports */
	int pr_reg_all_tg_pt;
	/* Activate Persistence across Target Power Loss */
	int pr_reg_aptpl;
	int pr_res_holder;
	int pr_res_type;
	int pr_res_scope;
	/* Used for fabric initiator WWPNs using an ISID */
	bool isid_present_at_reg;
	u32 pr_res_mapped_lun;
	u32 pr_aptpl_target_lun;
	u32 pr_res_generation;
	u64 pr_reg_bin_isid;
	u64 pr_res_key;
	atomic_t pr_res_holders;
	struct se_node_acl *pr_reg_nacl;
	struct se_dev_entry *pr_reg_deve;
	struct se_lun *pr_reg_tg_pt_lun;
	struct list_head pr_reg_list;
	struct list_head pr_reg_abort_list;
	struct list_head pr_reg_aptpl_list;
	struct list_head pr_reg_atp_list;
	struct list_head pr_reg_atp_mem_list;
};

/*
 * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS,
 * SPC2_RESERVATIONS or SPC_PASSTHROUGH in drivers/target/target_core_pr.c:
 * core_setup_reservations()
 */
struct t10_reservation_ops {
	int (*t10_reservation_check)(struct se_cmd *, u32 *);
	int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
	int (*t10_pr_register)(struct se_cmd *);
	int (*t10_pr_clear)(struct se_cmd *);
};

struct t10_reservation {
	/* Reservation affects all target ports */
	int pr_all_tg_pt;
	/* Activate Persistence across Target Power Loss enabled
	 * for SCSI device */
	int pr_aptpl_active;
	/* Used by struct t10_reservation->pr_aptpl_buf_len */
#define PR_APTPL_BUF_LEN 8192
	u32 pr_aptpl_buf_len;
	u32 pr_generation;
	t10_reservations_index_t res_type;
	spinlock_t registration_lock;
	spinlock_t aptpl_reg_lock;
	/*
	 * This will always be set by one individual I_T Nexus.
	 * However with all_tg_pt=1, other I_T Nexus from the
	 * same initiator can access PR reg/res info on a different
	 * target port.
	 *
	 * There is also the 'All Registrants' case, where there is
	 * a single *pr_res_holder of the reservation, but all
	 * registrations are considered reservation holders.
	 */
	struct se_node_acl *pr_res_holder;
	struct list_head registration_list;
	struct list_head aptpl_reg_list;
	struct t10_reservation_ops pr_ops;
};

struct se_queue_obj {
	atomic_t queue_cnt;
	spinlock_t cmd_queue_lock;
	struct list_head qobj_list;
	wait_queue_head_t thread_wq;
};

struct se_task {
	unsigned long long task_lba;
	u32 task_sectors;
	u32 task_size;
	struct se_cmd *task_se_cmd;
	struct scatterlist *task_sg;
	u32 task_sg_nents;
	u16 task_flags;
	u8 task_scsi_status;
	enum dma_data_direction task_data_direction;
	struct list_head t_list;
	struct list_head t_execute_list;
	struct list_head t_state_list;
	bool t_state_active;
	struct completion task_stop_comp;
};

struct se_tmr_req {
	/* Task Management function to be performed */
	u8 function;
	/* Task Management response to send */
	u8 response;
	int call_transport;
	/* Reference to the ITT that Task Mgmt should be performed upon */
	u32 ref_task_tag;
	/* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */
	u64 ref_task_lun;
	void *fabric_tmr_ptr;
	struct se_cmd *task_cmd;
	struct se_cmd *ref_cmd;
	struct se_device *tmr_dev;
	struct se_lun *tmr_lun;
	struct list_head tmr_list;
};

struct se_cmd {
	/* SAM response code being sent to initiator */
	u8 scsi_status;
	u8 scsi_asc;
	u8 scsi_ascq;
	u8 scsi_sense_reason;
	u16 scsi_sense_length;
	/* Delay for ALUA Active/NonOptimized state access in milliseconds */
	int alua_nonop_delay;
	/* See include/linux/dma-mapping.h */
	enum dma_data_direction data_direction;
	/* For SAM Task Attribute */
	int sam_task_attr;
	/* Transport protocol dependent state, see transport_state_table */
	enum transport_state_table t_state;
	/* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
	unsigned check_release:1;
	unsigned cmd_wait_set:1;
	/* See se_cmd_flags_table */
	u32 se_cmd_flags;
	u32 se_ordered_id;
	/* Total size in bytes associated with command */
	u32 data_length;
	/* SCSI Presented Data Transfer Length */
	u32 cmd_spdtl;
	u32 residual_count;
	u32 orig_fe_lun;
	/* Persistent Reservation key */
	u64 pr_res_key;
	/* Used for sense data */
	void *sense_buffer;
	struct list_head se_delayed_node;
	struct list_head se_lun_node;
	struct list_head se_qf_node;
	struct se_device *se_dev;
	struct se_dev_entry *se_deve;
	struct se_lun *se_lun;
	/* Only used for internal passthrough and legacy TCM fabric modules */
	struct se_session *se_sess;
	struct se_tmr_req *se_tmr_req;
	struct list_head se_queue_node;
	struct list_head se_cmd_list;
	struct completion cmd_wait_comp;
	struct kref cmd_kref;
	struct target_core_fabric_ops *se_tfo;
	int (*execute_task)(struct se_task *);
	void (*transport_complete_callback)(struct se_cmd *);

	unsigned char *t_task_cdb;
	unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
	unsigned long long t_task_lba;
	u32 t_tasks_sg_chained_no;
	atomic_t t_fe_count;
	atomic_t t_se_count;
	atomic_t t_task_cdbs_left;
	atomic_t t_task_cdbs_ex_left;
	atomic_t t_task_cdbs_sent;
	unsigned int transport_state;
#define CMD_T_ABORTED (1 << 0)
#define CMD_T_ACTIVE (1 << 1)
#define CMD_T_COMPLETE (1 << 2)
#define CMD_T_QUEUED (1 << 3)
#define CMD_T_SENT (1 << 4)
#define CMD_T_STOP (1 << 5)
#define CMD_T_FAILED (1 << 6)
#define CMD_T_LUN_STOP (1 << 7)
#define CMD_T_LUN_FE_STOP (1 << 8)
#define CMD_T_DEV_ACTIVE (1 << 9)
	spinlock_t t_state_lock;
	struct completion t_transport_stop_comp;
	struct completion transport_lun_fe_stop_comp;
	struct completion transport_lun_stop_comp;
	struct scatterlist *t_tasks_sg_chained;

	struct work_struct work;

	struct scatterlist *t_data_sg;
	unsigned int t_data_nents;
	void *t_data_vmap;
	struct scatterlist *t_bidi_data_sg;
	unsigned int t_bidi_data_nents;

	/* Used for BIDI READ */
	struct list_head t_task_list;
	u32 t_task_list_num;

};
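/*
 * Illustrative sketch (not part of the original header): transport_state is
 * protected by t_state_lock, so a status check such as "has this command
 * been aborted?" is done under the lock.  The helper below is an example
 * only and is not part of the target core API.
 */
static inline bool tcm_example_cmd_aborted(struct se_cmd *cmd)
{
	unsigned long flags;
	bool aborted;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	aborted = (cmd->transport_state & CMD_T_ABORTED) != 0;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return aborted;
}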

struct se_ua {
	u8 ua_asc;
	u8 ua_ascq;
	struct se_node_acl *ua_nacl;
	struct list_head ua_dev_list;
	struct list_head ua_nacl_list;
};

struct se_node_acl {
	char initiatorname[TRANSPORT_IQN_LEN];
	/* Used to signal demo mode created ACL, disabled by default */
	bool dynamic_node_acl;
	u32 queue_depth;
	u32 acl_index;
	u64 num_cmds;
	u64 read_bytes;
	u64 write_bytes;
	spinlock_t stats_lock;
	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
	atomic_t acl_pr_ref_count;
	struct se_dev_entry *device_list;
	struct se_session *nacl_sess;
	struct se_portal_group *se_tpg;
	spinlock_t device_list_lock;
	spinlock_t nacl_sess_lock;
	struct config_group acl_group;
	struct config_group acl_attrib_group;
	struct config_group acl_auth_group;
	struct config_group acl_param_group;
	struct config_group acl_fabric_stat_group;
	struct config_group *acl_default_groups[5];
	struct list_head acl_list;
	struct list_head acl_sess_list;
	struct completion acl_free_comp;
};

struct se_session {
	unsigned sess_tearing_down:1;
	u64 sess_bin_isid;
	struct se_node_acl *se_node_acl;
	struct se_portal_group *se_tpg;
	void *fabric_sess_ptr;
	struct list_head sess_list;
	struct list_head sess_acl_list;
	struct list_head sess_cmd_list;
	struct list_head sess_wait_list;
	spinlock_t sess_cmd_lock;
	struct kref sess_kref;
};

struct se_device;
struct se_transform_info;
struct scatterlist;

struct se_ml_stat_grps {
	struct config_group stat_group;
	struct config_group scsi_auth_intr_group;
	struct config_group scsi_att_intr_port_group;
};

struct se_lun_acl {
	char initiatorname[TRANSPORT_IQN_LEN];
	u32 mapped_lun;
	struct se_node_acl *se_lun_nacl;
	struct se_lun *se_lun;
	struct list_head lacl_list;
	struct config_group se_lun_group;
	struct se_ml_stat_grps ml_stat_grps;
};

struct se_dev_entry {
	bool def_pr_registered;
	/* See transport_lunflags_table */
	u32 lun_flags;
	u32 deve_cmds;
	u32 mapped_lun;
	u32 average_bytes;
	u32 last_byte_count;
	u32 total_cmds;
	u32 total_bytes;
	u64 pr_res_key;
	u64 creation_time;
	u32 attach_count;
	u64 read_bytes;
	u64 write_bytes;
	atomic_t ua_count;
	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
	atomic_t pr_ref_count;
	struct se_lun_acl *se_lun_acl;
	spinlock_t ua_lock;
	struct se_lun *se_lun;
	struct list_head alua_port_list;
	struct list_head ua_list;
};

struct se_dev_limits {
	/* Max supported HW queue depth */
	u32 hw_queue_depth;
	/* Max supported virtual queue depth */
	u32 queue_depth;
	/* From include/linux/blkdev.h for the other HW/SW limits. */
	struct queue_limits limits;
};
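/*
 * Illustrative sketch (not part of the original header): a block-device
 * backed subsystem plugin can seed these limits from the underlying
 * struct request_queue, roughly as below.  The helper name is hypothetical;
 * the accessors are assumed to come from <linux/blkdev.h>.
 */
static inline void tcm_example_fill_dev_limits(struct se_dev_limits *dev_limits,
					       struct request_queue *q)
{
	dev_limits->limits.logical_block_size = queue_logical_block_size(q);
	dev_limits->limits.max_hw_sectors = queue_max_hw_sectors(q);
	dev_limits->limits.max_sectors = queue_max_sectors(q);
	dev_limits->hw_queue_depth = q->nr_requests;
	dev_limits->queue_depth = q->nr_requests;
}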

struct se_dev_attrib {
	int emulate_dpo;
	int emulate_fua_write;
	int emulate_fua_read;
	int emulate_write_cache;
	int emulate_ua_intlck_ctrl;
	int emulate_tas;
	int emulate_tpu;
	int emulate_tpws;
	int emulate_reservations;
	int emulate_alua;
	int enforce_pr_isids;
	int is_nonrot;
	int emulate_rest_reord;
	u32 hw_block_size;
	u32 block_size;
	u32 hw_max_sectors;
	u32 max_sectors;
	u32 fabric_max_sectors;
	u32 optimal_sectors;
	u32 hw_queue_depth;
	u32 queue_depth;
	u32 max_unmap_lba_count;
	u32 max_unmap_block_desc_count;
	u32 unmap_granularity;
	u32 unmap_granularity_alignment;
	struct se_subsystem_dev *da_sub_dev;
	struct config_group da_group;
};

struct se_dev_stat_grps {
	struct config_group stat_group;
	struct config_group scsi_dev_group;
	struct config_group scsi_tgt_dev_group;
	struct config_group scsi_lu_group;
};

struct se_subsystem_dev {
/* Used for struct se_subsystem_dev->se_dev_alias, must be less than PAGE_SIZE */
#define SE_DEV_ALIAS_LEN 512
	unsigned char se_dev_alias[SE_DEV_ALIAS_LEN];
/* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */
#define SE_UDEV_PATH_LEN 512
	unsigned char se_dev_udev_path[SE_UDEV_PATH_LEN];
	u32 su_dev_flags;
	struct se_hba *se_dev_hba;
	struct se_device *se_dev_ptr;
	struct se_dev_attrib se_dev_attrib;
	/* T10 Asymmetric Logical Unit Assignment for Target Ports */
	struct t10_alua t10_alua;
	/* T10 Inquiry and VPD WWN Information */
	struct t10_wwn t10_wwn;
	/* T10 SPC-2 + SPC-3 Reservations */
	struct t10_reservation t10_pr;
	spinlock_t se_dev_lock;
	void *se_dev_su_ptr;
	struct config_group se_dev_group;
	/* For T10 Reservations */
	struct config_group se_dev_pr_group;
	/* For target_core_stat.c groups */
	struct se_dev_stat_grps dev_stat_grps;
};

struct se_device {
	/* RELATIVE TARGET PORT IDENTIFIER Counter */
	u16 dev_rpti_counter;
	/* Used for SAM Task Attribute ordering */
	u32 dev_cur_ordered_id;
	u32 dev_flags;
	u32 dev_port_count;
	/* See transport_device_status_table */
	u32 dev_status;
	/* Physical device queue depth */
	u32 queue_depth;
	/* Used for SPC-2 reservations enforce of ISIDs */
	u64 dev_res_bin_isid;
	t10_task_attr_index_t dev_task_attr_type;
	/* Pointer to transport specific device structure */
	void *dev_ptr;
	u32 dev_index;
	u64 creation_time;
	u32 num_resets;
	u64 num_cmds;
	u64 read_bytes;
	u64 write_bytes;
	spinlock_t stats_lock;
	/* Active commands on this virtual SE device */
	atomic_t simple_cmds;
	atomic_t dev_ordered_id;
	atomic_t execute_tasks;
	atomic_t dev_ordered_sync;
	atomic_t dev_qf_count;
	struct se_obj dev_obj;
	struct se_obj dev_access_obj;
	struct se_obj dev_export_obj;
	struct se_queue_obj dev_queue_obj;
	spinlock_t delayed_cmd_lock;
	spinlock_t execute_task_lock;
	spinlock_t dev_reservation_lock;
	spinlock_t dev_status_lock;
	spinlock_t se_port_lock;
	spinlock_t se_tmr_lock;
	spinlock_t qf_cmd_lock;
	/* Used for legacy SPC-2 reservations */
	struct se_node_acl *dev_reserved_node_acl;
	/* Used for ALUA Logical Unit Group membership */
	struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem;
	/* Used for SPC-3 Persistent Reservations */
	struct t10_pr_registration *dev_pr_res_holder;
	struct list_head dev_sep_list;
	struct list_head dev_tmr_list;
	/* Pointer to descriptor for processing thread */
	struct task_struct *process_thread;
	struct work_struct qf_work_queue;
	struct list_head delayed_cmd_list;
	struct list_head execute_task_list;
	struct list_head state_task_list;
	struct list_head qf_cmd_list;
	/* Pointer to associated SE HBA */
	struct se_hba *se_hba;
	struct se_subsystem_dev *se_sub_dev;
	/* Pointer to template of function pointers for transport */
	struct se_subsystem_api *transport;
	/* Linked list for struct se_hba struct se_device list */
	struct list_head dev_list;
};

struct se_hba {
	u16 hba_tpgt;
	u32 hba_id;
	/* See hba_flags_table */
	u32 hba_flags;
	/* Virtual iSCSI devices attached. */
	u32 dev_count;
	u32 hba_index;
	/* Pointer to transport specific host structure. */
	void *hba_ptr;
	/* Linked list for struct se_device */
	struct list_head hba_dev_list;
	struct list_head hba_node;
	spinlock_t device_lock;
	struct config_group hba_group;
	struct mutex hba_access_mutex;
	struct se_subsystem_api *transport;
};

struct se_port_stat_grps {
	struct config_group stat_group;
	struct config_group scsi_port_group;
	struct config_group scsi_tgt_port_group;
	struct config_group scsi_transport_group;
};

struct se_lun {
	/* See transport_lun_status_table */
	enum transport_lun_status_table lun_status;
	u32 lun_access;
	u32 lun_flags;
	u32 unpacked_lun;
	atomic_t lun_acl_count;
	spinlock_t lun_acl_lock;
	spinlock_t lun_cmd_lock;
	spinlock_t lun_sep_lock;
	struct completion lun_shutdown_comp;
	struct list_head lun_cmd_list;
	struct list_head lun_acl_list;
	struct se_device *lun_se_dev;
	struct se_port *lun_sep;
	struct config_group lun_group;
	struct se_port_stat_grps port_stat_grps;
};

struct scsi_port_stats {
	u64 cmd_pdus;
	u64 tx_data_octets;
	u64 rx_data_octets;
};

struct se_port {
	/* RELATIVE TARGET PORT IDENTIFIER */
	u16 sep_rtpi;
	int sep_tg_pt_secondary_stat;
	int sep_tg_pt_secondary_write_md;
	u32 sep_index;
	struct scsi_port_stats sep_stats;
	/* Used for ALUA Target Port Groups membership */
	atomic_t sep_tg_pt_secondary_offline;
	/* Used for PR ALL_TG_PT=1 */
	atomic_t sep_tg_pt_ref_cnt;
	spinlock_t sep_alua_lock;
	struct mutex sep_tg_pt_md_mutex;
	struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem;
	struct se_lun *sep_lun;
	struct se_portal_group *sep_tpg;
	struct list_head sep_alua_list;
	struct list_head sep_list;
};

struct se_tpg_np {
	struct se_portal_group *tpg_np_parent;
	struct config_group tpg_np_group;
};

struct se_portal_group {
	/* Type of target portal group, see transport_tpg_type_table */
	enum transport_tpg_type_table se_tpg_type;
	/* Number of ACLed Initiator Nodes for this TPG */
	u32 num_node_acls;
	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
	atomic_t tpg_pr_ref_count;
	/* Spinlock for adding/removing ACLed Nodes */
	spinlock_t acl_node_lock;
	/* Spinlock for adding/removing sessions */
	spinlock_t session_lock;
	spinlock_t tpg_lun_lock;
	/* Pointer to $FABRIC_MOD portal group */
	void *se_tpg_fabric_ptr;
	struct list_head se_tpg_node;
	/* linked list for initiator ACL list */
	struct list_head acl_node_list;
	struct se_lun *tpg_lun_list;
	struct se_lun tpg_virt_lun0;
	/* List of TCM sessions associated with this TPG */
	struct list_head tpg_sess_list;
	/* Pointer to $FABRIC_MOD dependent code */
	struct target_core_fabric_ops *se_tpg_tfo;
	struct se_wwn *se_tpg_wwn;
	struct config_group tpg_group;
	struct config_group *tpg_default_groups[6];
	struct config_group tpg_lun_group;
	struct config_group tpg_np_group;
	struct config_group tpg_acl_group;
	struct config_group tpg_attrib_group;
	struct config_group tpg_param_group;
};

struct se_wwn {
	struct target_fabric_configfs *wwn_tf;
	struct config_group wwn_group;
	struct config_group *wwn_default_groups[2];
	struct config_group fabric_stat_group;
};

#endif /* TARGET_CORE_BASE_H */