blob: 5cd6faa6e0d166ed07444cf5e3735e2626483205 [file] [log] [blame]
Christoph Hellwigc4795fb2011-11-16 09:46:48 -05001#ifndef TARGET_CORE_FABRIC_H
2#define TARGET_CORE_FABRIC_H
3
/*
 * Per-fabric driver callback and configfs attribute table.  A fabric
 * module (iSCSI, FC, vhost-scsi, ...) fills one of these in and hands
 * it to target-core via target_register_template().
 */
struct target_core_fabric_ops {
	/* Owning module, used for reference counting by target-core. */
	struct module *module;
	/* Fabric name as it appears under configfs. */
	const char *name;
	/* Extra per-node-ACL allocation size requested by the fabric. */
	size_t node_acl_size;
	/*
	 * Limits number of scatterlist entries per SCF_SCSI_DATA_CDB payload.
	 * Setting this value tells target-core to enforce this limit, and
	 * report as INQUIRY EVPD=b0 MAXIMUM TRANSFER LENGTH.
	 *
	 * target-core will currently reset se_cmd->data_length to this
	 * maximum size, and set UNDERFLOW residual count if length exceeds
	 * this limit.
	 *
	 * XXX: Not all initiator hosts honor this block-limit EVPD
	 * XXX: Currently assumes single PAGE_SIZE per scatterlist entry
	 */
	u32 max_data_sg_nents;
	char *(*get_fabric_name)(void);
	/* Target portal group (TPG) identification callbacks. */
	char *(*tpg_get_wwn)(struct se_portal_group *);
	u16 (*tpg_get_tag)(struct se_portal_group *);
	u32 (*tpg_get_default_depth)(struct se_portal_group *);
	/* Demo-mode / write-protect policy checks, returning boolean int. */
	int (*tpg_check_demo_mode)(struct se_portal_group *);
	int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
	int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
	int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *);
	/*
	 * Optionally used by fabrics to allow demo-mode login, but not
	 * expose any TPG LUNs, and return 'not connected' in standard
	 * inquiry response
	 */
	int (*tpg_check_demo_mode_login_only)(struct se_portal_group *);
	/*
	 * Optionally used as a configfs tunable to determine when
	 * target-core should signal the PROTECT=1 feature bit for
	 * backends that don't support T10-PI, so that either fabric
	 * HW offload or target-core emulation performs the associated
	 * WRITE_STRIP and READ_INSERT operations.
	 */
	int (*tpg_check_prot_fabric_only)(struct se_portal_group *);
	u32 (*tpg_get_inst_index)(struct se_portal_group *);
	/*
	 * Optional to release struct se_cmd and fabric dependent allocated
	 * I/O descriptor in transport_cmd_check_stop().
	 *
	 * Returning 1 will signal a descriptor has been released.
	 * Returning 0 will signal a descriptor has not been released.
	 */
	int (*check_stop_free)(struct se_cmd *);
	void (*release_cmd)(struct se_cmd *);
	void (*close_session)(struct se_session *);
	u32 (*sess_get_index)(struct se_session *);
	/*
	 * Used only for SCSI fabrics that contain multi-value TransportIDs
	 * (like iSCSI). All other SCSI fabrics should set this to NULL.
	 */
	u32 (*sess_get_initiator_sid)(struct se_session *,
				unsigned char *, u32);
	/* Data-phase and status-phase callbacks into the fabric driver. */
	int (*write_pending)(struct se_cmd *);
	int (*write_pending_status)(struct se_cmd *);
	void (*set_default_node_attributes)(struct se_node_acl *);
	int (*get_cmd_state)(struct se_cmd *);
	int (*queue_data_in)(struct se_cmd *);
	int (*queue_status)(struct se_cmd *);
	void (*queue_tm_rsp)(struct se_cmd *);
	void (*aborted_task)(struct se_cmd *);
	/*
	 * fabric module calls for target_core_fabric_configfs.c
	 */
	struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
				struct config_group *, const char *);
	void (*fabric_drop_wwn)(struct se_wwn *);
	void (*add_wwn_groups)(struct se_wwn *);
	struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
				struct config_group *, const char *);
	void (*fabric_drop_tpg)(struct se_portal_group *);
	int (*fabric_post_link)(struct se_portal_group *,
				struct se_lun *);
	void (*fabric_pre_unlink)(struct se_portal_group *,
				struct se_lun *);
	struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *,
				struct config_group *, const char *);
	void (*fabric_drop_np)(struct se_tpg_np *);
	int (*fabric_init_nodeacl)(struct se_node_acl *, const char *);

	/* NULL-terminated configfs attribute tables for each config level. */
	struct configfs_attribute **tfc_discovery_attrs;
	struct configfs_attribute **tfc_wwn_attrs;
	struct configfs_attribute **tfc_tpg_base_attrs;
	struct configfs_attribute **tfc_tpg_np_base_attrs;
	struct configfs_attribute **tfc_tpg_attrib_attrs;
	struct configfs_attribute **tfc_tpg_auth_attrs;
	struct configfs_attribute **tfc_tpg_param_attrs;
	struct configfs_attribute **tfc_tpg_nacl_base_attrs;
	struct configfs_attribute **tfc_tpg_nacl_attrib_attrs;
	struct configfs_attribute **tfc_tpg_nacl_auth_attrs;
	struct configfs_attribute **tfc_tpg_nacl_param_attrs;
};
100
/* Fabric template registration / teardown. */
int target_register_template(const struct target_core_fabric_ops *fo);
void target_unregister_template(const struct target_core_fabric_ops *fo);

/* Pin/unpin a configfs item so it cannot be removed while in use. */
int target_depend_item(struct config_item *item);
void target_undepend_item(struct config_item *item);

/* Session allocation with a fabric-supplied completion callback. */
struct se_session *target_alloc_session(struct se_portal_group *,
		unsigned int, unsigned int, enum target_prot_op prot_op,
		const char *, void *,
		int (*callback)(struct se_portal_group *,
				struct se_session *, void *));

/* Session lifecycle: init, (de)register, per-session tag pools. */
struct se_session *transport_init_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int,
		unsigned int);
struct se_session *transport_init_session_tags(unsigned int, unsigned int,
		enum target_prot_op);
void __transport_register_session(struct se_portal_group *,
		struct se_node_acl *, struct se_session *, void *);
void transport_register_session(struct se_portal_group *,
		struct se_node_acl *, struct se_session *, void *);
ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *);
void transport_free_session(struct se_session *);
void target_put_nacl(struct se_node_acl *);
void transport_deregister_session_configfs(struct se_session *);
void transport_deregister_session(struct se_session *);


/* Command setup and submission into target-core. */
void transport_init_se_cmd(struct se_cmd *,
		const struct target_core_fabric_ops *,
		struct se_session *, u32, int, int, unsigned char *);
sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u64);
sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *,
		unsigned char *, unsigned char *, u64, u32, int, int, int,
		struct scatterlist *, u32, struct scatterlist *, u32,
		struct scatterlist *, u32);
int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
		unsigned char *, u64, u32, int, int, int);
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t, u64, int);
int transport_handle_cdb_direct(struct se_cmd *);
sense_reason_t transport_generic_new_cmd(struct se_cmd *);

void target_execute_cmd(struct se_cmd *cmd);

int transport_generic_free_cmd(struct se_cmd *, int);

/* Command completion, abort, and sense/status reporting. */
bool transport_wait_for_tasks(struct se_cmd *);
int transport_check_aborted_status(struct se_cmd *, int);
int transport_send_check_condition_and_sense(struct se_cmd *,
		sense_reason_t, int);
int target_get_sess_cmd(struct se_cmd *, bool);
int target_put_sess_cmd(struct se_cmd *);
void target_sess_cmd_list_set_waiting(struct se_session *);
void target_wait_for_sess_cmds(struct se_session *);

int core_alua_check_nonop_delay(struct se_cmd *);

/* Task management (TMR) request handling. */
int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
void core_tmr_release_req(struct se_tmr_req *);
int transport_generic_handle_tmr(struct se_cmd *);
void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
int transport_lookup_tmr_lun(struct se_cmd *, u64);
void core_allocate_nexus_loss_ua(struct se_node_acl *acl);

/* Target portal group and initiator node ACL management. */
struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
		unsigned char *);
bool target_tpg_has_node_acl(struct se_portal_group *tpg,
		const char *);
struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
		unsigned char *);
int core_tpg_set_initiator_node_queue_depth(struct se_node_acl *, u32);
int core_tpg_set_initiator_node_tag(struct se_portal_group *,
		struct se_node_acl *, const char *);
int core_tpg_register(struct se_wwn *, struct se_portal_group *, int);
int core_tpg_deregister(struct se_portal_group *);

/* Scatterlist helpers for fabric drivers. */
int target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
		u32 length, bool zero_page, bool chainable);
void target_free_sgl(struct scatterlist *sgl, int nents);
184
Nicholas Bellingerb3faa2e2013-08-21 14:54:54 -0700185/*
186 * The LIO target core uses DMA_TO_DEVICE to mean that data is going
187 * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
188 * that data is coming from the target (eg handling a READ). However,
189 * this is just the opposite of what we have to tell the DMA mapping
190 * layer -- eg when handling a READ, the HBA will have to DMA the data
191 * out of memory so it can send it to the initiator, which means we
192 * need to use DMA_TO_DEVICE when we map the data.
193 */
194static inline enum dma_data_direction
195target_reverse_dma_direction(struct se_cmd *se_cmd)
196{
197 if (se_cmd->se_cmd_flags & SCF_BIDI)
198 return DMA_BIDIRECTIONAL;
199
200 switch (se_cmd->data_direction) {
201 case DMA_TO_DEVICE:
202 return DMA_FROM_DEVICE;
203 case DMA_FROM_DEVICE:
204 return DMA_TO_DEVICE;
205 case DMA_NONE:
206 default:
207 return DMA_NONE;
208 }
209}
210
#endif /* TARGET_CORE_FABRIC_H */