Christoph Hellwig | c4795fb | 2011-11-16 09:46:48 -0500 | [diff] [blame] | 1 | #ifndef TARGET_CORE_FABRIC_H |
| 2 | #define TARGET_CORE_FABRIC_H |
| 3 | |
/*
 * Operations vector supplied by each SCSI fabric module (iSCSI, FC,
 * loopback, ...) and registered with target_register_template() so
 * target-core can call back into the fabric driver.
 */
struct target_core_fabric_ops {
	/* Owning kernel module — NOTE(review): presumably used for refcounting; confirm. */
	struct module *module;
	/* Fabric name string identifying this template. */
	const char *name;
	/* Extra bytes to allocate past struct se_node_acl for fabric-private data. */
	size_t node_acl_size;
	/*
	 * Limits number of scatterlist entries per SCF_SCSI_DATA_CDB payload.
	 * Setting this value tells target-core to enforce this limit, and
	 * report as INQUIRY EVPD=b0 MAXIMUM TRANSFER LENGTH.
	 *
	 * target-core will currently reset se_cmd->data_length to this
	 * maximum size, and set UNDERFLOW residual count if length exceeds
	 * this limit.
	 *
	 * XXX: Not all initiator hosts honor this block-limit EVPD
	 * XXX: Currently assumes single PAGE_SIZE per scatterlist entry
	 */
	u32 max_data_sg_nents;
	/* Return the fabric's human-readable name. */
	char *(*get_fabric_name)(void);
	/* TPG identity accessors: WWN string, numeric tag, default queue depth. */
	char *(*tpg_get_wwn)(struct se_portal_group *);
	u16 (*tpg_get_tag)(struct se_portal_group *);
	u32 (*tpg_get_default_depth)(struct se_portal_group *);
	/* Demo-mode / production-mode policy checks, each returning a boolean int. */
	int (*tpg_check_demo_mode)(struct se_portal_group *);
	int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
	int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
	int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *);
	/*
	 * Optionally used by fabrics to allow demo-mode login, but not
	 * expose any TPG LUNs, and return 'not connected' in standard
	 * inquiry response
	 */
	int (*tpg_check_demo_mode_login_only)(struct se_portal_group *);
	/*
	 * Optionally used as a configfs tunable to determine when
	 * target-core should signal the PROTECT=1 feature bit for
	 * backends that don't support T10-PI, so that either fabric
	 * HW offload or target-core emulation performs the associated
	 * WRITE_STRIP and READ_INSERT operations.
	 */
	int (*tpg_check_prot_fabric_only)(struct se_portal_group *);
	u32 (*tpg_get_inst_index)(struct se_portal_group *);
	/*
	 * Optional to release struct se_cmd and fabric dependent allocated
	 * I/O descriptor in transport_cmd_check_stop().
	 *
	 * Returning 1 will signal a descriptor has been released.
	 * Returning 0 will signal a descriptor has not been released.
	 */
	int (*check_stop_free)(struct se_cmd *);
	void (*release_cmd)(struct se_cmd *);
	/*
	 * Called with spin_lock_bh(struct se_portal_group->session_lock) held.
	 */
	int (*shutdown_session)(struct se_session *);
	void (*close_session)(struct se_session *);
	u32 (*sess_get_index)(struct se_session *);
	/*
	 * Used only for SCSI fabrics that contain multi-value TransportIDs
	 * (like iSCSI). All other SCSI fabrics should set this to NULL.
	 */
	u32 (*sess_get_initiator_sid)(struct se_session *,
				unsigned char *, u32);
	/* Data-transfer and status-delivery callbacks into the fabric driver. */
	int (*write_pending)(struct se_cmd *);
	int (*write_pending_status)(struct se_cmd *);
	void (*set_default_node_attributes)(struct se_node_acl *);
	int (*get_cmd_state)(struct se_cmd *);
	int (*queue_data_in)(struct se_cmd *);
	int (*queue_status)(struct se_cmd *);
	void (*queue_tm_rsp)(struct se_cmd *);
	/* Notification that target-core has aborted this command. */
	void (*aborted_task)(struct se_cmd *);
	/*
	 * fabric module calls for target_core_fabric_configfs.c
	 */
	struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
				struct config_group *, const char *);
	void (*fabric_drop_wwn)(struct se_wwn *);
	struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
				struct config_group *, const char *);
	void (*fabric_drop_tpg)(struct se_portal_group *);
	int (*fabric_post_link)(struct se_portal_group *,
				struct se_lun *);
	void (*fabric_pre_unlink)(struct se_portal_group *,
				struct se_lun *);
	struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *,
				struct config_group *, const char *);
	void (*fabric_drop_np)(struct se_tpg_np *);
	int (*fabric_init_nodeacl)(struct se_node_acl *, const char *);
	void (*fabric_cleanup_nodeacl)(struct se_node_acl *);

	/* configfs attribute arrays the fabric exposes at each configfs level. */
	struct configfs_attribute **tfc_discovery_attrs;
	struct configfs_attribute **tfc_wwn_attrs;
	struct configfs_attribute **tfc_tpg_base_attrs;
	struct configfs_attribute **tfc_tpg_np_base_attrs;
	struct configfs_attribute **tfc_tpg_attrib_attrs;
	struct configfs_attribute **tfc_tpg_auth_attrs;
	struct configfs_attribute **tfc_tpg_param_attrs;
	struct configfs_attribute **tfc_tpg_nacl_base_attrs;
	struct configfs_attribute **tfc_tpg_nacl_attrib_attrs;
	struct configfs_attribute **tfc_tpg_nacl_auth_attrs;
	struct configfs_attribute **tfc_tpg_nacl_param_attrs;
};
| 104 | |
/* Fabric template registration / deregistration. */
int target_register_template(const struct target_core_fabric_ops *fo);
void target_unregister_template(const struct target_core_fabric_ops *fo);

/* Pin/unpin a configfs item so it cannot be removed while in use. */
int target_depend_item(struct config_item *item);
void target_undepend_item(struct config_item *item);

/* Session lifecycle: allocation, registration, refcounting, teardown. */
struct se_session *transport_init_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int,
		unsigned int);
struct se_session *transport_init_session_tags(unsigned int, unsigned int,
		enum target_prot_op);
void __transport_register_session(struct se_portal_group *,
		struct se_node_acl *, struct se_session *, void *);
void transport_register_session(struct se_portal_group *,
		struct se_node_acl *, struct se_session *, void *);
int target_get_session(struct se_session *);
void target_put_session(struct se_session *);
ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *);
void transport_free_session(struct se_session *);
void target_put_nacl(struct se_node_acl *);
void transport_deregister_session_configfs(struct se_session *);
void transport_deregister_session(struct se_session *);


/* Command setup and submission into target-core. */
void transport_init_se_cmd(struct se_cmd *,
		const struct target_core_fabric_ops *,
		struct se_session *, u32, int, int, unsigned char *);
sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u64);
sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *,
		unsigned char *, unsigned char *, u64, u32, int, int, int,
		struct scatterlist *, u32, struct scatterlist *, u32,
		struct scatterlist *, u32);
int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
		unsigned char *, u64, u32, int, int, int);
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t, u64, int);
int transport_handle_cdb_direct(struct se_cmd *);
sense_reason_t transport_generic_new_cmd(struct se_cmd *);

void target_execute_cmd(struct se_cmd *cmd);

int transport_generic_free_cmd(struct se_cmd *, int);

/* Command completion, abort handling and per-session command tracking. */
bool transport_wait_for_tasks(struct se_cmd *);
int transport_check_aborted_status(struct se_cmd *, int);
int transport_send_check_condition_and_sense(struct se_cmd *,
		sense_reason_t, int);
int target_get_sess_cmd(struct se_cmd *, bool);
int target_put_sess_cmd(struct se_cmd *);
void target_sess_cmd_list_set_waiting(struct se_session *);
void target_wait_for_sess_cmds(struct se_session *);

int core_alua_check_nonop_delay(struct se_cmd *);

/* Task management (TMR) request handling. */
int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
void core_tmr_release_req(struct se_tmr_req *);
int transport_generic_handle_tmr(struct se_cmd *);
void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
void __target_execute_cmd(struct se_cmd *);
int transport_lookup_tmr_lun(struct se_cmd *, u64);
void core_allocate_nexus_loss_ua(struct se_node_acl *acl);

/* Target portal group and initiator node ACL management. */
struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
		unsigned char *);
bool target_tpg_has_node_acl(struct se_portal_group *tpg,
		const char *);
struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
		unsigned char *);
int core_tpg_set_initiator_node_queue_depth(struct se_node_acl *, u32);
int core_tpg_set_initiator_node_tag(struct se_portal_group *,
		struct se_node_acl *, const char *);
int core_tpg_register(struct se_wwn *, struct se_portal_group *, int);
int core_tpg_deregister(struct se_portal_group *);
| 181 | |
Nicholas Bellinger | b3faa2e | 2013-08-21 14:54:54 -0700 | [diff] [blame] | 182 | /* |
| 183 | * The LIO target core uses DMA_TO_DEVICE to mean that data is going |
| 184 | * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean |
| 185 | * that data is coming from the target (eg handling a READ). However, |
| 186 | * this is just the opposite of what we have to tell the DMA mapping |
| 187 | * layer -- eg when handling a READ, the HBA will have to DMA the data |
| 188 | * out of memory so it can send it to the initiator, which means we |
| 189 | * need to use DMA_TO_DEVICE when we map the data. |
| 190 | */ |
| 191 | static inline enum dma_data_direction |
| 192 | target_reverse_dma_direction(struct se_cmd *se_cmd) |
| 193 | { |
| 194 | if (se_cmd->se_cmd_flags & SCF_BIDI) |
| 195 | return DMA_BIDIRECTIONAL; |
| 196 | |
| 197 | switch (se_cmd->data_direction) { |
| 198 | case DMA_TO_DEVICE: |
| 199 | return DMA_FROM_DEVICE; |
| 200 | case DMA_FROM_DEVICE: |
| 201 | return DMA_TO_DEVICE; |
| 202 | case DMA_NONE: |
| 203 | default: |
| 204 | return DMA_NONE; |
| 205 | } |
| 206 | } |
| 207 | |
#endif /* TARGET_CORE_FABRIC_H */