Manish Rangankar | ace7f46 | 2016-12-01 00:21:08 -0800 | [diff] [blame] | 1 | /* |
| 2 | * QLogic iSCSI Offload Driver |
| 3 | * Copyright (c) 2016 Cavium Inc. |
| 4 | * |
| 5 | * This software is available under the terms of the GNU General Public License |
| 6 | * (GPL) Version 2, available from the file COPYING in the main directory of |
| 7 | * this source tree. |
| 8 | */ |
| 9 | |
| 10 | #ifndef _QEDI_H_ |
| 11 | #define _QEDI_H_ |
| 12 | |
| 13 | #define __PREVENT_QED_HSI__ |
| 14 | |
| 15 | #include <scsi/scsi_transport_iscsi.h> |
| 16 | #include <scsi/libiscsi.h> |
| 17 | #include <scsi/scsi_host.h> |
| 18 | #include <linux/uio_driver.h> |
| 19 | |
| 20 | #include "qedi_hsi.h" |
| 21 | #include <linux/qed/qed_if.h> |
| 22 | #include "qedi_dbg.h" |
| 23 | #include <linux/qed/qed_iscsi_if.h> |
| 24 | #include <linux/qed/qed_ll2_if.h> |
| 25 | #include "qedi_version.h" |
Nilesh Javali | c57ec8f | 2017-06-27 02:26:56 -0700 | [diff] [blame] | 26 | #include "qedi_nvm_iscsi_cfg.h" |
Manish Rangankar | ace7f46 | 2016-12-01 00:21:08 -0800 | [diff] [blame] | 27 | |
| 28 | #define QEDI_MODULE_NAME "qedi" |
| 29 | |
| 30 | struct qedi_endpoint; |
| 31 | |
#ifndef GET_FIELD2
/* Extract bit-field @name from @value using the HSI-generated
 * name ## _MASK / name ## _OFFSET constants. Note the mask is applied
 * before the shift, so _MASK must be the in-place (unshifted) mask.
 */
#define GET_FIELD2(value, name) \
	(((value) & (name ## _MASK)) >> (name ## _OFFSET))
#endif
| 36 | |
/*
 * PCI function probe defines
 */
#define QEDI_MODE_NORMAL	0
#define QEDI_MODE_RECOVERY	1

#define ISCSI_WQE_SET_PTU_INVALIDATE	1
#define QEDI_MAX_ISCSI_TASK		4096
#define QEDI_MAX_TASK_NUM		0x0FFF
#define QEDI_MAX_ISCSI_CONNS_PER_HBA	1024
#define QEDI_ISCSI_MAX_BDS_PER_CMD	255	/* Firmware max BDs is 255 */
/* NOTE(review): "OUSTANDING" is a typo for "OUTSTANDING"; the name is
 * kept as-is because other translation units reference it.
 */
#define MAX_OUSTANDING_TASKS_PER_CON	1024

#define QEDI_MAX_BD_LEN		0xffff
#define QEDI_BD_SPLIT_SZ	0x1000
/* Defined exactly once; this header previously repeated the identical
 * definition three times.
 */
#define QEDI_PAGE_SIZE		4096
#define QEDI_FAST_SGE_COUNT	4
/* MAX Length for cached SGL */
#define MAX_SGLEN_FOR_CACHESGL	((1U << 16) - 1)

#define MAX_NUM_MSIX_PF		8
#define MIN_NUM_CPUS_MSIX(x)	min((x)->msix_count, num_online_cpus())

/* Local TCP port range used for iSCSI connections */
#define QEDI_LOCAL_PORT_MIN	60000
#define QEDI_LOCAL_PORT_MAX	61024
#define QEDI_LOCAL_PORT_RANGE	(QEDI_LOCAL_PORT_MAX - QEDI_LOCAL_PORT_MIN)
#define QEDI_LOCAL_PORT_INVALID	0xffff
#define TX_RX_RING		16
#define RX_RING			(TX_RX_RING - 1)
#define LL2_SINGLE_BUF_SIZE	0x400
#define QEDI_PAGE_ALIGN(addr)	ALIGN(addr, QEDI_PAGE_SIZE)
#define QEDI_PAGE_MASK		(~((QEDI_PAGE_SIZE) - 1))

#define QEDI_HW_DMA_BOUNDARY	0xfff
#define QEDI_PATH_HANDLE	0xFE0000000UL
| 74 | |
/* Selector for the NVM iSCSI config target entry (see qedi_nvm_iscsi_cfg.h):
 * primary or secondary.
 */
enum qedi_nvm_tgts {
	QEDI_NVM_TGT_PRI,
	QEDI_NVM_TGT_SEC,
};
| 79 | |
/* Control area shared with the userspace uio consumer (presumably
 * iscsiuio — verify against qedi_main.c). Fields are grouped by which
 * side writes them; both sides read.
 */
struct qedi_uio_ctrl {
	/* meta data */
	u32 uio_hsi_version;

	/* user writes */
	u32 host_tx_prod;
	u32 host_rx_cons;
	u32 host_rx_bd_cons;
	u32 host_tx_pkt_len;
	u32 host_rx_cons_cnt;

	/* driver writes */
	u32 hw_tx_cons;
	u32 hw_rx_prod;
	u32 hw_rx_bd_prod;
	u32 hw_rx_prod_cnt;

	/* other */
	u8 mac_addr[6];
	u8 reserve[2];	/* pad to a 4-byte multiple */
};
| 101 | |
/* Receive buffer descriptor published on the uio RX ring */
struct qedi_rx_bd {
	u32 rx_pkt_index;	/* index of the packet buffer */
	u32 rx_pkt_len;		/* length of the received frame */
	u16 vlan_id;		/* VLAN tag of the frame, if any — TODO confirm */
};
| 107 | |
/* Number of RX buffer descriptors that fit in one page */
#define QEDI_RX_DESC_CNT	(QEDI_PAGE_SIZE / sizeof(struct qedi_rx_bd))
#define QEDI_MAX_RX_DESC_CNT	(QEDI_RX_DESC_CNT - 1)
#define QEDI_NUM_RX_BD		(QEDI_RX_DESC_CNT * 1)
#define QEDI_MAX_RX_BD		(QEDI_NUM_RX_BD - 1)

/* Advance an RX index, skipping one extra slot at the end-of-page
 * boundary. NOTE(review): the mask-based wrap test assumes the
 * descriptor count behaves like a power of two — verify against the
 * ring setup code.
 */
#define QEDI_NEXT_RX_IDX(x)	((((x) & (QEDI_MAX_RX_DESC_CNT)) == \
				(QEDI_MAX_RX_DESC_CNT - 1)) ? \
				(x) + 2 : (x) + 1)
| 116 | |
/* Per-PCI-function uio device state exporting the LL2 (light L2)
 * ring and packet buffers to userspace.
 */
struct qedi_uio_dev {
	struct uio_info qedi_uinfo;	/* uio registration info */
	u32 uio_dev;			/* uio device (minor) number */
	struct list_head list;		/* linkage in the driver's uio dev list */

	u32 ll2_ring_size;
	void *ll2_ring;			/* LL2 ring memory */

	u32 ll2_buf_size;
	void *ll2_buf;			/* LL2 packet buffer area */

	void *rx_pkt;
	void *tx_pkt;

	struct qedi_ctx *qedi;		/* owning adapter context */
	struct pci_dev *pdev;
	void *uctrl;			/* shared control area (struct qedi_uio_ctrl)
					 * — TODO confirm against setup code
					 */
};
| 135 | |
/* List to maintain the skb pointers */
struct skb_work_list {
	struct list_head list;	/* list linkage (see qedi_ctx.ll2_skb_list) */
	struct sk_buff *skb;	/* the queued frame */
	u16 vlan_id;		/* VLAN tag to apply/strip — TODO confirm direction */
};
| 142 | |
/* Queue sizes in number of elements */
#define QEDI_SQ_SIZE		MAX_OUSTANDING_TASKS_PER_CON
#define QEDI_CQ_SIZE		2048
#define QEDI_CMDQ_SIZE		QEDI_MAX_ISCSI_TASK
#define QEDI_PROTO_CQ_PROD_IDX	0

/* Physical (DMA) addresses of the per-queue page buffer lists (PBLs)
 * handed to the qed core.
 */
struct qedi_glbl_q_params {
	u64 hw_p_cq;	/* Completion queue PBL */
	u64 hw_p_rq;	/* Request queue PBL */
	u64 hw_p_cmdq;	/* Command queue PBL */
};
| 154 | |
/* One completion queue and the page buffer list (PBL) describing it */
struct global_queue {
	union iscsi_cqe *cq;	/* CQE array */
	dma_addr_t cq_dma;	/* DMA address of the CQE array */
	u32 cq_mem_size;
	u32 cq_cons_idx;	/* Completion queue consumer index */

	void *cq_pbl;		/* PBL pointing at the CQ pages */
	dma_addr_t cq_pbl_dma;
	u32 cq_pbl_size;

};
| 166 | |
/* Per-interrupt-vector fastpath context */
struct qedi_fastpath {
	struct qed_sb_info *sb_info;	/* status block for this vector */
	u16 sb_id;			/* status block id */
#define QEDI_NAME_SIZE		16
	char name[QEDI_NAME_SIZE];	/* name for this vector (e.g. IRQ name)
					 * — TODO confirm usage
					 */
	struct qedi_ctx *qedi;		/* owning adapter context */
};
| 174 | |
/* Used to pass fastpath information needed to process CQEs */
struct qedi_io_work {
	struct list_head list;		/* work list linkage */
	struct iscsi_cqe_solicited cqe;	/* copy of the solicited CQE */
	u16 que_idx;			/* completion queue index */
};
| 181 | |
/**
 * struct iscsi_cid_queue - Per adapter iscsi cid queue
 *
 * @cid_que_base: queue base memory
 * @cid_que: queue memory pointer
 * @cid_q_prod_idx: producer index
 * @cid_q_cons_idx: consumer index
 * @cid_q_max_idx: max index. used to detect wrap around condition
 * @cid_free_cnt: queue size
 * @conn_cid_tbl: iscsi cid to conn structure mapping table
 *
 * Per adapter iSCSI CID Queue
 */
struct iscsi_cid_queue {
	void *cid_que_base;
	u32 *cid_que;
	u32 cid_q_prod_idx;
	u32 cid_q_cons_idx;
	u32 cid_q_max_idx;
	u32 cid_free_cnt;
	struct qedi_conn **conn_cid_tbl;
};
| 204 | |
/* Bitmap-based allocator for local iSCSI TCP port ids
 * (range QEDI_LOCAL_PORT_MIN..QEDI_LOCAL_PORT_MAX — TODO confirm).
 */
struct qedi_portid_tbl {
	spinlock_t lock;	/* Port id lock */
	u16 start;		/* first id in the range */
	u16 max;		/* range limit — TODO confirm: size vs. max id */
	u16 next;		/* next id to try (allocation hint) */
	unsigned long *table;	/* allocation bitmap */
};
| 212 | |
/* Maps a wire ITT (initiator task tag) to the driver command owning it */
struct qedi_itt_map {
	__le32 itt;		/* little-endian task tag as seen on the wire */
	struct qedi_cmd *p_cmd;	/* command associated with this ITT */
};
| 217 | |
/* I/O tracing entry */
#define QEDI_IO_TRACE_SIZE	2048
/* One record in the circular I/O trace buffer (debug aid) */
struct qedi_io_log {
#define QEDI_IO_TRACE_REQ	0
#define QEDI_IO_TRACE_RSP	1
	u8 direction;		/* QEDI_IO_TRACE_REQ or QEDI_IO_TRACE_RSP */
	u16 task_id;
	u32 cid;		/* connection id */
	u32 port_id;	/* Remote port fabric ID */
	int lun;
	u8 op;		/* SCSI CDB */
	u8 lba[4];	/* logical block address bytes from the CDB */
	unsigned int bufflen;	/* SCSI buffer length */
	unsigned int sg_count;	/* Number of SG elements */
	u8 fast_sgs;	/* number of fast sgls */
	u8 slow_sgs;	/* number of slow sgls */
	u8 cached_sgs;	/* number of cached sgls */
	int result;	/* Result passed back to mid-layer */
	unsigned long jiffies;	/* Time stamp when I/O logged */
	int refcount;	/* Reference count for task id */
	unsigned int blk_req_cpu; /* CPU that the task is queued on by
				   * blk layer
				   */
	unsigned int req_cpu;	/* CPU that the task is queued on */
	unsigned int intr_cpu;	/* Interrupt CPU that the task is received on */
	unsigned int blk_rsp_cpu;/* CPU that task is actually processed and
				  * returned to blk layer
				  */
	bool cached_sge;	/* I/O used a cached SGE */
	bool slow_sge;		/* I/O used a slow SGE */
	bool fast_sge;		/* I/O used a fast SGE */
};
| 250 | |
/* Number of entries in BDQ */
#define QEDI_BDQ_NUM		256
#define QEDI_BDQ_BUF_SIZE	256

/* DMA coherent buffers for BDQ */
struct qedi_bdq_buf {
	void *buf_addr;		/* CPU virtual address */
	dma_addr_t buf_dma;	/* DMA (bus) address for the HW */
};
| 260 | |
/* Main port level struct */
struct qedi_ctx {
	struct qedi_dbg_ctx dbg_ctx;		/* debug/logging context */
	struct Scsi_Host *shost;		/* SCSI mid-layer host */
	struct pci_dev *pdev;
	struct qed_dev *cdev;			/* qed core device handle */
	struct qed_dev_iscsi_info dev_info;
	struct qed_int_info int_info;
	struct qedi_glbl_q_params *p_cpuq;	/* per-queue PBL parameter array */
	struct global_queue **global_queues;	/* per-queue completion state */
	/* uio declaration */
	struct qedi_uio_dev *udev;
	struct list_head ll2_skb_list;		/* skbs queued for ll2_recv_thread */
	spinlock_t ll2_lock;	/* Light L2 lock */
	spinlock_t hba_lock;	/* per port lock */
	struct task_struct *ll2_recv_thread;
	unsigned long flags;			/* state bits, values below */
#define UIO_DEV_OPENED		1
#define QEDI_IOTHREAD_WAKE	2
#define QEDI_IN_RECOVERY	5
#define QEDI_IN_OFFLINE		6

	u8 mac[ETH_ALEN];
	u32 src_ip[4];		/* local IP; 4 words hold IPv4 or IPv6 */
	u8 ip_type;		/* address family selector — TODO confirm values */

	/* Physical address of above array */
	dma_addr_t hw_p_cpuq;

	/* Buffer descriptor queue (BDQ) and its page buffer list */
	struct qedi_bdq_buf bdq[QEDI_BDQ_NUM];
	void *bdq_pbl;
	dma_addr_t bdq_pbl_dma;
	size_t bdq_pbl_mem_size;
	void *bdq_pbl_list;
	dma_addr_t bdq_pbl_list_dma;
	u8 bdq_pbl_list_num_entries;
	struct nvm_iscsi_cfg *iscsi_cfg;	/* NVM iSCSI config (see qedi_nvm_iscsi_cfg.h) */
	dma_addr_t nvm_buf_dma;			/* DMA address of the NVM config buffer */
	void __iomem *bdq_primary_prod;		/* HW producer doorbell (primary) */
	void __iomem *bdq_secondary_prod;	/* HW producer doorbell (secondary) */
	u16 bdq_prod_idx;
	u16 rq_num_entries;

	u32 msix_count;
	u32 max_sqes;
	u8 num_queues;
	u32 max_active_conns;

	struct iscsi_cid_queue cid_que;		/* free-CID queue */
	struct qedi_endpoint **ep_tbl;		/* endpoint table, indexed by CID
						 * — TODO confirm indexing
						 */
	struct qedi_portid_tbl lcl_port_tbl;	/* local TCP port allocator */

	/* Rx fast path intr context */
	struct qed_sb_info *sb_array;
	struct qedi_fastpath *fp_array;
	struct qed_iscsi_tid tasks;		/* HW task-context memory layout */

#define QEDI_LINK_DOWN		0
#define QEDI_LINK_UP		1
	atomic_t link_state;

#define QEDI_RESERVE_TASK_ID	0
#define MAX_ISCSI_TASK_ENTRIES	4096
#define QEDI_INVALID_TASK_ID	(MAX_ISCSI_TASK_ENTRIES + 1)
	unsigned long task_idx_map[MAX_ISCSI_TASK_ENTRIES / BITS_PER_LONG];
	struct qedi_itt_map *itt_map;		/* ITT -> qedi_cmd lookup table */
	u16 tid_reuse_count[QEDI_MAX_ISCSI_TASK];
	struct qed_pf_params pf_params;

	struct workqueue_struct *tmf_thread;	/* task-management (TMF) work */
	struct workqueue_struct *offload_thread;

	u16 ll2_mtu;

	struct workqueue_struct *dpc_wq;

	spinlock_t task_idx_lock;	/* To protect gbl context */
	s32 last_tidx_alloc;
	s32 last_tidx_clear;

	/* Circular I/O trace log (debug aid) */
	struct qedi_io_log io_trace_buf[QEDI_IO_TRACE_SIZE];
	spinlock_t io_trace_lock;	/* protect trace log buf */
	u16 io_trace_idx;		/* next slot in io_trace_buf */
	unsigned int intr_cpu;
	u32 cached_sgls;		/* running SGL-type statistics */
	bool use_cached_sge;
	u32 slow_sgls;
	bool use_slow_sge;
	u32 fast_sgls;
	bool use_fast_sge;

	atomic_t num_offloads;		/* number of offloaded connections */
#define SYSFS_FLAG_FW_SEL_BOOT	2
#define IPV6_LEN	41
#define IPV4_LEN	17
	struct iscsi_boot_kset *boot_kset;	/* iSCSI boot sysfs entries */
};
| 358 | |
/* Deferred CQE handed to the per-CPU I/O thread */
struct qedi_work {
	struct list_head list;		/* linkage in qedi_percpu_s.work_list */
	struct qedi_ctx *qedi;		/* adapter the CQE belongs to */
	union iscsi_cqe cqe;		/* copy of the completion entry */
	u16 que_idx;			/* completion queue index */
	bool is_solicited;		/* solicited vs. unsolicited CQE */
};
| 366 | |
/* Per-CPU I/O completion worker state */
struct qedi_percpu_s {
	struct task_struct *iothread;	/* kthread draining work_list */
	struct list_head work_list;	/* pending struct qedi_work items */
	spinlock_t p_work_lock;		/* Per cpu worker lock */
};
| 372 | |
| 373 | static inline void *qedi_get_task_mem(struct qed_iscsi_tid *info, u32 tid) |
| 374 | { |
| 375 | return (info->blocks[tid / info->num_tids_per_block] + |
| 376 | (tid % info->num_tids_per_block) * info->size); |
| 377 | } |
| 378 | |
/* Split a 64-bit value into its high/low 32-bit halves */
#define QEDI_U64_HI(val) ((u32)(((u64)(val)) >> 32))
#define QEDI_U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))

#endif /* _QEDI_H_ */