/* cnic_if.h: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 */


#ifndef CNIC_IF_H
#define CNIC_IF_H

#define CNIC_MODULE_VERSION	"2.1.3"
#define CNIC_MODULE_RELDATE	"June 24, 2010"

#define CNIC_ULP_RDMA		0
#define CNIC_ULP_ISCSI		1
#define CNIC_ULP_L4		2
#define MAX_CNIC_ULP_TYPE_EXT	2
#define MAX_CNIC_ULP_TYPE	3

struct kwqe {
	u32 kwqe_op_flag;

#define KWQE_OPCODE_MASK	0x00ff0000
#define KWQE_OPCODE_SHIFT	16
#define KWQE_FLAGS_LAYER_SHIFT	28
#define KWQE_OPCODE(x)		((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)

	u32 kwqe_info0;
	u32 kwqe_info1;
	u32 kwqe_info2;
	u32 kwqe_info3;
	u32 kwqe_info4;
	u32 kwqe_info5;
	u32 kwqe_info6;
};

struct kwqe_16 {
	u32 kwqe_info0;
	u32 kwqe_info1;
	u32 kwqe_info2;
	u32 kwqe_info3;
};

struct kcqe {
	u32 kcqe_info0;
	u32 kcqe_info1;
	u32 kcqe_info2;
	u32 kcqe_info3;
	u32 kcqe_info4;
	u32 kcqe_info5;
	u32 kcqe_info6;
	u32 kcqe_op_flag;
#define KCQE_RAMROD_COMPLETION		(0x1<<27) /* Everest */
#define KCQE_FLAGS_LAYER_MASK		(0x7<<28)
#define KCQE_FLAGS_LAYER_MASK_MISC	(0<<28)
#define KCQE_FLAGS_LAYER_MASK_L2	(2<<28)
#define KCQE_FLAGS_LAYER_MASK_L3	(3<<28)
#define KCQE_FLAGS_LAYER_MASK_L4	(4<<28)
#define KCQE_FLAGS_LAYER_MASK_L5_RDMA	(5<<28)
#define KCQE_FLAGS_LAYER_MASK_L5_ISCSI	(6<<28)
#define KCQE_FLAGS_NEXT			(1<<31)
#define KCQE_FLAGS_OPCODE_MASK		(0xff<<16)
#define KCQE_FLAGS_OPCODE_SHIFT		(16)
#define KCQE_OPCODE(op) \
	(((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
};
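
/*
 * Illustrative sketch (not part of the original header): a consumer of the
 * completion queue would typically classify each KCQE by layer and opcode
 * using the masks above, e.g.
 *
 *	u32 layer  = kcqe->kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
 *	u32 opcode = KCQE_OPCODE(kcqe->kcqe_op_flag);
 */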

#define MAX_CNIC_CTL_DATA	64
#define MAX_DRV_CTL_DATA	64

#define CNIC_CTL_STOP_CMD		1
#define CNIC_CTL_START_CMD		2
#define CNIC_CTL_COMPLETION_CMD		3

#define DRV_CTL_IO_WR_CMD		0x101
#define DRV_CTL_IO_RD_CMD		0x102
#define DRV_CTL_CTX_WR_CMD		0x103
#define DRV_CTL_CTXTBL_WR_CMD		0x104
#define DRV_CTL_COMPLETION_CMD		0x105
#define DRV_CTL_START_L2_CMD		0x106
#define DRV_CTL_STOP_L2_CMD		0x107

struct cnic_ctl_completion {
	u32	cid;
};

struct drv_ctl_completion {
	u32	comp_count;
};

struct cnic_ctl_info {
	int	cmd;
	union {
		struct cnic_ctl_completion comp;
		char bytes[MAX_CNIC_CTL_DATA];
	} data;
};

struct drv_ctl_io {
	u32		cid_addr;
	u32		offset;
	u32		data;
	dma_addr_t	dma_addr;
};

struct drv_ctl_l2_ring {
	u32		client_id;
	u32		cid;
};

struct drv_ctl_info {
	int	cmd;
	union {
		struct drv_ctl_completion comp;
		struct drv_ctl_io io;
		struct drv_ctl_l2_ring ring;
		char bytes[MAX_DRV_CTL_DATA];
	} data;
};

struct cnic_ops {
	struct module	*cnic_owner;
	/* Calls to these functions are protected by RCU.  When
	 * unregistering, we wait for any calls to complete before
	 * continuing.
	 */
	int		(*cnic_handler)(void *, void *);
	int		(*cnic_ctl)(void *, struct cnic_ctl_info *);
};
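
/*
 * Illustrative sketch (not part of the original header; cp->cnic_ops and
 * cp->cnic_data are hypothetical names): the RCU protection noted above
 * is assumed to mean the caller dispatches roughly like
 *
 *	rcu_read_lock();
 *	c_ops = rcu_dereference(cp->cnic_ops);
 *	if (c_ops)
 *		c_ops->cnic_handler(cp->cnic_data, status_blk);
 *	rcu_read_unlock();
 *
 * with the unregister path clearing the pointer and calling
 * synchronize_rcu() before tearing the context down.
 */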

#define MAX_CNIC_VEC	8

struct cnic_irq {
	unsigned int	vector;
	void		*status_blk;
	u32		status_blk_num;
	u32		irq_flags;
#define CNIC_IRQ_FL_MSIX		0x00000001
};

struct cnic_eth_dev {
	struct module	*drv_owner;
	u32		drv_state;
#define CNIC_DRV_STATE_REGD		0x00000001
#define CNIC_DRV_STATE_USING_MSIX	0x00000002
	u32		chip_id;
	u32		max_kwqe_pending;
	struct pci_dev	*pdev;
	void __iomem	*io_base;
	void __iomem	*io_base2;

	u32		ctx_tbl_offset;
	u32		ctx_tbl_len;
	int		ctx_blk_size;
	u32		starting_cid;
	u32		max_iscsi_conn;
	u32		max_fcoe_conn;
	u32		max_rdma_conn;
	u32		reserved0[2];

	int		num_irq;
	struct cnic_irq	irq_arr[MAX_CNIC_VEC];
	int		(*drv_register_cnic)(struct net_device *,
					     struct cnic_ops *, void *);
	int		(*drv_unregister_cnic)(struct net_device *);
	int		(*drv_submit_kwqes_32)(struct net_device *,
					       struct kwqe *[], u32);
	int		(*drv_submit_kwqes_16)(struct net_device *,
					       struct kwqe_16 *[], u32);
	int		(*drv_ctl)(struct net_device *, struct drv_ctl_info *);
	unsigned long	reserved1[2];
};

struct cnic_sockaddr {
	union {
		struct sockaddr_in	v4;
		struct sockaddr_in6	v6;
	} local;
	union {
		struct sockaddr_in	v4;
		struct sockaddr_in6	v6;
	} remote;
};

struct cnic_sock {
	struct cnic_dev *dev;
	void	*context;
	u32	src_ip[4];
	u32	dst_ip[4];
	u16	src_port;
	u16	dst_port;
	u16	vlan_id;
	unsigned char old_ha[6];
	unsigned char ha[6];
	u32	mtu;
	u32	cid;
	u32	l5_cid;
	u32	pg_cid;
	int	ulp_type;

	u32	ka_timeout;
	u32	ka_interval;
	u8	ka_max_probe_count;
	u8	tos;
	u8	ttl;
	u8	snd_seq_scale;
	u32	rcv_buf;
	u32	snd_buf;
	u32	seed;

	unsigned long	tcp_flags;
#define SK_TCP_NO_DELAY_ACK	0x1
#define SK_TCP_KEEP_ALIVE	0x2
#define SK_TCP_NAGLE		0x4
#define SK_TCP_TIMESTAMP	0x8
#define SK_TCP_SACK		0x10
#define SK_TCP_SEG_SCALING	0x20
	unsigned long	flags;
#define SK_F_INUSE		0
#define SK_F_OFFLD_COMPLETE	1
#define SK_F_OFFLD_SCHED	2
#define SK_F_PG_OFFLD_COMPLETE	3
#define SK_F_CONNECT_START	4
#define SK_F_IPV6		5
#define SK_F_CLOSING		7

	atomic_t ref_count;
	u32 state;
	struct kwqe kwqe1;
	struct kwqe kwqe2;
	struct kwqe kwqe3;
};

struct cnic_dev {
	struct net_device	*netdev;
	struct pci_dev		*pcidev;
	void __iomem		*regview;
	struct list_head	list;

	int (*register_device)(struct cnic_dev *dev, int ulp_type,
			       void *ulp_ctx);
	int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
	int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
			    u32 num_wqes);
	int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
			       u32 num_wqes);

	int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
			 void *);
	int (*cm_destroy)(struct cnic_sock *);
	int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
	int (*cm_abort)(struct cnic_sock *);
	int (*cm_close)(struct cnic_sock *);
	struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
	int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
				 char *data, u16 data_size);
	unsigned long flags;
#define CNIC_F_CNIC_UP		1
#define CNIC_F_BNX2_CLASS	3
#define CNIC_F_BNX2X_CLASS	4
	atomic_t ref_count;
	u8 mac_addr[6];

	int max_iscsi_conn;
	int max_fcoe_conn;
	int max_rdma_conn;

	void *cnic_priv;
};

#define CNIC_WR(dev, off, val)		writel(val, dev->regview + off)
#define CNIC_WR16(dev, off, val)	writew(val, dev->regview + off)
#define CNIC_WR8(dev, off, val)		writeb(val, dev->regview + off)
#define CNIC_RD(dev, off)		readl(dev->regview + off)
#define CNIC_RD16(dev, off)		readw(dev->regview + off)
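
/*
 * Illustrative only (the offset is hypothetical): these wrappers access the
 * device BAR mapped at dev->regview, e.g.
 *
 *	u32 val = CNIC_RD(dev, 0x100);
 *	CNIC_WR(dev, 0x100, val | 0x1);
 */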

struct cnic_ulp_ops {
	/* Calls to these functions are protected by RCU.  When
	 * unregistering, we wait for any calls to complete before
	 * continuing.
	 */

	void (*cnic_init)(struct cnic_dev *dev);
	void (*cnic_exit)(struct cnic_dev *dev);
	void (*cnic_start)(void *ulp_ctx);
	void (*cnic_stop)(void *ulp_ctx);
	void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
			       u32 num_cqes);
	void (*indicate_netevent)(void *ulp_ctx, unsigned long event);
	void (*cm_connect_complete)(struct cnic_sock *);
	void (*cm_close_complete)(struct cnic_sock *);
	void (*cm_abort_complete)(struct cnic_sock *);
	void (*cm_remote_close)(struct cnic_sock *);
	void (*cm_remote_abort)(struct cnic_sock *);
	void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
				  char *data, u16 data_size);
	struct module *owner;
	atomic_t ref_count;
};

extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);

extern int cnic_unregister_driver(int ulp_type);
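
/*
 * Minimal usage sketch (illustrative only; my_ulp_ops and its handlers are
 * hypothetical names): a ULP driver is expected to fill in a cnic_ulp_ops
 * table and register it for its ULP type, e.g.
 *
 *	static struct cnic_ulp_ops my_ulp_ops = {
 *		.cnic_init	= my_cnic_init,
 *		.cnic_exit	= my_cnic_exit,
 *		.indicate_kcqes	= my_indicate_kcqes,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
 *	...
 *	cnic_unregister_driver(CNIC_ULP_ISCSI);
 */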

extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev);
extern struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);

#endif