/* cnic_if.h: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 */


#ifndef CNIC_IF_H
#define CNIC_IF_H

#include "bnx2x/bnx2x_mfw_req.h"

#define CNIC_MODULE_VERSION	"2.5.20"
#define CNIC_MODULE_RELDATE	"March 14, 2014"

#define CNIC_ULP_RDMA		0
#define CNIC_ULP_ISCSI		1
#define CNIC_ULP_FCOE		2
#define CNIC_ULP_L4		3
#define MAX_CNIC_ULP_TYPE_EXT	3
#define MAX_CNIC_ULP_TYPE	4

/* Use CPU native page size up to 16K for cnic ring sizes.  */
#if (PAGE_SHIFT > 14)
#define CNIC_PAGE_BITS	14
#else
#define CNIC_PAGE_BITS	PAGE_SHIFT
#endif
#define CNIC_PAGE_SIZE	(1 << (CNIC_PAGE_BITS))
#define CNIC_PAGE_ALIGN(addr)	ALIGN(addr, CNIC_PAGE_SIZE)
#define CNIC_PAGE_MASK	(~((CNIC_PAGE_SIZE) - 1))
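
/* Illustrative values only (not part of the interface): with 4K native
 * pages (PAGE_SHIFT == 12), CNIC_PAGE_BITS is 12 and CNIC_PAGE_SIZE is
 * 4096; with 64K native pages (PAGE_SHIFT == 16), CNIC_PAGE_BITS is
 * capped at 14, so CNIC_PAGE_SIZE is 16384 and, e.g.,
 * CNIC_PAGE_ALIGN(0x6234) evaluates to 0x8000.
 */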

struct kwqe {
	u32 kwqe_op_flag;

#define KWQE_QID_SHIFT		8
#define KWQE_OPCODE_MASK	0x00ff0000
#define KWQE_OPCODE_SHIFT	16
#define KWQE_OPCODE(x)		((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
#define KWQE_LAYER_MASK			0x70000000
#define KWQE_LAYER_SHIFT		28
#define KWQE_FLAGS_LAYER_MASK_L2	(2<<28)
#define KWQE_FLAGS_LAYER_MASK_L3	(3<<28)
#define KWQE_FLAGS_LAYER_MASK_L4	(4<<28)
#define KWQE_FLAGS_LAYER_MASK_L5_RDMA	(5<<28)
#define KWQE_FLAGS_LAYER_MASK_L5_ISCSI	(6<<28)
#define KWQE_FLAGS_LAYER_MASK_L5_FCOE	(7<<28)

	u32 kwqe_info0;
	u32 kwqe_info1;
	u32 kwqe_info2;
	u32 kwqe_info3;
	u32 kwqe_info4;
	u32 kwqe_info5;
	u32 kwqe_info6;
};

struct kwqe_16 {
	u32 kwqe_info0;
	u32 kwqe_info1;
	u32 kwqe_info2;
	u32 kwqe_info3;
};

struct kcqe {
	u32 kcqe_info0;
	u32 kcqe_info1;
	u32 kcqe_info2;
	u32 kcqe_info3;
	u32 kcqe_info4;
	u32 kcqe_info5;
	u32 kcqe_info6;
	u32 kcqe_op_flag;
	#define KCQE_RAMROD_COMPLETION		(0x1<<27) /* Everest */
	#define KCQE_FLAGS_LAYER_MASK		(0x7<<28)
	#define KCQE_FLAGS_LAYER_MASK_MISC	(0<<28)
	#define KCQE_FLAGS_LAYER_MASK_L2	(2<<28)
	#define KCQE_FLAGS_LAYER_MASK_L3	(3<<28)
	#define KCQE_FLAGS_LAYER_MASK_L4	(4<<28)
	#define KCQE_FLAGS_LAYER_MASK_L5_RDMA	(5<<28)
	#define KCQE_FLAGS_LAYER_MASK_L5_ISCSI	(6<<28)
	#define KCQE_FLAGS_LAYER_MASK_L5_FCOE	(7<<28)
	#define KCQE_FLAGS_NEXT			(1<<31)
	#define KCQE_FLAGS_OPCODE_MASK		(0xff<<16)
	#define KCQE_FLAGS_OPCODE_SHIFT		(16)
	#define KCQE_OPCODE(op)			\
		(((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
};
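
/* Worked example (hypothetical value, for illustration only): for a
 * kcqe_op_flag of 0x60140000, (kcqe_op_flag & KCQE_FLAGS_LAYER_MASK)
 * equals KCQE_FLAGS_LAYER_MASK_L5_ISCSI, KCQE_OPCODE(kcqe_op_flag)
 * yields 0x14, and KCQE_FLAGS_NEXT (bit 31) is clear.
 */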

#define MAX_CNIC_CTL_DATA	64
#define MAX_DRV_CTL_DATA	64

#define CNIC_CTL_STOP_CMD		1
#define CNIC_CTL_START_CMD		2
#define CNIC_CTL_COMPLETION_CMD		3
#define CNIC_CTL_STOP_ISCSI_CMD		4
#define CNIC_CTL_FCOE_STATS_GET_CMD	5
#define CNIC_CTL_ISCSI_STATS_GET_CMD	6

#define DRV_CTL_IO_WR_CMD		0x101
#define DRV_CTL_IO_RD_CMD		0x102
#define DRV_CTL_CTX_WR_CMD		0x103
#define DRV_CTL_CTXTBL_WR_CMD		0x104
#define DRV_CTL_RET_L5_SPQ_CREDIT_CMD	0x105
#define DRV_CTL_START_L2_CMD		0x106
#define DRV_CTL_STOP_L2_CMD		0x107
#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD	0x10c
#define DRV_CTL_ISCSI_STOPPED_CMD	0x10d
#define DRV_CTL_ULP_REGISTER_CMD	0x10e
#define DRV_CTL_ULP_UNREGISTER_CMD	0x10f

struct cnic_ctl_completion {
	u32 cid;
	u8 opcode;
	u8 error;
};

struct cnic_ctl_info {
	int cmd;
	union {
		struct cnic_ctl_completion comp;
		char bytes[MAX_CNIC_CTL_DATA];
	} data;
};

struct drv_ctl_spq_credit {
	u32 credit_count;
};

struct drv_ctl_io {
	u32 cid_addr;
	u32 offset;
	u32 data;
	dma_addr_t dma_addr;
};

struct drv_ctl_l2_ring {
	u32 client_id;
	u32 cid;
};

struct drv_ctl_register_data {
	int ulp_type;
	struct fcoe_capabilities fcoe_features;
};

struct drv_ctl_info {
	int cmd;
	union {
		struct drv_ctl_spq_credit credit;
		struct drv_ctl_io io;
		struct drv_ctl_l2_ring ring;
		int ulp_type;
		struct drv_ctl_register_data register_data;
		char bytes[MAX_DRV_CTL_DATA];
	} data;
};

struct cnic_ops {
	struct module *cnic_owner;
	/* Calls to these functions are protected by RCU.  When
	 * unregistering, we wait for any calls to complete before
	 * continuing.
	 */
	int (*cnic_handler)(void *, void *);
	int (*cnic_ctl)(void *, struct cnic_ctl_info *);
};

#define MAX_CNIC_VEC	8

struct cnic_irq {
	unsigned int	vector;
	void		*status_blk;
	u32		status_blk_num;
	u32		status_blk_num2;
	u32		irq_flags;
#define CNIC_IRQ_FL_MSIX		0x00000001
};

struct cnic_eth_dev {
	struct module	*drv_owner;
	u32		drv_state;
#define CNIC_DRV_STATE_REGD		0x00000001
#define CNIC_DRV_STATE_USING_MSIX	0x00000002
#define CNIC_DRV_STATE_NO_ISCSI_OOO	0x00000004
#define CNIC_DRV_STATE_NO_ISCSI		0x00000008
#define CNIC_DRV_STATE_NO_FCOE		0x00000010
#define CNIC_DRV_STATE_HANDLES_IRQ	0x00000020
	u32		chip_id;
	u32		max_kwqe_pending;
	struct pci_dev	*pdev;
	void __iomem	*io_base;
	void __iomem	*io_base2;
	const void	*iro_arr;

	u32		ctx_tbl_offset;
	u32		ctx_tbl_len;
	int		ctx_blk_size;
	u32		starting_cid;
	u32		max_iscsi_conn;
	u32		max_fcoe_conn;
	u32		max_rdma_conn;
	u32		fcoe_init_cid;
	u32		max_fcoe_exchanges;
	u32		fcoe_wwn_port_name_hi;
	u32		fcoe_wwn_port_name_lo;
	u32		fcoe_wwn_node_name_hi;
	u32		fcoe_wwn_node_name_lo;

	u16		iscsi_l2_client_id;
	u16		iscsi_l2_cid;
	u8		iscsi_mac[ETH_ALEN];

	int		num_irq;
	struct cnic_irq	irq_arr[MAX_CNIC_VEC];
	int		(*drv_register_cnic)(struct net_device *,
					     struct cnic_ops *, void *);
	int		(*drv_unregister_cnic)(struct net_device *);
	int		(*drv_submit_kwqes_32)(struct net_device *,
					       struct kwqe *[], u32);
	int		(*drv_submit_kwqes_16)(struct net_device *,
					       struct kwqe_16 *[], u32);
	int		(*drv_ctl)(struct net_device *, struct drv_ctl_info *);
	unsigned long	reserved1[2];
	union drv_info_to_mcp	*addr_drv_info_to_mcp;
};

struct cnic_sockaddr {
	union {
		struct sockaddr_in	v4;
		struct sockaddr_in6	v6;
	} local;
	union {
		struct sockaddr_in	v4;
		struct sockaddr_in6	v6;
	} remote;
};

struct cnic_sock {
	struct cnic_dev *dev;
	void	*context;
	u32	src_ip[4];
	u32	dst_ip[4];
	u16	src_port;
	u16	dst_port;
	u16	vlan_id;
	unsigned char old_ha[ETH_ALEN];
	unsigned char ha[ETH_ALEN];
	u32	mtu;
	u32	cid;
	u32	l5_cid;
	u32	pg_cid;
	int	ulp_type;

	u32	ka_timeout;
	u32	ka_interval;
	u8	ka_max_probe_count;
	u8	tos;
	u8	ttl;
	u8	snd_seq_scale;
	u32	rcv_buf;
	u32	snd_buf;
	u32	seed;

	unsigned long	tcp_flags;
#define SK_TCP_NO_DELAY_ACK	0x1
#define SK_TCP_KEEP_ALIVE	0x2
#define SK_TCP_NAGLE		0x4
#define SK_TCP_TIMESTAMP	0x8
#define SK_TCP_SACK		0x10
#define SK_TCP_SEG_SCALING	0x20
	unsigned long	flags;
#define SK_F_INUSE		0
#define SK_F_OFFLD_COMPLETE	1
#define SK_F_OFFLD_SCHED	2
#define SK_F_PG_OFFLD_COMPLETE	3
#define SK_F_CONNECT_START	4
#define SK_F_IPV6		5
#define SK_F_CLOSING		7
#define SK_F_HW_ERR		8

	atomic_t ref_count;
	u32 state;
	struct kwqe kwqe1;
	struct kwqe kwqe2;
	struct kwqe kwqe3;
};
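
/* A minimal sketch (the csk pointer is hypothetical): the SK_F_* values
 * are bit numbers within @flags, suited to the atomic bitop helpers,
 * e.g.:
 *
 *	if (test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
 *	    !test_bit(SK_F_CLOSING, &csk->flags))
 *		...;
 *
 * whereas the SK_TCP_* values are bit masks OR-ed into @tcp_flags.
 */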

struct cnic_dev {
	struct net_device	*netdev;
	struct pci_dev		*pcidev;
	void __iomem		*regview;
	struct list_head	list;

	int (*register_device)(struct cnic_dev *dev, int ulp_type,
			       void *ulp_ctx);
	int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
	int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num_wqes);
	int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
				u32 num_wqes);

	int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
			 void *);
	int (*cm_destroy)(struct cnic_sock *);
	int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
	int (*cm_abort)(struct cnic_sock *);
	int (*cm_close)(struct cnic_sock *);
	struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
	int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
				 char *data, u16 data_size);
	unsigned long flags;
#define CNIC_F_CNIC_UP		1
#define CNIC_F_BNX2_CLASS	3
#define CNIC_F_BNX2X_CLASS	4
	atomic_t ref_count;
	u8 mac_addr[ETH_ALEN];

	int max_iscsi_conn;
	int max_fcoe_conn;
	int max_rdma_conn;

	int max_fcoe_exchanges;

	union drv_info_to_mcp	*stats_addr;
	struct fcoe_capabilities	*fcoe_cap;

	void *cnic_priv;
};

#define CNIC_WR(dev, off, val)		writel(val, dev->regview + off)
#define CNIC_WR16(dev, off, val)	writew(val, dev->regview + off)
#define CNIC_WR8(dev, off, val)		writeb(val, dev->regview + off)
#define CNIC_RD(dev, off)		readl(dev->regview + off)
#define CNIC_RD16(dev, off)		readw(dev->regview + off)

struct cnic_ulp_ops {
	/* Calls to these functions are protected by RCU.  When
	 * unregistering, we wait for any calls to complete before
	 * continuing.
	 */

	void (*cnic_init)(struct cnic_dev *dev);
	void (*cnic_exit)(struct cnic_dev *dev);
	void (*cnic_start)(void *ulp_ctx);
	void (*cnic_stop)(void *ulp_ctx);
	void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
				u32 num_cqes);
	void (*indicate_netevent)(void *ulp_ctx, unsigned long event, u16 vid);
	void (*cm_connect_complete)(struct cnic_sock *);
	void (*cm_close_complete)(struct cnic_sock *);
	void (*cm_abort_complete)(struct cnic_sock *);
	void (*cm_remote_close)(struct cnic_sock *);
	void (*cm_remote_abort)(struct cnic_sock *);
	int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type,
				 char *data, u16 data_size);
	int (*cnic_get_stats)(void *ulp_ctx);
	struct module *owner;
	atomic_t ref_count;
};

int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);

int cnic_unregister_driver(int ulp_type);
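
/* Minimal registration sketch for a hypothetical ULP module (the names
 * below are illustrative, not part of this interface):
 *
 *	static struct cnic_ulp_ops my_iscsi_ulp_ops = {
 *		.cnic_init	= my_ulp_init,
 *		.cnic_exit	= my_ulp_exit,
 *		.indicate_kcqes	= my_ulp_indicate_kcqes,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	rc = cnic_register_driver(CNIC_ULP_ISCSI, &my_iscsi_ulp_ops);
 *	...
 *	cnic_unregister_driver(CNIC_ULP_ISCSI);
 *
 * Per the comment in struct cnic_ulp_ops, the callbacks run under RCU
 * protection and unregistration waits for in-flight calls to complete.
 */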

#endif