/* cnic_if.h: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 */


#ifndef CNIC_IF_H
#define CNIC_IF_H

#define CNIC_MODULE_VERSION	"2.2.6"
#define CNIC_MODULE_RELDATE	"Oct 12, 2010"

#define CNIC_ULP_RDMA		0
#define CNIC_ULP_ISCSI		1
#define CNIC_ULP_L4		2
#define MAX_CNIC_ULP_TYPE_EXT	2
#define MAX_CNIC_ULP_TYPE	3

struct kwqe {
	u32 kwqe_op_flag;

#define KWQE_OPCODE_MASK	0x00ff0000
#define KWQE_OPCODE_SHIFT	16
#define KWQE_FLAGS_LAYER_SHIFT	28
#define KWQE_OPCODE(x)		((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)

	u32 kwqe_info0;
	u32 kwqe_info1;
	u32 kwqe_info2;
	u32 kwqe_info3;
	u32 kwqe_info4;
	u32 kwqe_info5;
	u32 kwqe_info6;
};
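
/*
 * Illustrative only: KWQE_OPCODE() extracts the opcode from bits 23:16 of
 * kwqe_op_flag, and the layer bits start at KWQE_FLAGS_LAYER_SHIFT.
 * For example, if kwqe_op_flag == 0x01140000 then
 * KWQE_OPCODE(kwqe_op_flag) == 0x14.
 */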

struct kwqe_16 {
	u32 kwqe_info0;
	u32 kwqe_info1;
	u32 kwqe_info2;
	u32 kwqe_info3;
};

struct kcqe {
	u32 kcqe_info0;
	u32 kcqe_info1;
	u32 kcqe_info2;
	u32 kcqe_info3;
	u32 kcqe_info4;
	u32 kcqe_info5;
	u32 kcqe_info6;
	u32 kcqe_op_flag;
		#define KCQE_RAMROD_COMPLETION		(0x1<<27) /* Everest */
		#define KCQE_FLAGS_LAYER_MASK		(0x7<<28)
		#define KCQE_FLAGS_LAYER_MASK_MISC	(0<<28)
		#define KCQE_FLAGS_LAYER_MASK_L2	(2<<28)
		#define KCQE_FLAGS_LAYER_MASK_L3	(3<<28)
		#define KCQE_FLAGS_LAYER_MASK_L4	(4<<28)
		#define KCQE_FLAGS_LAYER_MASK_L5_RDMA	(5<<28)
		#define KCQE_FLAGS_LAYER_MASK_L5_ISCSI	(6<<28)
		#define KCQE_FLAGS_NEXT			(1<<31)
		#define KCQE_FLAGS_OPCODE_MASK		(0xff<<16)
		#define KCQE_FLAGS_OPCODE_SHIFT		(16)
		#define KCQE_OPCODE(op)			\
		(((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
};
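
/*
 * Illustrative sketch only: a completion carries both its layer and its
 * opcode in kcqe_op_flag, so a consumer can demultiplex like this
 * ("kcqe" is a local struct kcqe pointer and handle_iscsi_kcqe() is a
 * hypothetical handler, not part of this interface):
 *
 *	u32 flag = kcqe->kcqe_op_flag;
 *
 *	if ((flag & KCQE_FLAGS_LAYER_MASK) == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
 *		handle_iscsi_kcqe(KCQE_OPCODE(flag), kcqe);
 */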

#define MAX_CNIC_CTL_DATA	64
#define MAX_DRV_CTL_DATA	64

#define CNIC_CTL_STOP_CMD		1
#define CNIC_CTL_START_CMD		2
#define CNIC_CTL_COMPLETION_CMD		3

#define DRV_CTL_IO_WR_CMD		0x101
#define DRV_CTL_IO_RD_CMD		0x102
#define DRV_CTL_CTX_WR_CMD		0x103
#define DRV_CTL_CTXTBL_WR_CMD		0x104
#define DRV_CTL_RET_L5_SPQ_CREDIT_CMD	0x105
#define DRV_CTL_START_L2_CMD		0x106
#define DRV_CTL_STOP_L2_CMD		0x107
#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD	0x10c

struct cnic_ctl_completion {
	u32 cid;
};

struct cnic_ctl_info {
	int cmd;
	union {
		struct cnic_ctl_completion comp;
		char bytes[MAX_CNIC_CTL_DATA];
	} data;
};

struct drv_ctl_spq_credit {
	u32 credit_count;
};

struct drv_ctl_io {
	u32 cid_addr;
	u32 offset;
	u32 data;
	dma_addr_t dma_addr;
};

struct drv_ctl_l2_ring {
	u32 client_id;
	u32 cid;
};

struct drv_ctl_info {
	int cmd;
	union {
		struct drv_ctl_spq_credit credit;
		struct drv_ctl_io io;
		struct drv_ctl_l2_ring ring;
		char bytes[MAX_DRV_CTL_DATA];
	} data;
};

struct cnic_ops {
	struct module *cnic_owner;
	/* Calls to these functions are protected by RCU.  When
	 * unregistering, we wait for any calls to complete before
	 * continuing.
	 */
	int (*cnic_handler)(void *, void *);
	int (*cnic_ctl)(void *, struct cnic_ctl_info *);
};
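
/*
 * A minimal sketch of the caller side implied by the RCU comment above
 * (illustrative only; the actual callers live in the ethernet drivers, and
 * "published_cnic_ops", "cnic_data" and "status_blk" are placeholder names):
 *
 *	struct cnic_ops *ops;
 *
 *	rcu_read_lock();
 *	ops = rcu_dereference(published_cnic_ops);
 *	if (ops)
 *		ops->cnic_handler(cnic_data, status_blk);
 *	rcu_read_unlock();
 */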

#define MAX_CNIC_VEC	8

struct cnic_irq {
	unsigned int	vector;
	void		*status_blk;
	u32		status_blk_num;
	u32		status_blk_num2;
	u32		irq_flags;
#define CNIC_IRQ_FL_MSIX		0x00000001
};

struct cnic_eth_dev {
	struct module	*drv_owner;
	u32		drv_state;
#define CNIC_DRV_STATE_REGD		0x00000001
#define CNIC_DRV_STATE_USING_MSIX	0x00000002
	u32		chip_id;
	u32		max_kwqe_pending;
	struct pci_dev	*pdev;
	void __iomem	*io_base;
	void __iomem	*io_base2;
	void		*iro_arr;

	u32		ctx_tbl_offset;
	u32		ctx_tbl_len;
	int		ctx_blk_size;
	u32		starting_cid;
	u32		max_iscsi_conn;
	u32		max_fcoe_conn;
	u32		max_rdma_conn;
	u32		fcoe_init_cid;
	u16		iscsi_l2_client_id;
	u16		iscsi_l2_cid;

	int		num_irq;
	struct cnic_irq	irq_arr[MAX_CNIC_VEC];
	int		(*drv_register_cnic)(struct net_device *,
					     struct cnic_ops *, void *);
	int		(*drv_unregister_cnic)(struct net_device *);
	int		(*drv_submit_kwqes_32)(struct net_device *,
					       struct kwqe *[], u32);
	int		(*drv_submit_kwqes_16)(struct net_device *,
					       struct kwqe_16 *[], u32);
	int		(*drv_ctl)(struct net_device *, struct drv_ctl_info *);
	unsigned long	reserved1[2];
};

struct cnic_sockaddr {
	union {
		struct sockaddr_in	v4;
		struct sockaddr_in6	v6;
	} local;
	union {
		struct sockaddr_in	v4;
		struct sockaddr_in6	v6;
	} remote;
};
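
/*
 * Illustrative only: filling in the IPv4 halves of a cnic_sockaddr before a
 * connect request ("target_ip" and "TARGET_PORT" are placeholders, in
 * network byte order and host order respectively):
 *
 *	struct cnic_sockaddr saddr = { };
 *
 *	saddr.local.v4.sin_family       = AF_INET;
 *	saddr.remote.v4.sin_family      = AF_INET;
 *	saddr.remote.v4.sin_addr.s_addr = target_ip;
 *	saddr.remote.v4.sin_port        = htons(TARGET_PORT);
 */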

struct cnic_sock {
	struct cnic_dev *dev;
	void	*context;
	u32	src_ip[4];
	u32	dst_ip[4];
	u16	src_port;
	u16	dst_port;
	u16	vlan_id;
	unsigned char old_ha[6];
	unsigned char ha[6];
	u32	mtu;
	u32	cid;
	u32	l5_cid;
	u32	pg_cid;
	int	ulp_type;

	u32	ka_timeout;
	u32	ka_interval;
	u8	ka_max_probe_count;
	u8	tos;
	u8	ttl;
	u8	snd_seq_scale;
	u32	rcv_buf;
	u32	snd_buf;
	u32	seed;

	unsigned long	tcp_flags;
#define SK_TCP_NO_DELAY_ACK	0x1
#define SK_TCP_KEEP_ALIVE	0x2
#define SK_TCP_NAGLE		0x4
#define SK_TCP_TIMESTAMP	0x8
#define SK_TCP_SACK		0x10
#define SK_TCP_SEG_SCALING	0x20
	unsigned long	flags;
#define SK_F_INUSE		0
#define SK_F_OFFLD_COMPLETE	1
#define SK_F_OFFLD_SCHED	2
#define SK_F_PG_OFFLD_COMPLETE	3
#define SK_F_CONNECT_START	4
#define SK_F_IPV6		5
#define SK_F_CLOSING		7

	atomic_t ref_count;
	u32 state;
	struct kwqe kwqe1;
	struct kwqe kwqe2;
	struct kwqe kwqe3;
};

struct cnic_dev {
	struct net_device	*netdev;
	struct pci_dev		*pcidev;
	void __iomem		*regview;
	struct list_head	list;

	int (*register_device)(struct cnic_dev *dev, int ulp_type,
			       void *ulp_ctx);
	int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
	int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
			    u32 num_wqes);
	int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
			       u32 num_wqes);

	int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
			 void *);
	int (*cm_destroy)(struct cnic_sock *);
	int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
	int (*cm_abort)(struct cnic_sock *);
	int (*cm_close)(struct cnic_sock *);
	struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
	int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
				 char *data, u16 data_size);
	unsigned long flags;
#define CNIC_F_CNIC_UP		1
#define CNIC_F_BNX2_CLASS	3
#define CNIC_F_BNX2X_CLASS	4
	atomic_t ref_count;
	u8 mac_addr[6];

	int max_iscsi_conn;
	int max_fcoe_conn;
	int max_rdma_conn;

	void *cnic_priv;
};

#define CNIC_WR(dev, off, val)		writel(val, dev->regview + off)
#define CNIC_WR16(dev, off, val)	writew(val, dev->regview + off)
#define CNIC_WR8(dev, off, val)		writeb(val, dev->regview + off)
#define CNIC_RD(dev, off)		readl(dev->regview + off)
#define CNIC_RD16(dev, off)		readw(dev->regview + off)
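
/*
 * Illustrative only: these wrappers do MMIO against dev->regview, so the
 * offset is relative to the mapped register window ("REG_EXAMPLE_OFFSET"
 * is a placeholder, not a real register):
 *
 *	u32 val = CNIC_RD(dev, REG_EXAMPLE_OFFSET);
 *	CNIC_WR(dev, REG_EXAMPLE_OFFSET, val | 0x1);
 */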

struct cnic_ulp_ops {
	/* Calls to these functions are protected by RCU.  When
	 * unregistering, we wait for any calls to complete before
	 * continuing.
	 */

	void (*cnic_init)(struct cnic_dev *dev);
	void (*cnic_exit)(struct cnic_dev *dev);
	void (*cnic_start)(void *ulp_ctx);
	void (*cnic_stop)(void *ulp_ctx);
	void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
				u32 num_cqes);
	void (*indicate_netevent)(void *ulp_ctx, unsigned long event);
	void (*cm_connect_complete)(struct cnic_sock *);
	void (*cm_close_complete)(struct cnic_sock *);
	void (*cm_abort_complete)(struct cnic_sock *);
	void (*cm_remote_close)(struct cnic_sock *);
	void (*cm_remote_abort)(struct cnic_sock *);
	void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
				  char *data, u16 data_size);
	struct module *owner;
	atomic_t ref_count;
};

extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);

extern int cnic_unregister_driver(int ulp_type);

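/*
 * A minimal registration sketch for a ULP module (illustrative only;
 * "my_iscsi_ulp_ops" and its callbacks are hypothetical, and error
 * handling is omitted):
 *
 *	static struct cnic_ulp_ops my_iscsi_ulp_ops = {
 *		.cnic_init      = my_cnic_init,
 *		.cnic_exit      = my_cnic_exit,
 *		.indicate_kcqes = my_indicate_kcqes,
 *		.owner          = THIS_MODULE,
 *	};
 *
 *	rc = cnic_register_driver(CNIC_ULP_ISCSI, &my_iscsi_ulp_ops);
 *
 * and, on module unload:
 *
 *	cnic_unregister_driver(CNIC_ULP_ISCSI);
 */
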
extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev);
extern struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);

#endif