/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
#include "cnic.h"
#include "cnic_defs.h"

#define CNIC_MODULE_NAME	"cnic"

static char version[] =
	"Broadcom NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

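/* UIO open handler for the cnic device node.  Only one opener with
 * CAP_NET_ADMIN is allowed at a time; the L2 rings are shut down and
 * reinitialized under rtnl_lock so the userspace consumer (e.g. the
 * iscsiuio helper used with bnx2i/bnx2fc) always starts from a clean
 * ring state.
 */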
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

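/* The helpers below funnel all context-memory writes, indirect
 * register accesses, L2 ring control and ULP (un)registration
 * notifications through the drv_ctl() callback of the underlying
 * bnx2/bnx2x ethernet driver, which owns the hardware; cnic never
 * programs these resources directly.
 */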
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct fcoe_capabilities *fcoe_cap =
		&info.data.register_data.fcoe_features;

	if (reg) {
		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
	} else {
		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
	}

	info.data.ulp_type = ulp_type;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	if (!cp->ctx_tbl)
		return -EINVAL;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

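/* Send an ISCSI_KEVENT_* message to userspace over the iSCSI netlink
 * transport.  PATH_REQ messages are retried a few times, presumably to
 * give the userspace listener time to attach; IF_DOWN and other
 * message types are sent only once.
 */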
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return rc;
}

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			csk->vlan_id = path_resp->vlan_id;

			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

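/* Global ULP registration.  A minimal sketch of a caller (the hook
 * name is illustrative, not taken from bnx2i):
 *
 *	static struct cnic_ulp_ops my_ulp_ops = {
 *		.cnic_init	= my_ulp_init,
 *	};
 *	rc = cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
 *
 * The ops pointer is published in cnic_ulp_tbl under cnic_lock, then
 * cnic_init() is invoked for every existing device under rtnl_lock to
 * avoid racing with netdev events.
 */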
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		pr_warn("%s: Failed waiting for ref count to go to zero\n",
			__func__);
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	cnic_ulp_ctl(dev, ulp_type, true);

	return 0;

}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	else if (ulp_type == CNIC_ULP_FCOE)
		dev->fcoe_cap = NULL;

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	cnic_ulp_ctl(dev, ulp_type, false);

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
			    u32 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

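/* Simple bitmap-based ID allocator.  cnic_alloc_id() claims one
 * specific ID; cnic_alloc_new_id() hands out the next free ID
 * round-robin starting at ->next, wrapping once to the start.  The
 * wrap arithmetic "(id + 1) & (max - 1)" assumes ->max is a power of
 * two.
 */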
static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

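/* Build the page table the chip walks to address a multi-page ring.
 * The two variants differ only in dword order: cnic_setup_page_tbl()
 * stores the high address dword first (bnx2 layout) and
 * cnic_setup_page_tbl_le() the low dword first (bnx2x layout).  The
 * per-chip ->setup_pgtbl hook selects the right one at init time.
 */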
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

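/* Allocate a multi-page coherent DMA area.  pg_arr (virtual addresses)
 * and pg_map_arr (DMA addresses) share a single kzalloc'ed block, which
 * is why cnic_free_dma() only frees pg_arr.  With use_pg_tbl an
 * additional page table is allocated and filled through ->setup_pgtbl
 * so the hardware can chain the pages.
 */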
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BNX2_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) &
			  ~(BNX2_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
{
	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}

}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	__cnic_free_uio_rings(udev);

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
		if (udev->uio_dev == -1)
			__cnic_free_uio_rings(udev);
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BNX2_PAGE_SIZE;
		cp->cids_per_blk = BNX2_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BNX2_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}

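/* Allocate a kernel completion queue (KCQ).  With use_pg_tbl the
 * hardware reaches the pages through a page table and every slot in a
 * page is usable, so the linear bnx2 index helpers apply.  Without it,
 * pages are linked by a bnx2x_bd_chain_next element placed in the last
 * slot of each page, which is why the bnx2x index helpers above skip
 * one index at every MAX_KCQE_CNT boundary.
 */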
static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
			  bool use_pg_tbl)
{
	int err, i, use_page_tbl = 0;
	struct kcqe **kcq;

	if (use_pg_tbl)
		use_page_tbl = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	info->next_idx = cnic_bnx2_next_idx;
	info->hw_idx = cnic_bnx2_hw_idx;
	if (use_pg_tbl)
		return 0;

	info->next_idx = cnic_bnx2x_next_idx;
	info->hw_idx = cnic_bnx2x_hw_idx;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}

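/* Allocate the L2 ring and rx-buffer areas that userspace maps via UIO
 * (exported as uinfo->mem[2] and mem[3] in cnic_init_uio()).  The
 * buffers are reused across device restarts: nothing is allocated if
 * udev->l2_ring is already set, and the rings are only freed when the
 * UIO device is torn down or is not currently open.
 */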
static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
{
	struct cnic_local *cp = udev->dev->cnic_priv;

	if (udev->l2_ring)
		return 0;

	udev->l2_ring_size = pages * BNX2_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		return -ENOMEM;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf) {
		__cnic_free_uio_rings(udev);
		return -ENOMEM;
	}

	return 0;

}

static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			if (__cnic_alloc_uio_rings(udev, pages)) {
				udev->dev = NULL;
				read_unlock(&cnic_dev_lock);
				return -ENOMEM;
			}
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;

	if (__cnic_alloc_uio_rings(udev, pages))
		goto err_udev;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;

 err_udev:
	kfree(udev);
	return -ENOMEM;
}

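/* Export the device to userspace through UIO.  The memory map is
 * fixed: mem[0] is the register BAR, mem[1] the status block(s),
 * mem[2] the L2 ring and mem[3] the L2 rx buffers.
 * uio_register_device() is called only once per physical device
 * (guarded by uinfo->priv); on later restarts only the rings are
 * reinitialized.
 */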
static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
						     TX_MAX_TSS_RINGS + 1);
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);

		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}

static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		cp->max_cid_space += dev->max_fcoe_conn;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
	if (ret)
		goto error;

	if (CNIC_SUPPORTS_FCOE(cp)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
		if (ret)
			goto error;
	}

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		return 0;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}

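/* Copy an array of kernel work queue entries (KWQEs) into the bnx2 KWQ
 * ring and ring the doorbell by writing the new producer index.  The
 * free-space check is bypassed right after ring init
 * (CNIC_LCL_FL_KWQ_INIT), apparently because kwq_con_idx has not yet
 * been updated by the firmware at that point.
 */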
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

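/* Build and submit one 16-byte slow-path element (SPE) to the bnx2x
 * firmware.  The payload lives in the per-connection DMA buffer set up
 * by cnic_get_kwqe_16_data() above; only its physical address is
 * carried in the SPE.  drv_submit_kwqes_16() returns the number of
 * entries consumed, so ret == 1 is the success case.
 */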
static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return ret;
}

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}

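/* Handler for ISCSI_KWQE_OPCODE_INIT1 from the iSCSI ULP: size the
 * task array, R2T queue and HQ from the supplied parameters, then
 * program the per-function (pfid) queue sizes into the Tstorm, Ustorm,
 * Xstorm and Cstorm internal RAM of the bnx2x firmware.
 */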
1429static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1430{
1431 struct cnic_local *cp = dev->cnic_priv;
Michael Chan68c64d22012-12-06 10:33:11 +00001432 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chan71034ba2009-10-10 13:46:59 +00001433 struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
Michael Chan14203982010-10-06 03:16:06 +00001434 int hq_bds, pages;
1435 u32 pfid = cp->pfid;
Michael Chan71034ba2009-10-10 13:46:59 +00001436
1437 cp->num_iscsi_tasks = req1->num_tasks_per_conn;
1438 cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	return 0;
}

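/* Process the second iSCSI init KWQE: program the iSCSI error bitmaps
 * and CQ sequence-number sizes into the storm RAMs, then post an INIT
 * completion KCQE back to the iSCSI ULP driver.
 */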
static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

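/* Release the per-connection DMA rings (HQ, R2TQ, task array) of an
 * iSCSI context, or simply recycle the CID for an FCoE context.
 */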
static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
		cnic_free_id(&cp->cid_tbl, ctx->cid);
	} else {
		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
	}

	ctx->cid = 0;
}

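/* Allocate a hardware CID for the connection and, for iSCSI, the
 * per-connection task array, R2T queue and HQ DMA rings.  Any partial
 * allocation is unwound through cnic_free_bnx2x_conn_resc().
 */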
static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
		if (cid == -1) {
			ret = -ENOMEM;
			goto error;
		}
		ctx->cid = cid;
		return 0;
	}

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}

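/* Return a CPU pointer (and the bus address via @ctx_addr) to the
 * context memory slot for @cid, honoring the chip's context
 * alignment, and optionally zero the slot for a new connection.
 */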
static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}

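/* Build the iSCSI connection context from the offload KWQE train:
 * fill in the Xstorm, Tstorm, Ustorm and Cstorm sections (SQ/HQ/R2TQ
 * page tables, CQ ring state and CDU validation words) for firmware.
 */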
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;
	u8 port = CNIC_PORT(cp);

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
		ETH_P_8021Q;
	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
	    cp->port_mode == CHIP_2_PORT_MODE)
		port = 0;
	ictx->xstorm_st_context.common.flags =
		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
	ictx->xstorm_st_context.common.flags |=
		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr
	 */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;
}

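/* Process an OFFLOAD_CONN1 KWQE train: validate the request, allocate
 * connection resources, set up the context, and reply with an offload
 * completion KCQE carrying the resulting HW CID or an error status.
 */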
static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return 0;
}

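/* Pass an iSCSI connection-update KWQE through to the firmware as an
 * UPDATE_CONN ramrod, after validating that the CID is known.
 */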
static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

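/* Issue a common CFC-delete ramrod for the connection and wait, with
 * a timeout, for the firmware completion.
 */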
static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(cp, ctx->cid);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);

	if (ret == 0) {
		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
			return -EBUSY;
	}

	return 0;
}

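/* Tear down an offloaded iSCSI connection.  Destroy requests arriving
 * within ~2 seconds of the offload are deferred to the delete_task
 * worker; otherwise the CFC-delete ramrod is issued synchronously.
 */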
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	if (!ret) {
		atomic_dec(&cp->iscsi_conn);
		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
	}

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

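/* Fill the Xstorm and Tstorm connection buffers for a TCP connect
 * request: context address, MSS, receive buffer, pseudo-header
 * checksum and keepalive parameters from the L4 connect KWQEs.
 */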
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->max_rt_time = 0xffffffff;
}

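/* Program the device MAC address into the Xstorm and Tstorm iSCSI
 * RAM locations; Tstorm stores it byte-reversed in LSB/MID/MSB pairs.
 */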
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = cp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[0]);
}

static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}

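/* Process an L4 CONNECT1 KWQE train (two WQEs for IPv4, three for
 * IPv6): build the active-connection buffer and fire the TCP_CONNECT
 * ramrod, marking the context as offload-started on success.
 */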
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
		kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}

static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
		req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
		req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

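/* Forward an FCoE statistics request to the firmware as a STAT_FUNC
 * ramrod on the FCoE init CID.
 */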
static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_stat *req;
	struct fcoe_stat_ramrod_params *fcoe_stat;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	int ret;
	u32 cid;

	req = (struct fcoe_kwqe_stat *) kwqe;
	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);

	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_stat)
		return -ENOMEM;

	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));

	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret;
	struct cnic_local *cp = dev->cnic_priv;
	u32 cid;
	struct fcoe_init_ramrod_params *fcoe_init;
	struct fcoe_kwqe_init1 *req1;
	struct fcoe_kwqe_init2 *req2;
	struct fcoe_kwqe_init3 *req3;
	union l5cm_specific_data l5_data;

	if (num < 3) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
		*work = 1;
		return -EINVAL;
	}
	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
		*work = 2;
		return -EINVAL;
	}

	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_init size too big\n");
		return -ENOMEM;
	}
	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_init)
		return -ENOMEM;

	memset(fcoe_init, 0, sizeof(*fcoe_init));
	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;

	fcoe_init->sb_num = cp->status_blk_num;
	fcoe_init->eq_prod = MAX_KCQ_IDX;
	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
	cp->kcq2.sw_prod_idx = 0;

	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	*work = 3;
	return ret;
}

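/* Offload an FCoE connection from a four-WQE train: allocate a CID,
 * seed the context CDU validation words, copy the KWQEs into the
 * ramrod buffer and submit OFFLOAD_CONN.  Failures are reported to
 * the FCoE ULP driver as a CTX_ALLOC_FAILURE completion.
 */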
static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret = 0;
	u32 cid = -1, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;
	struct fcoe_kwqe_conn_offload1 *req1;
	struct fcoe_kwqe_conn_offload2 *req2;
	struct fcoe_kwqe_conn_offload3 *req3;
	struct fcoe_kwqe_conn_offload4 *req4;
	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
	struct cnic_context *ctx;
	struct fcoe_context *fctx;
	struct regpair ctx_addr;
	union l5cm_specific_data l5_data;
	struct fcoe_kcqe kcqe;
	struct kcqe *cqes[1];

	if (num < 4) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];

	*work = 4;

	l5_cid = req1->fcoe_conn_id;
	if (l5_cid >= dev->max_fcoe_conn)
		goto err_reply;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto err_reply;

	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		ret = 0;
		goto err_reply;
	}
	cid = ctx->cid;

	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
	if (fctx) {
		u32 hw_cid = BNX2X_HW_CID(cp, cid);
		u32 val;

		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->xstorm_ag_context.cdu_reserved = val;
		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->ustorm_ag_context.cdu_usage = val;
	}
	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_offload size too big\n");
		goto err_reply;
	}
	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_offload)
		goto err_reply;

	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));

	cid = BNX2X_HW_CID(cp, cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;

err_reply:
	if (cid != -1)
		cnic_free_bnx2x_conn_resc(dev, l5_cid);

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
	return ret;
}

static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_enable_disable *req;
	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;

	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;

	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_enable size too big\n");
		return -ENOMEM;
	}
	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_enable)
		return -ENOMEM;

	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_enable_disable *req;
	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;

	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id;
	if (l5_cid >= dev->max_fcoe_conn)
		return -EINVAL;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_disable size too big\n");
		return -ENOMEM;
	}
	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_disable)
		return -ENOMEM;

	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_destroy *req;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct fcoe_kcqe kcqe;
	struct kcqe *cqes[1];

	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id;
	if (l5_cid >= dev->max_fcoe_conn)
		return -EINVAL;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	ctx = &cp->ctx_tbl[l5_cid];

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	if (ret == 0) {
		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
		if (ctx->wait_cond)
			kcqe.completion_status = 0;
	}

	set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
	queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));

	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
	kcqe.fcoe_conn_id = req->conn_id;
	kcqe.fcoe_conn_context_id = cid;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
	return ret;
}

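/* Wait for the contexts from @start_cid onward to be torn down:
 * first for pending deferred deletes, then briefly for the offload
 * flag to clear, warning about any CID that remains live.
 */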
static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 i;

	for (i = start_cid; i < cp->max_cid_space; i++) {
		struct cnic_context *ctx = &cp->ctx_tbl[i];
		int j;

		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			msleep(10);

		for (j = 0; j < 5; j++) {
			if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
				break;
			msleep(20);
		}

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			netdev_warn(dev->netdev, "CID %x not deleted\n",
				    ctx->cid);
	}
}

static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_destroy *req;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	int ret;
	u32 cid;

	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);

	req = (struct fcoe_kwqe_destroy *) kwqe;
	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

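/* Synthesize an error completion KCQE for a KWQE that the hardware
 * could not accept (typically after a parity error), so the FCoE,
 * iSCSI or L4 ULP driver can clean up and start recovery quickly.
 */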
static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kcqe kcqe;
	struct kcqe *cqes[1];
	u32 cid;
	u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
	u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
	u32 kcqe_op;
	int ulp_type;

	cid = kwqe->kwqe_info0;
	memset(&kcqe, 0, sizeof(kcqe));

	if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
		u32 l5_cid = 0;

		ulp_type = CNIC_ULP_FCOE;
		if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
			struct fcoe_kwqe_conn_enable_disable *req;

			req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
			kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
			cid = req->context_id;
			l5_cid = req->conn_id;
		} else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
			kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
		} else {
			return;
		}
		kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
		kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
		kcqe.kcqe_info2 = cid;
		kcqe.kcqe_info0 = l5_cid;

	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
		ulp_type = CNIC_ULP_ISCSI;
		if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
			cid = kwqe->kwqe_info1;

		kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
		kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
		kcqe.kcqe_info2 = cid;
		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);

	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
		struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;

		ulp_type = CNIC_ULP_L4;
		if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
			kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
		else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
			kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
			kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		else
			return;

		kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
				    KCQE_FLAGS_LAYER_MASK_L4;
		l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
		l4kcqe->cid = cid;
		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
	} else {
		return;
	}

	cqes[0] = &kcqe;
	cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
}

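/* Dispatch a batch of iSCSI and L4 KWQEs to the per-opcode handlers.
 * Handlers that consume more than one WQE report the count through
 * *work; -EIO/-EAGAIN failures generate error completions to the ULP.
 */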
static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
					 struct kwqe *wqes[], u32 num_wqes)
{
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case ISCSI_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_INIT2:
			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
						     num_wqes - i, &work);
			break;
		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_CONNECT1:
			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
						 &work);
			break;
		case L4_KWQE_OPCODE_VALUE_CLOSE:
			ret = cnic_bnx2x_close(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_RESET:
			ret = cnic_bnx2x_reset(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
			ret = cnic_bnx2x_offload_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
			ret = cnic_bnx2x_update_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
			ret = 0;
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0) {
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);

			/* Possibly bnx2x parity error, send completion
			 * to ulp drivers with error code to speed up
			 * cleanup and reset recovery.
			 */
			if (ret == -EIO || ret == -EAGAIN)
				cnic_bnx2x_kwqe_err(dev, kwqe);
		}
		i += work;
	}
	return 0;
}

static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
					struct kwqe *wqes[], u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
		return -EINVAL;

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case FCOE_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
						    num_wqes - i, &work);
			break;
		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
						    num_wqes - i, &work);
			break;
		case FCOE_KWQE_OPCODE_ENABLE_CONN:
			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DISABLE_CONN:
			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DESTROY:
			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_STAT:
			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0) {
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);

			/* Possibly bnx2x parity error, send completion
			 * to ulp drivers with error code to speed up
			 * cleanup and reset recovery.
			 */
			if (ret == -EIO || ret == -EAGAIN)
				cnic_bnx2x_kwqe_err(dev, kwqe);
		}
		i += work;
	}
	return 0;
}

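/* Top-level KWQE entry point for bnx2x devices: route the batch to
 * the iSCSI/L4 or FCoE submit path based on the layer code of the
 * first WQE.
 */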
static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				   u32 num_wqes)
{
	int ret = -EINVAL;
	u32 layer_code;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	if (!num_wqes)
		return 0;

	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
	switch (layer_code) {
	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
	case KWQE_FLAGS_LAYER_MASK_L4:
	case KWQE_FLAGS_LAYER_MASK_L2:
		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
		break;

	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
		break;
	}
	return ret;
}

static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
{
	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
		return KCQE_FLAGS_LAYER_MASK_L4;

	return opflag & KCQE_FLAGS_LAYER_MASK;
}

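/* Deliver completed KCQEs to the ULP drivers, grouping consecutive
 * entries of the same protocol layer into a single indication, and
 * return any consumed ramrod SPQ credits afterwards.
 */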
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j, comp = 0;

	i = 0;
	j = 1;
	while (num_cqes) {
		struct cnic_ulp_ops *ulp_ops;
		int ulp_type;
		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);

		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
			comp++;

		while (j < num_cqes) {
			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;

			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
				break;

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
				comp++;
			j++;
		}

		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
			ulp_type = CNIC_ULP_RDMA;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
			ulp_type = CNIC_ULP_ISCSI;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
			ulp_type = CNIC_ULP_FCOE;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
			ulp_type = CNIC_ULP_L4;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
			goto end;
		else {
			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
				   kcqe_op_flag);
			goto end;
		}

		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
		if (likely(ulp_ops)) {
			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
						cp->completed_kcq + i, j);
		}
		rcu_read_unlock();
end:
		num_cqes -= j;
		i += j;
		j = 1;
	}
	if (unlikely(comp))
		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
}

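/* Harvest new KCQEs between the software and hardware producer
 * indices into cp->completed_kcq[], stopping after the last entry
 * without the NEXT flag so only complete trains are serviced.
 */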
static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
{
	struct cnic_local *cp = dev->cnic_priv;
	u16 i, ri, hw_prod, last;
	struct kcqe *kcqe;
	int kcqe_cnt = 0, last_cnt = 0;

	i = ri = last = info->sw_prod_idx;
	ri &= MAX_KCQ_IDX;
	hw_prod = *info->hw_prod_idx_ptr;
	hw_prod = info->hw_idx(hw_prod);

	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
		cp->completed_kcq[kcqe_cnt++] = kcqe;
		i = info->next_idx(i);
		ri = i & MAX_KCQ_IDX;
		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
			last_cnt = kcqe_cnt;
			last = i;
		}
	}

	info->sw_prod_idx = last;
	return last_cnt;
}

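/* Scan the bnx2x L2 receive completion ring and count ramrod
 * completions for client-setup and halt commands.
 */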
Michael Chan48f753d2010-05-18 11:32:53 +00002900static int cnic_l2_completion(struct cnic_local *cp)
2901{
2902 u16 hw_cons, sw_cons;
Michael Chancd801532010-10-13 14:06:49 +00002903 struct cnic_uio_dev *udev = cp->udev;
Michael Chan48f753d2010-05-18 11:32:53 +00002904 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
Michael Chan2bc40782012-12-06 10:33:09 +00002905 (udev->l2_ring + (2 * BNX2_PAGE_SIZE));
Michael Chan48f753d2010-05-18 11:32:53 +00002906 u32 cmd;
2907 int comp = 0;
2908
2909 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2910 return 0;
2911
2912 hw_cons = *cp->rx_cons_ptr;
2913 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2914 hw_cons++;
2915
2916 sw_cons = cp->rx_cons;
2917 while (sw_cons != hw_cons) {
2918 u8 cqe_fp_flags;
2919
2920 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2921 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2922 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2923 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2924 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2925 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2926 cmd == RAMROD_CMD_ID_ETH_HALT)
2927 comp++;
2928 }
2929 sw_cons = BNX2X_NEXT_RCQE(sw_cons);
2930 }
2931 return comp;
2932}
2933
static void cnic_chk_pkt_rings(struct cnic_local *cp)
{
	u16 rx_cons, tx_cons;
	int comp = 0;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	rx_cons = *cp->rx_cons_ptr;
	tx_cons = *cp->tx_cons_ptr;
	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			comp = cnic_l2_completion(cp);

		cp->tx_cons = tx_cons;
		cp->rx_cons = rx_cons;

		if (cp->udev)
			uio_event_notify(&cp->udev->cnic_uinfo);
	}
	if (comp)
		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
}

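/*
 * uio_event_notify() wakes any process blocked in read()/poll() on the
 * cnic uio device whenever the L2 consumer indices move; in practice this
 * is the userspace agent that services the iSCSI/FCoE L2 rings.
 */
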
static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
	int kcqe_cnt;

	/* status block index must be read before reading other fields */
	rmb();
	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;

	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		status_idx = (u16) *cp->kcq1.status_idx_ptr;
		/* status block index must be read first */
		rmb();
		cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
	}

	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);

	cnic_chk_pkt_rings(cp);

	return status_idx;
}

static int cnic_service_bnx2(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		struct status_block *sblk = status_blk;

		return sblk->status_idx;
	}

	return cnic_service_bnx2_queues(dev);
}

static void cnic_service_bnx2_msix(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;

	cp->last_status_idx = cnic_service_bnx2_queues(dev);

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_doirq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;

		prefetch(cp->status_blk.gen);
		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

		tasklet_schedule(&cp->cnic_irq_task);
	}
}

static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
	struct cnic_dev *dev = dev_instance;
	struct cnic_local *cp = dev->cnic_priv;

	if (cp->ack_int)
		cp->ack_int(dev);

	cnic_doirq(dev);

	return IRQ_HANDLED;
}

static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
				      u16 index, u8 op, u8 update)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
		((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
		 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
		 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
		 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
}

static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
			    u16 index, u8 op, u8 update)
{
	struct igu_regular cmd_data;
	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;

	cmd_data.sb_id_and_flags =
		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
		(update << IGU_REGULAR_BUPDATE_SHIFT) |
		(op << IGU_REGULAR_ENABLE_INT_SHIFT);

	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
}

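/*
 * Two interrupt-ack paths exist on bnx2x hardware: cnic_ack_bnx2x_int()
 * writes the HC (host coalescing) command register used by the older
 * 57710/57711 (E1/E1H) parts, while cnic_ack_igu_sb() writes the IGU BAR
 * directly as used on 57712 and later (E2) parts.  The wrappers below
 * select between them via cp->arm_int and the *_e2_* variants.
 */
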
static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
			   IGU_INT_DISABLE, 0);
}

static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
			IGU_INT_DISABLE, 0);
}

static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
			   IGU_INT_ENABLE, 1);
}

static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
			IGU_INT_ENABLE, 1);
}

static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	u32 last_status = *info->status_idx_ptr;
	int kcqe_cnt;

	/* status block index must be read before reading the KCQ */
	rmb();
	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that sblk fields can change. */
		barrier();

		last_status = *info->status_idx_ptr;
		/* status block index must be read before reading the KCQ */
		rmb();
	}
	return last_status;
}

static void cnic_service_bnx2x_bh(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx, new_status_idx;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return;

	while (1) {
		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);

		CNIC_WR16(dev, cp->kcq1.io_addr,
			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);

		if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE) {
			cp->arm_int(dev, status_idx);
			break;
		}

		new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);

		if (new_status_idx != status_idx)
			continue;

		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
			  MAX_KCQ_IDX);

		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
				status_idx, IGU_INT_ENABLE, 1);

		break;
	}
}

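/*
 * The bottom half loops because kcq1 (iSCSI) and kcq2 (FCoE, skipped when
 * CNIC_DRV_STATE_NO_FCOE is set) share one status block: if the status
 * index moved while kcq2 was being drained, kcq1 must be re-checked
 * before the interrupt is re-armed, or an event could sit unserviced
 * until the next interrupt.
 */
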
static int cnic_service_bnx2x(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct cnic_local *cp = dev->cnic_priv;

	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		cnic_doirq(dev);

	cnic_chk_pkt_rings(cp);

	return 0;
}

static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
{
	struct cnic_ulp_ops *ulp_ops;

	if (if_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	mutex_lock(&cnic_lock);
	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
					    lockdep_is_held(&cnic_lock));
	if (!ulp_ops) {
		mutex_unlock(&cnic_lock);
		return;
	}
	set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	mutex_unlock(&cnic_lock);

	if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
		ulp_ops->cnic_stop(cp->ulp_handle[if_type]);

	clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
}

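/*
 * ULP_F_CALL_PENDING marks an in-progress cnic_stop/cnic_start upcall so
 * that the unregister path can wait for it to finish.  The ulp_ops
 * pointer is fetched under cnic_lock (rcu_dereference_protected), so the
 * ops structure cannot disappear while the flag is set.
 */
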
static void cnic_ulp_stop(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
		cnic_ulp_stop_one(cp, if_type);
}

static void cnic_ulp_start(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
						    lockdep_is_held(&cnic_lock));
		if (!ulp_ops || !ulp_ops->cnic_start) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_start(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;
	int rc;

	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (ulp_ops && ulp_ops->cnic_get_stats)
		rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
	else
		rc = -ENODEV;
	mutex_unlock(&cnic_lock);
	return rc;
}

static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
	struct cnic_dev *dev = data;
	int ulp_type = CNIC_ULP_ISCSI;

	switch (info->cmd) {
	case CNIC_CTL_STOP_CMD:
		cnic_hold(dev);

		cnic_ulp_stop(dev);
		cnic_stop_hw(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_START_CMD:
		cnic_hold(dev);

		if (!cnic_start_hw(dev))
			cnic_ulp_start(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_STOP_ISCSI_CMD: {
		struct cnic_local *cp = dev->cnic_priv;
		set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, 0);
		break;
	}
	case CNIC_CTL_COMPLETION_CMD: {
		struct cnic_ctl_completion *comp = &info->data.comp;
		u32 cid = BNX2X_SW_CID(comp->cid);
		u32 l5_cid;
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
			break;

		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

			if (unlikely(comp->error)) {
				set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
				netdev_err(dev->netdev,
					   "CID %x CFC delete comp error %x\n",
					   cid, comp->error);
			}

			ctx->wait_cond = 1;
			wake_up(&ctx->waitq);
		}
		break;
	}
	case CNIC_CTL_FCOE_STATS_GET_CMD:
		ulp_type = CNIC_ULP_FCOE;
		/* fall through */
	case CNIC_CTL_ISCSI_STATS_GET_CMD:
		cnic_hold(dev);
		cnic_copy_ulp_stats(dev, ulp_type);
		cnic_put(dev);
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static void cnic_ulp_init(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl_prot(i);
		if (!ulp_ops || !ulp_ops->cnic_init) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_init(dev);

		ulp_put(ulp_ops);
	}
}

static void cnic_ulp_exit(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl_prot(i);
		if (!ulp_ops || !ulp_ops->cnic_exit) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_exit(dev);

		ulp_put(ulp_ops);
	}
}

static int cnic_cm_offload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_offload_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
	l4kwqe->l2hdr_nbytes = ETH_HLEN;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->sa0 = dev->mac_addr[0];
	l4kwqe->sa1 = dev->mac_addr[1];
	l4kwqe->sa2 = dev->mac_addr[2];
	l4kwqe->sa3 = dev->mac_addr[3];
	l4kwqe->sa4 = dev->mac_addr[4];
	l4kwqe->sa5 = dev->mac_addr[5];

	l4kwqe->etype = ETH_P_IP;
	l4kwqe->ipid_start = DEF_IPID_START;
	l4kwqe->host_opaque = csk->l5_cid;

	if (csk->vlan_id) {
		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
		l4kwqe->vlan_tag = csk->vlan_id;
		l4kwqe->l2hdr_nbytes += 4;
	}

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_update_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_update_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
	l4kwqe->pg_cid = csk->pg_cid;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->pg_host_opaque = csk->l5_cid;
	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_upload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_upload *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->pg_cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_conn_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_connect_req1 *l4kwqe1;
	struct l4_kwq_connect_req2 *l4kwqe2;
	struct l4_kwq_connect_req3 *l4kwqe3;
	struct kwqe *wqes[3];
	u8 tcp_flags = 0;
	int num_wqes = 2;

	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
	memset(l4kwqe3, 0, sizeof(*l4kwqe3));

	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
	l4kwqe3->flags =
		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
	l4kwqe3->ka_timeout = csk->ka_timeout;
	l4kwqe3->ka_interval = csk->ka_interval;
	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
	l4kwqe3->tos = csk->tos;
	l4kwqe3->ttl = csk->ttl;
	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
	l4kwqe3->pmtu = csk->mtu;
	l4kwqe3->rcv_buf = csk->rcv_buf;
	l4kwqe3->snd_buf = csk->snd_buf;
	l4kwqe3->seed = csk->seed;

	wqes[0] = (struct kwqe *) l4kwqe1;
	if (test_bit(SK_F_IPV6, &csk->flags)) {
		wqes[1] = (struct kwqe *) l4kwqe2;
		wqes[2] = (struct kwqe *) l4kwqe3;
		num_wqes = 3;

		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
		l4kwqe2->flags =
			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
			       sizeof(struct tcphdr);
	} else {
		wqes[1] = (struct kwqe *) l4kwqe3;
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
			       sizeof(struct tcphdr);
	}

	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
	l4kwqe1->flags =
		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
	l4kwqe1->cid = csk->cid;
	l4kwqe1->pg_cid = csk->pg_cid;
	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
	if (csk->tcp_flags & SK_TCP_NAGLE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
	if (csk->tcp_flags & SK_TCP_SACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;

	l4kwqe1->tcp_flags = tcp_flags;

	return dev->submit_kwqes(dev, wqes, num_wqes);
}

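/*
 * The connect request is a chain of KWQEs: CONNECT1 always leads, CONNECT2
 * carries the upper 96 bits of the IPv6 addresses (so it is only linked in
 * for IPv6 connections), and CONNECT3 carries the TCP options.  The
 * LINKED_WITH_NEXT flags tell the chip the message continues in the
 * following WQE.
 */
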
static int cnic_cm_close_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_close_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_abort_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_reset_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
			  u32 l5_cid, struct cnic_sock **csk, void *context)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_sock *csk1;

	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return -EINVAL;

	if (cp->ctx_tbl) {
		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			return -EAGAIN;
	}

	csk1 = &cp->csk_tbl[l5_cid];
	if (atomic_read(&csk1->ref_count))
		return -EAGAIN;

	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
		return -EBUSY;

	csk1->dev = dev;
	csk1->cid = cid;
	csk1->l5_cid = l5_cid;
	csk1->ulp_type = ulp_type;
	csk1->context = context;

	csk1->ka_timeout = DEF_KA_TIMEOUT;
	csk1->ka_interval = DEF_KA_INTERVAL;
	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
	csk1->tos = DEF_TOS;
	csk1->ttl = DEF_TTL;
	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
	csk1->rcv_buf = DEF_RCV_BUF;
	csk1->snd_buf = DEF_SND_BUF;
	csk1->seed = DEF_SEED;

	*csk = csk1;
	return 0;
}

static void cnic_cm_cleanup(struct cnic_sock *csk)
{
	if (csk->src_port) {
		struct cnic_dev *dev = csk->dev;
		struct cnic_local *cp = dev->cnic_priv;

		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
		csk->src_port = 0;
	}
}

static void cnic_close_conn(struct cnic_sock *csk)
{
	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
		cnic_cm_upload_pg(csk);
		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	}
	cnic_cm_cleanup(csk);
}

static int cnic_cm_destroy(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	csk_hold(csk);
	clear_bit(SK_F_INUSE, &csk->flags);
	smp_mb__after_clear_bit();
	while (atomic_read(&csk->ref_count) != 1)
		msleep(1);
	cnic_cm_cleanup(csk);

	csk->flags = 0;
	csk_put(csk);
	return 0;
}

static inline u16 cnic_get_vlan(struct net_device *dev,
				struct net_device **vlan_dev)
{
	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		*vlan_dev = vlan_dev_real_dev(dev);
		return vlan_dev_vlan_id(dev);
	}
	*vlan_dev = dev;
	return 0;
}

static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_INET)
	struct rtable *rt;

	rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
	if (!IS_ERR(rt)) {
		*dst = &rt->dst;
		return 0;
	}
	return PTR_ERR(rt);
#else
	return -ENETUNREACH;
#endif
}

static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = dst_addr->sin6_addr;
	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
		fl6.flowi6_oif = dst_addr->sin6_scope_id;

	*dst = ip6_route_output(&init_net, NULL, &fl6);
	if ((*dst)->error) {
		dst_release(*dst);
		*dst = NULL;
		return -ENETUNREACH;
	} else
		return 0;
#endif

	return -ENETUNREACH;
}

static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
					   int ulp_type)
{
	struct cnic_dev *dev = NULL;
	struct dst_entry *dst;
	struct net_device *netdev = NULL;
	int err = -ENETUNREACH;

	if (dst_addr->sin_family == AF_INET)
		err = cnic_get_v4_route(dst_addr, &dst);
	else if (dst_addr->sin_family == AF_INET6) {
		struct sockaddr_in6 *dst_addr6 =
			(struct sockaddr_in6 *) dst_addr;

		err = cnic_get_v6_route(dst_addr6, &dst);
	} else
		return NULL;

	if (err)
		return NULL;

	if (!dst->dev)
		goto done;

	cnic_get_vlan(dst->dev, &netdev);

	dev = cnic_from_netdev(netdev);

done:
	dst_release(dst);
	if (dev)
		cnic_put(dev);
	return dev;
}

static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
}

static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	int is_v6, rc = 0;
	struct dst_entry *dst = NULL;
	struct net_device *realdev;
	__be16 local_port;
	u32 port_id;

	if (saddr->local.v6.sin6_family == AF_INET6 &&
	    saddr->remote.v6.sin6_family == AF_INET6)
		is_v6 = 1;
	else if (saddr->local.v4.sin_family == AF_INET &&
		 saddr->remote.v4.sin_family == AF_INET)
		is_v6 = 0;
	else
		return -EINVAL;

	clear_bit(SK_F_IPV6, &csk->flags);

	if (is_v6) {
		set_bit(SK_F_IPV6, &csk->flags);
		cnic_get_v6_route(&saddr->remote.v6, &dst);

		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
		       sizeof(struct in6_addr));
		csk->dst_port = saddr->remote.v6.sin6_port;
		local_port = saddr->local.v6.sin6_port;

	} else {
		cnic_get_v4_route(&saddr->remote.v4, &dst);

		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
		csk->dst_port = saddr->remote.v4.sin_port;
		local_port = saddr->local.v4.sin_port;
	}

	csk->vlan_id = 0;
	csk->mtu = dev->netdev->mtu;
	if (dst && dst->dev) {
		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
		if (realdev == dev->netdev) {
			csk->vlan_id = vlan;
			csk->mtu = dst_mtu(dst);
		}
	}

	port_id = be16_to_cpu(local_port);
	if (port_id >= CNIC_LOCAL_PORT_MIN &&
	    port_id < CNIC_LOCAL_PORT_MAX) {
		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
			port_id = 0;
	} else
		port_id = 0;

	if (!port_id) {
		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
		if (port_id == -1) {
			rc = -ENOMEM;
			goto err_out;
		}
		local_port = cpu_to_be16(port_id);
	}
	csk->src_port = local_port;

err_out:
	dst_release(dst);
	return rc;
}

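/*
 * Source-port selection in cnic_get_route(): a caller-supplied port inside
 * [CNIC_LOCAL_PORT_MIN, CNIC_LOCAL_PORT_MAX) is reserved in csk_port_tbl
 * if still free; otherwise an ephemeral port is drawn from the same id
 * table, since offloaded connections bypass the stack's normal port
 * allocator.
 */
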
static void cnic_init_csk_state(struct cnic_sock *csk)
{
	csk->state = 0;
	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
	clear_bit(SK_F_CLOSING, &csk->flags);
}

static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	int err = 0;

	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		return -EOPNOTSUPP;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
		return -EINVAL;

	cnic_init_csk_state(csk);

	err = cnic_get_route(csk, saddr);
	if (err)
		goto err_out;

	err = cnic_resolve_addr(csk, saddr);
	if (!err)
		return 0;

err_out:
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	return err;
}

static int cnic_cm_abort(struct cnic_sock *csk)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_abort_prep(csk))
		return cnic_cm_abort_req(csk);

	/* Getting here means that we haven't started connect, or
	 * connect was not successful, or it has been reset by the target.
	 */

	cp->close_conn(csk, opcode);
	if (csk->state != opcode) {
		/* Wait for remote reset sequence to complete */
		while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			msleep(1);

		return -EALREADY;
	}

	return 0;
}

static int cnic_cm_close(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_close_prep(csk)) {
		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		return cnic_cm_close_req(csk);
	} else {
		/* Wait for remote reset sequence to complete */
		while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			msleep(1);

		return -EALREADY;
	}
	return 0;
}

static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
			   u8 opcode)
{
	struct cnic_ulp_ops *ulp_ops;
	int ulp_type = csk->ulp_type;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (ulp_ops) {
		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
			ulp_ops->cm_connect_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
			ulp_ops->cm_close_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
			ulp_ops->cm_remote_abort(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
			ulp_ops->cm_abort_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
			ulp_ops->cm_remote_close(csk);
	}
	rcu_read_unlock();
}

static int cnic_cm_set_pg(struct cnic_sock *csk)
{
	if (cnic_offld_prep(csk)) {
		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			cnic_cm_update_pg(csk);
		else
			cnic_cm_offload_pg(csk);
	}
	return 0;
}

static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 l5_cid = kcqe->pg_host_opaque;
	u8 opcode = kcqe->op_code;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];

	csk_hold(csk);
	if (!cnic_in_use(csk))
		goto done;

	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		goto done;
	}
	/* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
	if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk,
			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		goto done;
	}

	csk->pg_cid = kcqe->pg_cid;
	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	cnic_cm_conn_req(csk);

done:
	csk_put(csk);
}

static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
	u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	ctx->timestamp = jiffies;
	ctx->wait_cond = 1;
	wake_up(&ctx->waitq);
}

static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
	u8 opcode = l4kcqe->op_code;
	u32 l5_cid;
	struct cnic_sock *csk;

	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
		cnic_process_fcoe_term_conn(dev, kcqe);
		return;
	}
	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		cnic_cm_process_offld_pg(dev, l4kcqe);
		return;
	}

	l5_cid = l4kcqe->conn_id;
	if (opcode & 0x80)
		l5_cid = l4kcqe->cid;
	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return;

	csk = &cp->csk_tbl[l5_cid];
	csk_hold(csk);

	if (!cnic_in_use(csk)) {
		csk_put(csk);
		return;
	}

	switch (opcode) {
	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
		if (l4kcqe->status != 0) {
			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
			cnic_cm_upcall(cp, csk,
				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		}
		break;
	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
		if (l4kcqe->status == 0)
			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
		else if (l4kcqe->status ==
			 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
			set_bit(SK_F_HW_ERR, &csk->flags);

		smp_mb__before_clear_bit();
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk, opcode);
		break;

	case L5CM_RAMROD_CMD_ID_CLOSE:
		if (l4kcqe->status != 0) {
			netdev_warn(dev->netdev, "RAMROD CLOSE compl with "
				    "status 0x%x\n", l4kcqe->status);
			opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
			/* Fall through */
		} else {
			break;
		}
	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
			set_bit(SK_F_HW_ERR, &csk->flags);

		cp->close_conn(csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
		/* after we already sent CLOSE_REQ */
		if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
		    !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
		    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
			cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
		else
			cnic_cm_upcall(cp, csk, opcode);
		break;
	}
	csk_put(csk);
}

static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
{
	struct cnic_dev *dev = data;
	int i;

	for (i = 0; i < num; i++)
		cnic_cm_process_kcqe(dev, kcqe[i]);
}

static struct cnic_ulp_ops cm_ulp_ops = {
	.indicate_kcqes	= cnic_cm_indicate_kcqe,
};

static void cnic_cm_free_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	kfree(cp->csk_tbl);
	cp->csk_tbl = NULL;
	cnic_free_id_tbl(&cp->csk_port_tbl);
}

static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 port_id;

	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
			      GFP_KERNEL);
	if (!cp->csk_tbl)
		return -ENOMEM;

	port_id = random32();
	port_id %= CNIC_LOCAL_PORT_RANGE;
	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
			     CNIC_LOCAL_PORT_MIN, port_id)) {
		cnic_cm_free_mem(dev);
		return -ENOMEM;
	}
	return 0;
}

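/*
 * The id table backing local TCP port allocation is seeded at a random
 * offset (random32() % CNIC_LOCAL_PORT_RANGE) so that successive driver
 * loads do not hand out the same ephemeral ports in the same order.
 */
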
static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
{
	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		/* Unsolicited RESET_COMP or RESET_RECEIVED */
		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
		csk->state = opcode;
	}

	/* We accept the close/reset event if:
	 * 1. the event opcode matches the expected event in csk->state, or
	 * 2. the expected event is CLOSE_COMP or RESET_COMP (any event is
	 *    accepted), or
	 * 3. the expected event is 0, meaning the connection was never
	 *    established, in which case we accept the opcode from cm_abort.
	 */
	if (opcode == csk->state || csk->state == 0 ||
	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
	    csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
			if (csk->state == 0)
				csk->state = opcode;
			return 1;
		}
	}
	return 0;
}

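/*
 * SK_F_CLOSING is a one-shot gate: only the first accepted close/reset
 * event returns 1 from cnic_ready_to_close(), so the close path below
 * runs exactly once per connection even if the hardware delivers several
 * terminal events.
 */
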
static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
		cnic_cm_upcall(cp, csk, opcode);
		return;
	}

	clear_bit(SK_F_CONNECT_START, &csk->flags);
	cnic_close_conn(csk);
	csk->state = opcode;
	cnic_cm_upcall(cp, csk, opcode);
}

static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
{
}

static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
	u32 seed;

	seed = random32();
	cnic_ctx_wr(dev, 45, 0, seed);
	return 0;
}

static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
	union l5cm_specific_data l5_data;
	u32 cmd = 0;
	int close_complete = 0;

	switch (opcode) {
	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
		if (cnic_ready_to_close(csk, opcode)) {
			if (test_bit(SK_F_HW_ERR, &csk->flags))
				close_complete = 1;
			else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
			else
				close_complete = 1;
		}
		break;
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
		break;
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		close_complete = 1;
		break;
	}
	if (cmd) {
		memset(&l5_data, 0, sizeof(l5_data));

		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
				    &l5_data);
	} else if (close_complete) {
		ctx->timestamp = jiffies;
		cnic_close_conn(csk);
		cnic_cm_upcall(cp, csk, csk->state);
	}
}

static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (!cp->ctx_tbl)
		return;

	if (!netif_running(dev->netdev))
		return;

	cnic_bnx2x_delete_wait(dev, 0);

	cancel_delayed_work(&cp->delete_task);
	flush_workqueue(cnic_wq);

	if (atomic_read(&cp->iscsi_conn) != 0)
		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
			    atomic_read(&cp->iscsi_conn));
}

static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = cp->pfid;
	u32 port = CNIC_PORT(cp);

	cnic_init_bnx2x_mac(dev);
	cnic_bnx2x_set_tcp_timestamp(dev, 1);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);

	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
		DEF_MAX_DA_COUNT);

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
		DEF_MAX_CWND);
	return 0;
}

static void cnic_delete_task(struct work_struct *work)
{
	struct cnic_local *cp;
	struct cnic_dev *dev;
	u32 i;
	int need_resched = 0;

	cp = container_of(work, struct cnic_local, delete_task.work);
	dev = cp->dev;

	if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
		struct drv_ctl_info info;

		cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);

		info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
		cp->ethdev->drv_ctl(dev->netdev, &info);
	}

	for (i = 0; i < cp->max_cid_space; i++) {
		struct cnic_context *ctx = &cp->ctx_tbl[i];
		int err;

		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			continue;

		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
			need_resched = 1;
			continue;
		}

		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			continue;

		err = cnic_bnx2x_destroy_ramrod(dev, i);

		cnic_free_bnx2x_conn_resc(dev, i);
		if (!err) {
			if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
				atomic_dec(&cp->iscsi_conn);

			clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
		}
	}

	if (need_resched)
		queue_delayed_work(cnic_wq, &cp->delete_task,
				   msecs_to_jiffies(10));
}

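/*
 * Deletions are deliberately lazy: a context must sit in DELETE_WAIT for
 * at least 2*HZ (two seconds) past its last close timestamp before the
 * destroy ramrod is issued, and the work requeues itself every 10 ms
 * until every pending context has aged out.
 */
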
static int cnic_cm_open(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int err;

	err = cnic_cm_alloc_mem(dev);
	if (err)
		return err;

	err = cp->start_cm(dev);

	if (err)
		goto err_out;

	INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);

	dev->cm_create = cnic_cm_create;
	dev->cm_destroy = cnic_cm_destroy;
	dev->cm_connect = cnic_cm_connect;
	dev->cm_abort = cnic_cm_abort;
	dev->cm_close = cnic_cm_close;
	dev->cm_select_dev = cnic_cm_select_dev;

	cp->ulp_handle[CNIC_ULP_L4] = dev;
	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
	return 0;

err_out:
	cnic_cm_free_mem(dev);
	return err;
}

static int cnic_cm_shutdown(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	if (!cp->csk_tbl)
		return 0;

	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
		struct cnic_sock *csk = &cp->csk_tbl[i];

		clear_bit(SK_F_INUSE, &csk->flags);
		cnic_cm_cleanup(csk);
	}
	cnic_cm_free_mem(dev);

	return 0;
}

static void cnic_init_context(struct cnic_dev *dev, u32 cid)
{
	u32 cid_addr;
	int i;

	cid_addr = GET_CID_ADDR(cid);

	for (i = 0; i < CTX_SIZE; i += 4)
		cnic_ctx_wr(dev, cid_addr, i, 0);
}

static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret = 0, i;
	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;

	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
		return 0;

	for (i = 0; i < cp->ctx_blks; i++) {
		int j;
		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
		u32 val;

		memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE);

		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) cp->ctx_arr[i].mapping >> 32);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {
			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

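/*
 * On the 5709, connection context lives in host memory rather than
 * on-chip: each context block's DMA address is programmed into the chip's
 * host page table, and the WRITE_REQ bit is polled (up to 10 * 5 us per
 * entry) to confirm the chip accepted it.
 */
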
4397static void cnic_free_irq(struct cnic_dev *dev)
4398{
4399 struct cnic_local *cp = dev->cnic_priv;
4400 struct cnic_eth_dev *ethdev = cp->ethdev;
4401
4402 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4403 cp->disable_int_sync(dev);
Michael Chan6e0dc642010-10-13 14:06:44 +00004404 tasklet_kill(&cp->cnic_irq_task);
Michael Chana4636962009-06-08 18:14:43 -07004405 free_irq(ethdev->irq_arr[0].vector, dev);
4406 }
4407}
4408
Michael Chan6e0dc642010-10-13 14:06:44 +00004409static int cnic_request_irq(struct cnic_dev *dev)
4410{
4411 struct cnic_local *cp = dev->cnic_priv;
4412 struct cnic_eth_dev *ethdev = cp->ethdev;
4413 int err;
4414
4415 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4416 if (err)
4417 tasklet_disable(&cp->cnic_irq_task);
4418
4419 return err;
4420}
4421
static int cnic_init_bnx2_irq(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;

        if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
                int err, i = 0;
                int sblk_num = cp->status_blk_num;
                u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
                           BNX2_HC_SB_CONFIG_1;

                CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);

                CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
                CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
                CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);

                cp->last_status_idx = cp->status_blk.bnx2->status_idx;
                tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
                             (unsigned long) dev);
                err = cnic_request_irq(dev);
                if (err)
                        return err;

                while (cp->status_blk.bnx2->status_completion_producer_index &&
                       i < 10) {
                        CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
                                1 << (11 + sblk_num));
                        udelay(10);
                        i++;
                        barrier();
                }
                if (cp->status_blk.bnx2->status_completion_producer_index) {
                        cnic_free_irq(dev);
                        goto failed;
                }

        } else {
                struct status_block *sblk = cp->status_blk.gen;
                u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
                int i = 0;

                while (sblk->status_completion_producer_index && i < 10) {
                        CNIC_WR(dev, BNX2_HC_COMMAND,
                                hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                        udelay(10);
                        i++;
                        barrier();
                }
                if (sblk->status_completion_producer_index)
                        goto failed;
        }
        return 0;

failed:
        netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
        return -EBUSY;
}

static void cnic_enable_bnx2_int(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;

        if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
                return;

        CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
                BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;

        if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
                return;

        CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
        synchronize_irq(ethdev->irq_arr[0].vector);
}

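/* The bnx2 L2 TX ring lives in on-chip context memory.  Every BD is
 * pre-pointed at the shared UIO packet buffer (udev->l2_buf_map), so in
 * effect only lengths and the producer index change at run time.
 */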
static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;
        struct cnic_uio_dev *udev = cp->udev;
        u32 cid_addr, tx_cid, sb_id;
        u32 val, offset0, offset1, offset2, offset3;
        int i;
        struct bnx2_tx_bd *txbd;
        dma_addr_t buf_map, ring_map = udev->l2_ring_map;
        struct status_block *s_blk = cp->status_blk.gen;

        sb_id = cp->status_blk_num;
        tx_cid = 20;
        cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
        if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
                struct status_block_msix *sblk = cp->status_blk.bnx2;

                tx_cid = TX_TSS_CID + sb_id - 1;
                CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
                        (TX_TSS_CID << 7));
                cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
        }
        cp->tx_cons = *cp->tx_cons_ptr;

        cid_addr = GET_CID_ADDR(tx_cid);
        if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
                u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;

                for (i = 0; i < PHY_CTX_SIZE; i += 4)
                        cnic_ctx_wr(dev, cid_addr2, i, 0);

                offset0 = BNX2_L2CTX_TYPE_XI;
                offset1 = BNX2_L2CTX_CMD_TYPE_XI;
                offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
                offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
        } else {
                cnic_init_context(dev, tx_cid);
                cnic_init_context(dev, tx_cid + 1);

                offset0 = BNX2_L2CTX_TYPE;
                offset1 = BNX2_L2CTX_CMD_TYPE;
                offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
                offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
        }
        val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
        cnic_ctx_wr(dev, cid_addr, offset0, val);

        val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
        cnic_ctx_wr(dev, cid_addr, offset1, val);

        txbd = udev->l2_ring;

        buf_map = udev->l2_buf_map;
        for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) {
                txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
                txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
        }
        val = (u64) ring_map >> 32;
        cnic_ctx_wr(dev, cid_addr, offset2, val);
        txbd->tx_bd_haddr_hi = val;

        val = (u64) ring_map & 0xffffffff;
        cnic_ctx_wr(dev, cid_addr, offset3, val);
        txbd->tx_bd_haddr_lo = val;
}

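/* The L2 RX ring starts one BNX2_PAGE_SIZE into the UIO ring buffer.
 * The BD left over after the fill loop is written with the ring's own
 * base address, which in effect chains the page back to its start.
 */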
static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;
        struct cnic_uio_dev *udev = cp->udev;
        u32 cid_addr, sb_id, val, coal_reg, coal_val;
        int i;
        struct bnx2_rx_bd *rxbd;
        struct status_block *s_blk = cp->status_blk.gen;
        dma_addr_t ring_map = udev->l2_ring_map;

        sb_id = cp->status_blk_num;
        cnic_init_context(dev, 2);
        cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
        coal_reg = BNX2_HC_COMMAND;
        coal_val = CNIC_RD(dev, coal_reg);
        if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
                struct status_block_msix *sblk = cp->status_blk.bnx2;

                cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
                coal_reg = BNX2_HC_COALESCE_NOW;
                coal_val = 1 << (11 + sb_id);
        }
        i = 0;
        while (*cp->rx_cons_ptr == 0 && i < 10) {
                CNIC_WR(dev, coal_reg, coal_val);
                udelay(10);
                i++;
                barrier();
        }
        cp->rx_cons = *cp->rx_cons_ptr;

        cid_addr = GET_CID_ADDR(2);
        val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
              BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
        cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);

        if (sb_id == 0)
                val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
        else
                val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
        cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);

        rxbd = udev->l2_ring + BNX2_PAGE_SIZE;
        for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
                dma_addr_t buf_map;
                int n = (i % cp->l2_rx_ring_size) + 1;

                buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
                rxbd->rx_bd_len = cp->l2_single_buf_size;
                rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
                rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
                rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
        }
        val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
        cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
        rxbd->rx_bd_haddr_hi = val;

        val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
        cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
        rxbd->rx_bd_haddr_lo = val;

        val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
        cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
}

static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
{
        struct kwqe *wqes[1], l2kwqe;

        memset(&l2kwqe, 0, sizeof(l2kwqe));
        wqes[0] = &l2kwqe;
        l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
                              (L2_KWQE_OPCODE_VALUE_FLUSH <<
                               KWQE_OPCODE_SHIFT) | 2;
        dev->submit_kwqes(dev, wqes, 1);
}

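/* The iSCSI MAC address is published by the bootcode in shared memory;
 * read it out and program the EMAC match and RPM sort registers so
 * iSCSI frames are steered to this function.
 */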
static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        u32 val;

        val = cp->func << 2;

        cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);

        val = cnic_reg_rd_ind(dev, cp->shmem_base +
                              BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
        dev->mac_addr[0] = (u8) (val >> 8);
        dev->mac_addr[1] = (u8) val;

        CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);

        val = cnic_reg_rd_ind(dev, cp->shmem_base +
                              BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
        dev->mac_addr[2] = (u8) (val >> 24);
        dev->mac_addr[3] = (u8) (val >> 16);
        dev->mac_addr[4] = (u8) (val >> 8);
        dev->mac_addr[5] = (u8) val;

        CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);

        val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
        if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
                val |= BNX2_RPM_SORT_USER2_PROM_VLAN;

        CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
        CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
        CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}

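/* bnx2 bring-up: configure the MQ and host-coalescing blocks, then set
 * up the kernel work queue (KWQ, host-to-chip commands) and kernel
 * completion queue (KCQ, chip-to-host completions) contexts that carry
 * the iSCSI offload traffic.
 */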
static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;
        struct status_block *sblk = cp->status_blk.gen;
        u32 val, kcq_cid_addr, kwq_cid_addr;
        int err;

        cnic_set_bnx2_mac(dev);

        val = CNIC_RD(dev, BNX2_MQ_CONFIG);
        val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
        if (BNX2_PAGE_BITS > 12)
                val |= (12 - 8) << 4;
        else
                val |= (BNX2_PAGE_BITS - 8) << 4;

        CNIC_WR(dev, BNX2_MQ_CONFIG, val);

        CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
        CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
        CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

        err = cnic_setup_5709_context(dev, 1);
        if (err)
                return err;

        cnic_init_context(dev, KWQ_CID);
        cnic_init_context(dev, KCQ_CID);

        kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
        cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

        cp->max_kwq_idx = MAX_KWQ_IDX;
        cp->kwq_prod_idx = 0;
        cp->kwq_con_idx = 0;
        set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

        if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708)
                cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
        else
                cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

        /* Initialize the kernel work queue context. */
        val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
              (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
        cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);

        val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
        cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

        val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
        cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

        val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
        cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

        val = (u32) cp->kwq_info.pgtbl_map;
        cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

        kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
        cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

        cp->kcq1.sw_prod_idx = 0;
        cp->kcq1.hw_prod_idx_ptr =
                &sblk->status_completion_producer_index;

        cp->kcq1.status_idx_ptr = &sblk->status_idx;

        /* Initialize the kernel complete queue context. */
        val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
              (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
        cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);

        val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
        cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

        val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
        cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

        val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
        cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

        val = (u32) cp->kcq1.dma.pgtbl_map;
        cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

        cp->int_num = 0;
        if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
                struct status_block_msix *msblk = cp->status_blk.bnx2;
                u32 sb_id = cp->status_blk_num;
                u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

                cp->kcq1.hw_prod_idx_ptr =
                        &msblk->status_completion_producer_index;
                cp->kcq1.status_idx_ptr = &msblk->status_idx;
                cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
                cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
                cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
                cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
        }

        /* Enable Command Scheduler notification when we write to the
         * host producer index of the kernel contexts. */
        CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

        /* Enable Command Scheduler notification when we write to either
         * the Send Queue or Receive Queue producer indexes of the kernel
         * bypass contexts. */
        CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
        CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

        /* Notify COM when the driver posts an application buffer. */
        CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

        /* Set the CP and COM doorbells.  These two processors poll the
         * doorbell for a non-zero value before running.  This must be done
         * after setting up the kernel queue contexts. */
        cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
        cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

        cnic_init_bnx2_tx_ring(dev);
        cnic_init_bnx2_rx_ring(dev);

        err = cnic_init_bnx2_irq(dev);
        if (err) {
                netdev_err(dev->netdev, "cnic_init_irq failed\n");
                cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
                cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
                return err;
        }

        return 0;
}

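/* bnx2x parts use host-resident context blocks: align each block's DMA
 * address as required and write it into the device context table
 * starting at ethdev->ctx_tbl_offset.
 */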
static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;
        u32 start_offset = ethdev->ctx_tbl_offset;
        int i;

        for (i = 0; i < cp->ctx_blks; i++) {
                struct cnic_ctx *ctx = &cp->ctx_arr[i];
                dma_addr_t map = ctx->mapping;

                if (cp->ctx_align) {
                        unsigned long mask = cp->ctx_align - 1;

                        map = (map + mask) & ~mask;
                }

                cnic_ctx_tbl_wr(dev, start_offset + i, map);
        }
}

static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;
        int err = 0;

        tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
                     (unsigned long) dev);
        if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
                err = cnic_request_irq(dev);

        return err;
}

static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
                                                u16 sb_id, u8 sb_index,
                                                u8 disable)
{
        struct bnx2x *bp = netdev_priv(dev->netdev);

        u32 addr = BAR_CSTRORM_INTMEM +
                        CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
                        offsetof(struct hc_status_block_data_e1x, index_data) +
                        sizeof(struct hc_index_data)*sb_index +
                        offsetof(struct hc_index_data, flags);
        u16 flags = CNIC_RD16(dev, addr);
        /* clear and set */
        flags &= ~HC_INDEX_DATA_HC_ENABLED;
        flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
                  HC_INDEX_DATA_HC_ENABLED);
        CNIC_WR16(dev, addr, flags);
}

static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct bnx2x *bp = netdev_priv(dev->netdev);
        u8 sb_id = cp->status_blk_num;

        CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
                        CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
                        offsetof(struct hc_status_block_data_e1x, index_data) +
                        sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
                        offsetof(struct hc_index_data, timeout), 64 / 4);
        cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}

static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}

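/* On bnx2x the L2 rings are configured through a client_init ramrod
 * rather than direct context writes: the two helpers below build the
 * TX BD chain and the RX BD/CQE chains in the UIO ring buffer and fill
 * their addresses into the ramrod data block that cnic_init_rings()
 * later submits.
 */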
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
                                    struct client_init_ramrod_data *data)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_uio_dev *udev = cp->udev;
        union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
        dma_addr_t buf_map, ring_map = udev->l2_ring_map;
        struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
        int i;
        u32 cli = cp->ethdev->iscsi_l2_client_id;
        u32 val;

        memset(txbd, 0, BNX2_PAGE_SIZE);

        buf_map = udev->l2_buf_map;
        for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
                struct eth_tx_start_bd *start_bd = &txbd->start_bd;
                struct eth_tx_parse_bd_e1x *pbd_e1x =
                        &((txbd + 1)->parse_bd_e1x);
                struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
                struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

                start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
                start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
                reg_bd->addr_hi = start_bd->addr_hi;
                reg_bd->addr_lo = start_bd->addr_lo + 0x10;
                start_bd->nbytes = cpu_to_le16(0x10);
                start_bd->nbd = cpu_to_le16(3);
                start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
                start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
                start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

                if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
                        pbd_e2->parsing_data = (UNICAST_ADDRESS <<
                                ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
                else
                        pbd_e1x->global_data = (UNICAST_ADDRESS <<
                                ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
        }

        val = (u64) ring_map >> 32;
        txbd->next_bd.addr_hi = cpu_to_le32(val);

        data->tx.tx_bd_page_base.hi = cpu_to_le32(val);

        val = (u64) ring_map & 0xffffffff;
        txbd->next_bd.addr_lo = cpu_to_le32(val);

        data->tx.tx_bd_page_base.lo = cpu_to_le32(val);

        /* Other ramrod params */
        data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
        data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;

        /* reset xstorm per client statistics */
        if (cli < MAX_STAT_COUNTER_ID) {
                data->general.statistics_zero_flg = 1;
                data->general.statistics_en_flg = 1;
                data->general.statistics_counter_id = cli;
        }

        cp->tx_cons_ptr =
                &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
}

static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
                                    struct client_init_ramrod_data *data)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_uio_dev *udev = cp->udev;
        struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
                                BNX2_PAGE_SIZE);
        struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
                                (udev->l2_ring + (2 * BNX2_PAGE_SIZE));
        struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
        int i;
        u32 cli = cp->ethdev->iscsi_l2_client_id;
        int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
        u32 val;
        dma_addr_t ring_map = udev->l2_ring_map;

        /* General data */
        data->general.client_id = cli;
        data->general.activate_flg = 1;
        data->general.sp_client_id = cli;
        data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
        data->general.func_id = cp->pfid;

        for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
                dma_addr_t buf_map;
                int n = (i % cp->l2_rx_ring_size) + 1;

                buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
                rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
                rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
        }

        val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
        rxbd->addr_hi = cpu_to_le32(val);
        data->rx.bd_page_base.hi = cpu_to_le32(val);

        val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
        rxbd->addr_lo = cpu_to_le32(val);
        data->rx.bd_page_base.lo = cpu_to_le32(val);

        rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
        val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32;
        rxcqe->addr_hi = cpu_to_le32(val);
        data->rx.cqe_page_base.hi = cpu_to_le32(val);

        val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 0xffffffff;
        rxcqe->addr_lo = cpu_to_le32(val);
        data->rx.cqe_page_base.lo = cpu_to_le32(val);

        /* Other ramrod params */
        data->rx.client_qzone_id = cl_qzone_id;
        data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
        data->rx.status_block_id = BNX2X_DEF_SB_ID;

        data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;

        data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
        data->rx.outer_vlan_removal_enable_flg = 1;
        data->rx.silent_vlan_removal_flg = 1;
        data->rx.silent_vlan_value = 0;
        data->rx.silent_vlan_mask = 0xffff;

        cp->rx_cons_ptr =
                &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
        cp->rx_cons = *cp->rx_cons_ptr;
}

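/* Locate the KCQ producer and status indexes in the chip's status
 * blocks; E2 and newer parts also expose a second queue (KCQ2) that
 * carries FCoE event completions.
 */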
static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct bnx2x *bp = netdev_priv(dev->netdev);
        u32 pfid = cp->pfid;

        cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
                           CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
        cp->kcq1.sw_prod_idx = 0;

        if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
                struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

                cp->kcq1.hw_prod_idx_ptr =
                        &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
                cp->kcq1.status_idx_ptr =
                        &sb->sb.running_index[SM_RX_ID];
        } else {
                struct host_hc_status_block_e1x *sb = cp->status_blk.gen;

                cp->kcq1.hw_prod_idx_ptr =
                        &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
                cp->kcq1.status_idx_ptr =
                        &sb->sb.running_index[SM_RX_ID];
        }

        if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
                struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

                cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
                                        USTORM_FCOE_EQ_PROD_OFFSET(pfid);
                cp->kcq2.sw_prod_idx = 0;
                cp->kcq2.hw_prod_idx_ptr =
                        &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
                cp->kcq2.status_idx_ptr =
                        &sb->sb.running_index[SM_RX_ID];
        }
}

static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct bnx2x *bp = netdev_priv(dev->netdev);
        struct cnic_eth_dev *ethdev = cp->ethdev;
        int func, ret;
        u32 pfid;

        dev->stats_addr = ethdev->addr_drv_info_to_mcp;
        cp->port_mode = bp->common.chip_port_mode;
        cp->pfid = bp->pfid;
        cp->func = bp->pf_num;

        func = CNIC_FUNC(cp);
        pfid = cp->pfid;

        ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
                               cp->iscsi_start_cid, 0);

        if (ret)
                return -ENOMEM;

        if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
                ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
                                       cp->fcoe_start_cid, 0);

                if (ret)
                        return -ENOMEM;
        }

        cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;

        cnic_init_bnx2x_kcq(dev);

        /* Only 1 EQ */
        CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
        CNIC_WR(dev, BAR_CSTRORM_INTMEM +
                CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
        CNIC_WR(dev, BAR_CSTRORM_INTMEM +
                CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
                cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
        CNIC_WR(dev, BAR_CSTRORM_INTMEM +
                CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
                (u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
        CNIC_WR(dev, BAR_CSTRORM_INTMEM +
                CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
                cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
        CNIC_WR(dev, BAR_CSTRORM_INTMEM +
                CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
                (u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
        CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
                 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
        CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
                  CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
        CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
                 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
                 HC_INDEX_ISCSI_EQ_CONS);

        CNIC_WR(dev, BAR_USTRORM_INTMEM +
                USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
                cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
        CNIC_WR(dev, BAR_USTRORM_INTMEM +
                USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
                (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

        CNIC_WR(dev, BAR_TSTRORM_INTMEM +
                TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);

        cnic_setup_bnx2x_context(dev);

        ret = cnic_init_bnx2x_irq(dev);
        if (ret)
                return ret;

        return 0;
}

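/* Bring the iSCSI L2 rings up.  On bnx2x this submits an ETH
 * CLIENT_SETUP ramrod built from the data block prepared by the ring
 * init helpers and polls up to ~10 ms for the completion event before
 * enabling the ring.
 */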
static void cnic_init_rings(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct bnx2x *bp = netdev_priv(dev->netdev);
        struct cnic_uio_dev *udev = cp->udev;

        if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
                return;

        if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
                cnic_init_bnx2_tx_ring(dev);
                cnic_init_bnx2_rx_ring(dev);
                set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
        } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
                u32 cli = cp->ethdev->iscsi_l2_client_id;
                u32 cid = cp->ethdev->iscsi_l2_cid;
                u32 cl_qzone_id;
                struct client_init_ramrod_data *data;
                union l5cm_specific_data l5_data;
                struct ustorm_eth_rx_producers rx_prods = {0};
                u32 off, i, *cid_ptr;

                rx_prods.bd_prod = 0;
                rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
                barrier();

                cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);

                off = BAR_USTRORM_INTMEM +
                        (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
                         USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
                         USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));

                for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
                        CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

                set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

                data = udev->l2_buf;
                cid_ptr = udev->l2_buf + 12;

                memset(data, 0, sizeof(*data));

                cnic_init_bnx2x_tx_ring(dev, data);
                cnic_init_bnx2x_rx_ring(dev, data);

                l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
                l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;

                set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);

                cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
                        cid, ETH_CONNECTION_TYPE, &l5_data);

                i = 0;
                while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
                       ++i < 10)
                        msleep(1);

                if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
                        netdev_err(dev->netdev,
                                "iSCSI CLIENT_SETUP did not complete\n");
                cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
                cnic_ring_ctl(dev, cid, cli, 1);
                *cid_ptr = cid;
        }
}

static void cnic_shutdown_rings(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_uio_dev *udev = cp->udev;
        void *rx_ring;

        if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
                return;

        if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
                cnic_shutdown_bnx2_rx_ring(dev);
        } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
                u32 cli = cp->ethdev->iscsi_l2_client_id;
                u32 cid = cp->ethdev->iscsi_l2_cid;
                union l5cm_specific_data l5_data;
                int i;

                cnic_ring_ctl(dev, cid, cli, 0);

                set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

                l5_data.phy_address.lo = cli;
                l5_data.phy_address.hi = 0;
                cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
                        cid, ETH_CONNECTION_TYPE, &l5_data);
                i = 0;
                while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
                       ++i < 10)
                        msleep(1);

                if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
                        netdev_err(dev->netdev,
                                "iSCSI CLIENT_HALT did not complete\n");
                cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);

                memset(&l5_data, 0, sizeof(l5_data));
                cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
                        cid, NONE_CONNECTION_TYPE, &l5_data);
                msleep(10);
        }
        clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
        rx_ring = udev->l2_ring + BNX2_PAGE_SIZE;
        memset(rx_ring, 0, BNX2_PAGE_SIZE);
}

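/* Attach to or detach from the underlying ethernet driver.  The
 * drv_register_cnic() callback is what lets bnx2/bnx2x start routing
 * interrupts and events to this cnic instance.
 */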
static int cnic_register_netdev(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;
        int err;

        if (!ethdev)
                return -ENODEV;

        if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
                return 0;

        err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
        if (err)
                netdev_err(dev->netdev, "register_cnic failed\n");

        return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;

        if (!ethdev)
                return;

        ethdev->drv_unregister_cnic(dev->netdev);
}

static int cnic_start_hw(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;
        int err;

        if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
                return -EALREADY;

        dev->regview = ethdev->io_base;
        pci_dev_get(dev->pcidev);
        cp->func = PCI_FUNC(dev->pcidev->devfn);
        cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
        cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

        err = cp->alloc_resc(dev);
        if (err) {
                netdev_err(dev->netdev, "allocate resource failure\n");
                goto err1;
        }

        err = cp->start_hw(dev);
        if (err)
                goto err1;

        err = cnic_cm_open(dev);
        if (err)
                goto err1;

        set_bit(CNIC_F_CNIC_UP, &dev->flags);

        cp->enable_int(dev);

        return 0;

err1:
        cp->free_resc(dev);
        pci_dev_put(dev->pcidev);
        return err;
}

static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
        cnic_disable_bnx2_int_sync(dev);

        cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
        cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

        cnic_init_context(dev, KWQ_CID);
        cnic_init_context(dev, KCQ_CID);

        cnic_setup_5709_context(dev, 0);
        cnic_free_irq(dev);

        cnic_free_resc(dev);
}

static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
        struct cnic_local *cp = dev->cnic_priv;
        struct bnx2x *bp = netdev_priv(dev->netdev);
        u32 hc_index = HC_INDEX_ISCSI_EQ_CONS;
        u32 sb_id = cp->status_blk_num;
        u32 idx_off, syn_off;

        cnic_free_irq(dev);

        if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
                idx_off = offsetof(struct hc_status_block_e2, index_values) +
                          (hc_index * sizeof(u16));

                syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id);
        } else {
                idx_off = offsetof(struct hc_status_block_e1x, index_values) +
                          (hc_index * sizeof(u16));

                syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id);
        }
        CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
        CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
                  idx_off, 0);

        *cp->kcq1.hw_prod_idx_ptr = 0;
        CNIC_WR(dev, BAR_CSTRORM_INTMEM +
                CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
        CNIC_WR16(dev, cp->kcq1.io_addr, 0);
        cnic_free_resc(dev);
}

static void cnic_stop_hw(struct cnic_dev *dev)
{
        if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
                struct cnic_local *cp = dev->cnic_priv;
                int i = 0;

                /* Need to wait for the ring shutdown event to complete
                 * before clearing the CNIC_UP flag.
                 */
                while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
                        msleep(100);
                        i++;
                }
                cnic_shutdown_rings(dev);
                cp->stop_cm(dev);
                clear_bit(CNIC_F_CNIC_UP, &dev->flags);
                RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
                synchronize_rcu();
                cnic_cm_shutdown(dev);
                cp->stop_hw(dev);
                pci_dev_put(dev->pcidev);
        }
}

static void cnic_free_dev(struct cnic_dev *dev)
{
        int i = 0;

        while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
                msleep(100);
                i++;
        }
        if (atomic_read(&dev->ref_count) != 0)
                netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");

        netdev_info(dev->netdev, "Removed CNIC device\n");
        dev_put(dev->netdev);
        kfree(dev);
}

static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
                                       struct pci_dev *pdev)
{
        struct cnic_dev *cdev;
        struct cnic_local *cp;
        int alloc_size;

        alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

        cdev = kzalloc(alloc_size, GFP_KERNEL);
        if (cdev == NULL) {
                netdev_err(dev, "allocate dev struct failure\n");
                return NULL;
        }

        cdev->netdev = dev;
        cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
        cdev->register_device = cnic_register_device;
        cdev->unregister_device = cnic_unregister_device;
        cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

        cp = cdev->cnic_priv;
        cp->dev = cdev;
        cp->l2_single_buf_size = 0x400;
        cp->l2_rx_ring_size = 3;

        spin_lock_init(&cp->cnic_ulp_lock);

        netdev_info(dev, "Added CNIC device\n");

        return cdev;
}

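/* Device discovery: the bnx2/bnx2x probe entry points are resolved
 * with symbol_get() so that cnic can load without creating a hard
 * module dependency on either ethernet driver.
 */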
static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
        struct pci_dev *pdev;
        struct cnic_dev *cdev;
        struct cnic_local *cp;
        struct cnic_eth_dev *ethdev = NULL;
        struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

        probe = symbol_get(bnx2_cnic_probe);
        if (probe) {
                ethdev = (*probe)(dev);
                symbol_put(bnx2_cnic_probe);
        }
        if (!ethdev)
                return NULL;

        pdev = ethdev->pdev;
        if (!pdev)
                return NULL;

        dev_hold(dev);
        pci_dev_get(pdev);
        if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
             pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
            (pdev->revision < 0x10)) {
                pci_dev_put(pdev);
                goto cnic_err;
        }
        pci_dev_put(pdev);

        cdev = cnic_alloc_dev(dev, pdev);
        if (cdev == NULL)
                goto cnic_err;

        set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
        cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

        cp = cdev->cnic_priv;
        cp->ethdev = ethdev;
        cdev->pcidev = pdev;
        cp->chip_id = ethdev->chip_id;

        cdev->max_iscsi_conn = ethdev->max_iscsi_conn;

        cp->cnic_ops = &cnic_bnx2_ops;
        cp->start_hw = cnic_start_bnx2_hw;
        cp->stop_hw = cnic_stop_bnx2_hw;
        cp->setup_pgtbl = cnic_setup_page_tbl;
        cp->alloc_resc = cnic_alloc_bnx2_resc;
        cp->free_resc = cnic_free_resc;
        cp->start_cm = cnic_cm_init_bnx2_hw;
        cp->stop_cm = cnic_cm_stop_bnx2_hw;
        cp->enable_int = cnic_enable_bnx2_int;
        cp->disable_int_sync = cnic_disable_bnx2_int_sync;
        cp->close_conn = cnic_close_bnx2_conn;
        return cdev;

cnic_err:
        dev_put(dev);
        return NULL;
}

static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
        struct pci_dev *pdev;
        struct cnic_dev *cdev;
        struct cnic_local *cp;
        struct cnic_eth_dev *ethdev = NULL;
        struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

        probe = symbol_get(bnx2x_cnic_probe);
        if (probe) {
                ethdev = (*probe)(dev);
                symbol_put(bnx2x_cnic_probe);
        }
        if (!ethdev)
                return NULL;

        pdev = ethdev->pdev;
        if (!pdev)
                return NULL;

        dev_hold(dev);
        cdev = cnic_alloc_dev(dev, pdev);
        if (cdev == NULL) {
                dev_put(dev);
                return NULL;
        }

        set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
        cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

        cp = cdev->cnic_priv;
        cp->ethdev = ethdev;
        cdev->pcidev = pdev;
        cp->chip_id = ethdev->chip_id;

        cdev->stats_addr = ethdev->addr_drv_info_to_mcp;

        if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
                cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
        if (CNIC_SUPPORTS_FCOE(cp))
                cdev->max_fcoe_conn = ethdev->max_fcoe_conn;

        if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
                cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;

        memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);

        cp->cnic_ops = &cnic_bnx2x_ops;
        cp->start_hw = cnic_start_bnx2x_hw;
        cp->stop_hw = cnic_stop_bnx2x_hw;
        cp->setup_pgtbl = cnic_setup_page_tbl_le;
        cp->alloc_resc = cnic_alloc_bnx2x_resc;
        cp->free_resc = cnic_free_resc;
        cp->start_cm = cnic_cm_init_bnx2x_hw;
        cp->stop_cm = cnic_cm_stop_bnx2x_hw;
        cp->enable_int = cnic_enable_bnx2x_int;
        cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
        if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
                cp->ack_int = cnic_ack_bnx2x_e2_msix;
                cp->arm_int = cnic_arm_bnx2x_e2_msix;
        } else {
                cp->ack_int = cnic_ack_bnx2x_msix;
                cp->arm_int = cnic_arm_bnx2x_msix;
        }
        cp->close_conn = cnic_close_bnx2x_conn;
        return cdev;
}

static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
        struct ethtool_drvinfo drvinfo;
        struct cnic_dev *cdev = NULL;

        if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
                memset(&drvinfo, 0, sizeof(drvinfo));
                dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

                if (!strcmp(drvinfo.driver, "bnx2"))
                        cdev = init_bnx2_cnic(dev);
                if (!strcmp(drvinfo.driver, "bnx2x"))
                        cdev = init_bnx2x_cnic(dev);
                if (cdev) {
                        write_lock(&cnic_dev_lock);
                        list_add(&cdev->list, &cnic_dev_list);
                        write_unlock(&cnic_dev_lock);
                }
        }
        return cdev;
}

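/* Fan a netdev event out to every registered upper-layer protocol
 * (iSCSI, FCoE, L4).  The ulp_ops table is RCU-protected, so the walk
 * runs under rcu_read_lock() and tolerates concurrent unregistration.
 */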
static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
                              u16 vlan_id)
{
        int if_type;

        rcu_read_lock();
        for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
                struct cnic_ulp_ops *ulp_ops;
                void *ctx;

                ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
                if (!ulp_ops || !ulp_ops->indicate_netevent)
                        continue;

                ctx = cp->ulp_handle[if_type];

                ulp_ops->indicate_netevent(ctx, event, vlan_id);
        }
        rcu_read_unlock();
}

/* netdev event handler */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
                             void *ptr)
{
        struct net_device *netdev = ptr;
        struct cnic_dev *dev;
        int new_dev = 0;

        dev = cnic_from_netdev(netdev);

        if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
                /* Check for the hot-plug device */
                dev = is_cnic_dev(netdev);
                if (dev) {
                        new_dev = 1;
                        cnic_hold(dev);
                }
        }
        if (dev) {
                struct cnic_local *cp = dev->cnic_priv;

                if (new_dev)
                        cnic_ulp_init(dev);
                else if (event == NETDEV_UNREGISTER)
                        cnic_ulp_exit(dev);

                if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
                        if (cnic_register_netdev(dev) != 0) {
                                cnic_put(dev);
                                goto done;
                        }
                        if (!cnic_start_hw(dev))
                                cnic_ulp_start(dev);
                }

                cnic_rcv_netevent(cp, event, 0);

                if (event == NETDEV_GOING_DOWN) {
                        cnic_ulp_stop(dev);
                        cnic_stop_hw(dev);
                        cnic_unregister_netdev(dev);
                } else if (event == NETDEV_UNREGISTER) {
                        write_lock(&cnic_dev_lock);
                        list_del_init(&dev->list);
                        write_unlock(&cnic_dev_lock);

                        cnic_put(dev);
                        cnic_free_dev(dev);
                        goto done;
                }
                cnic_put(dev);
        } else {
                struct net_device *realdev;
                u16 vid;

                vid = cnic_get_vlan(netdev, &realdev);
                if (realdev) {
                        dev = cnic_from_netdev(realdev);
                        if (dev) {
                                vid |= VLAN_TAG_PRESENT;
                                cnic_rcv_netevent(dev->cnic_priv, event, vid);
                                cnic_put(dev);
                        }
                }
        }
done:
        return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
        .notifier_call = cnic_netdev_event
};

static void cnic_release(void)
{
        struct cnic_dev *dev;
        struct cnic_uio_dev *udev;

        while (!list_empty(&cnic_dev_list)) {
                dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
                if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
                        cnic_ulp_stop(dev);
                        cnic_stop_hw(dev);
                }

                cnic_ulp_exit(dev);
                cnic_unregister_netdev(dev);
                list_del_init(&dev->list);
                cnic_free_dev(dev);
        }
        while (!list_empty(&cnic_udev_list)) {
                udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
                                  list);
                cnic_free_uio(udev);
        }
}

static int __init cnic_init(void)
{
        int rc = 0;

        pr_info("%s", version);

        rc = register_netdevice_notifier(&cnic_netdev_notifier);
        if (rc) {
                cnic_release();
                return rc;
        }

        cnic_wq = create_singlethread_workqueue("cnic_wq");
        if (!cnic_wq) {
                cnic_release();
                unregister_netdevice_notifier(&cnic_netdev_notifier);
                return -ENOMEM;
        }

        return 0;
}

static void __exit cnic_exit(void)
{
        unregister_netdevice_notifier(&cnic_netdev_notifier);
        cnic_release();
        destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);