/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#define BCM_CNIC 1
#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
#include "cnic.h"
#include "cnic_defs.h"

#define CNIC_MODULE_NAME	"cnic"

static char version[] =
	"Broadcom NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

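/*
 * UIO file operations for the cnic device.  Opening requires CAP_NET_ADMIN,
 * only one opener is allowed at a time (uio_dev tracks the active minor),
 * and the L2 rings are shut down and re-initialized so the userspace
 * consumer always starts from a clean ring state.
 */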
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

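/*
 * The helpers below wrap ethdev->drv_ctl() so that context memory,
 * indirect registers and L2 rings are always programmed through the
 * underlying bnx2/bnx2x netdev driver instead of touching the hardware
 * directly from this module.
 */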
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct fcoe_capabilities *fcoe_cap =
		&info.data.register_data.fcoe_features;

	if (reg) {
		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
	} else {
		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
	}

	info.data.ulp_type = ulp_type;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

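/* Translate a hardware CID into its index (L5 CID) in the context table. */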
static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	if (!cp->ctx_tbl)
		return -EINVAL;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

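/*
 * Send a message to the iSCSI ULP through its iscsi_nl_send_msg()
 * callback.  When a socket is supplied, an ISCSI_KEVENT_PATH_REQ carrying
 * the destination address, VLAN and path MTU is built; otherwise an
 * ISCSI_KEVENT_IF_DOWN is sent.  PATH_REQ messages are retried up to
 * three times, 100 ms apart.
 */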
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return rc;
}

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			csk->vlan_id = path_resp->vlan_id;

			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

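/*
 * The *_prep() helpers below serialize offload, close and abort requests
 * on a cnic_sock using the SK_F_OFFLD_SCHED bit: a request may proceed
 * only after it has claimed the bit, and close/abort wait for any offload
 * already in progress to finish first.
 */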
static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		pr_warn("%s: Failed waiting for ref count to go to zero\n",
			__func__);
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	cnic_ulp_ctl(dev, ulp_type, true);

	return 0;

}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	else if (ulp_type == CNIC_ULP_FCOE)
		dev->fcoe_cap = NULL;

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	cnic_ulp_ctl(dev, ulp_type, false);

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

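/*
 * Simple bitmap-based ID allocator used for the iSCSI/FCoE connection ID
 * tables.  IDs are offset by 'start' and handed out round-robin
 * beginning at 'next'.
 */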
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
			    u32 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

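/*
 * Allocate a set of coherent DMA pages (and, optionally, the page table
 * that points to them) for a queue.  The chip-specific setup_pgtbl hook
 * above decides whether the page table entries are written in big or
 * little endian order.
 */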
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BNX2_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) &
			  ~(BNX2_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
{
	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}

}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	__cnic_free_uio_rings(udev);

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
		if (udev->uio_dev == -1)
			__cnic_free_uio_rings(udev);
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BNX2_PAGE_SIZE;
		cp->cids_per_blk = BNX2_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BNX2_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

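/*
 * KCQ index helpers.  On bnx2 the index simply advances by one.  On
 * bnx2x the last entry of every KCQ page holds the next-page pointer, so
 * both the next-index and hardware-index calculations skip that slot.
 */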
static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}

static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
			  bool use_pg_tbl)
{
	int err, i, use_page_tbl = 0;
	struct kcqe **kcq;

	if (use_pg_tbl)
		use_page_tbl = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	info->next_idx = cnic_bnx2_next_idx;
	info->hw_idx = cnic_bnx2_hw_idx;
	if (use_pg_tbl)
		return 0;

	info->next_idx = cnic_bnx2x_next_idx;
	info->hw_idx = cnic_bnx2x_hw_idx;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}

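/*
 * Allocate the L2 ring and receive buffer areas that are later exported
 * to userspace through UIO memory regions 2 and 3.
 */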
static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
{
	struct cnic_local *cp = udev->dev->cnic_priv;

	if (udev->l2_ring)
		return 0;

	udev->l2_ring_size = pages * BNX2_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		return -ENOMEM;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf) {
		__cnic_free_uio_rings(udev);
		return -ENOMEM;
	}

	return 0;

}

static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			if (__cnic_alloc_uio_rings(udev, pages)) {
				udev->dev = NULL;
				read_unlock(&cnic_dev_lock);
				return -ENOMEM;
			}
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;

	if (__cnic_alloc_uio_rings(udev, pages))
		goto err_udev;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;

 err_udev:
	kfree(udev);
	return -ENOMEM;
}

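/*
 * Register the UIO device that exposes the adapter to userspace: mem[0]
 * is the PCI register window, mem[1] the status block(s), and
 * mem[2]/mem[3] the L2 ring and buffer memory allocated above.  The
 * device is named "bnx2_cnic" or "bnx2x_cnic" depending on the chip
 * class.
 */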
static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
						     TX_MAX_TSS_RINGS + 1);
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);

		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}

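/*
 * Allocate all bnx2 resources: the KWQ work queue, the KCQ completion
 * queue (with a page table), context memory and the UIO rings.
 */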
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!CHIP_IS_E1(bp))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		cp->max_cid_space += dev->max_fcoe_conn;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
	if (ret)
		goto error;

	if (CNIC_SUPPORTS_FCOE(bp)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
		if (ret)
			goto error;
	}

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		return 0;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

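/*
 * Number of free slots in the bnx2 KWQ ring: the producer/consumer
 * distance is masked with max_kwq_idx so the computation wraps with the
 * ring.
 */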
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}

static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

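/*
 * Build a single l5cm_spe slow-path element (connection type and PF id
 * encoded in the header, payload address taken from l5_data) and submit
 * it through drv_submit_kwqes_16().  The driver returns the number of
 * elements accepted, so a return of 1 means success here.
 */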
static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return ret;
}

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}

static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
				       int en_tcp_dack)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (time_stamps) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}
	if (en_tcp_dack)
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}

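/*
 * ISCSI_KWQE_INIT1 handler: capture the per-connection task/ccell sizing
 * and program the global iSCSI parameters (queue sizes, page size, task
 * counts) into the Tstorm, Ustorm, Xstorm and Cstorm RAM areas for this
 * PF.
 */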
Michael Chan71034ba2009-10-10 13:46:59 +00001454static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1455{
1456 struct cnic_local *cp = dev->cnic_priv;
Michael Chan68c64d22012-12-06 10:33:11 +00001457 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chan71034ba2009-10-10 13:46:59 +00001458 struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
Michael Chan14203982010-10-06 03:16:06 +00001459 int hq_bds, pages;
1460 u32 pfid = cp->pfid;
Michael Chan71034ba2009-10-10 13:46:59 +00001461
1462 cp->num_iscsi_tasks = req1->num_tasks_per_conn;
1463 cp->num_ccells = req1->num_ccells_per_conn;
1464 cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
1465 cp->num_iscsi_tasks;
1466 cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
1467 BNX2X_ISCSI_R2TQE_SIZE;
1468 cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
1469 pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
1470 hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
1471 cp->num_cqs = req1->num_cqs;
1472
1473 if (!dev->max_iscsi_conn)
1474 return 0;
1475
1476 /* init Tstorm RAM */
Michael Chan14203982010-10-06 03:16:06 +00001477 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00001478 req1->rq_num_wqes);
Michael Chan14203982010-10-06 03:16:06 +00001479 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00001480 PAGE_SIZE);
1481 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001482 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
Michael Chan71034ba2009-10-10 13:46:59 +00001483 CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001484 TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00001485 req1->num_tasks_per_conn);
1486
1487 /* init Ustorm RAM */
1488 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001489 USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00001490 req1->rq_buffer_size);
Michael Chan14203982010-10-06 03:16:06 +00001491 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00001492 PAGE_SIZE);
1493 CNIC_WR8(dev, BAR_USTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001494 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
Michael Chan71034ba2009-10-10 13:46:59 +00001495 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001496 USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00001497 req1->num_tasks_per_conn);
Michael Chan14203982010-10-06 03:16:06 +00001498 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00001499 req1->rq_num_wqes);
Michael Chan14203982010-10-06 03:16:06 +00001500 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00001501 req1->cq_num_wqes);
Michael Chan14203982010-10-06 03:16:06 +00001502 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00001503 cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1504
1505 /* init Xstorm RAM */
Michael Chan14203982010-10-06 03:16:06 +00001506 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00001507 PAGE_SIZE);
1508 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001509 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
Michael Chan71034ba2009-10-10 13:46:59 +00001510 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001511 XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00001512 req1->num_tasks_per_conn);
Michael Chan14203982010-10-06 03:16:06 +00001513 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00001514 hq_bds);
Michael Chan14203982010-10-06 03:16:06 +00001515 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00001516 req1->num_tasks_per_conn);
Michael Chan14203982010-10-06 03:16:06 +00001517 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00001518 cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1519
1520 /* init Cstorm RAM */
Michael Chan14203982010-10-06 03:16:06 +00001521 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00001522 PAGE_SIZE);
1523 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001524 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
Michael Chan71034ba2009-10-10 13:46:59 +00001525 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001526 CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00001527 req1->num_tasks_per_conn);
Michael Chan14203982010-10-06 03:16:06 +00001528 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00001529 req1->cq_num_wqes);
Michael Chan14203982010-10-06 03:16:06 +00001530 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00001531 hq_bds);
1532
Eddie Waib3bd2d62013-07-28 19:03:58 -07001533 cnic_bnx2x_set_tcp_options(dev,
1534 req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
1535 req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);
1536
Michael Chan71034ba2009-10-10 13:46:59 +00001537 return 0;
1538}
1539
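/* ISCSI_KWQE_OPCODE_INIT2 handler: program the iSCSI error bitmaps and
 * the CQ sequence-number sizes into the storm RAM and complete the INIT
 * request with a single KCQE back to the iSCSI ULP driver.
 */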
1540static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
1541{
1542 struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
1543 struct cnic_local *cp = dev->cnic_priv;
Michael Chan68c64d22012-12-06 10:33:11 +00001544 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chan14203982010-10-06 03:16:06 +00001545 u32 pfid = cp->pfid;
Michael Chan71034ba2009-10-10 13:46:59 +00001546 struct iscsi_kcqe kcqe;
1547 struct kcqe *cqes[1];
1548
1549 memset(&kcqe, 0, sizeof(kcqe));
1550 if (!dev->max_iscsi_conn) {
1551 kcqe.completion_status =
1552 ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
1553 goto done;
1554 }
1555
1556 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001557 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
Michael Chan71034ba2009-10-10 13:46:59 +00001558 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001559 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
Michael Chan71034ba2009-10-10 13:46:59 +00001560 req2->error_bit_map[1]);
1561
1562 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001563 USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
Michael Chan71034ba2009-10-10 13:46:59 +00001564 CNIC_WR(dev, BAR_USTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001565 USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
Michael Chan71034ba2009-10-10 13:46:59 +00001566 CNIC_WR(dev, BAR_USTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001567 USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
Michael Chan71034ba2009-10-10 13:46:59 +00001568 req2->error_bit_map[1]);
1569
1570 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001571 CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
Michael Chan71034ba2009-10-10 13:46:59 +00001572
1573 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1574
1575done:
1576 kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
1577 cqes[0] = (struct kcqe *) &kcqe;
1578 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1579
1580 return 0;
1581}
1582
1583static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1584{
1585 struct cnic_local *cp = dev->cnic_priv;
1586 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1587
1588 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
1589 struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1590
1591 cnic_free_dma(dev, &iscsi->hq_info);
1592 cnic_free_dma(dev, &iscsi->r2tq_info);
1593 cnic_free_dma(dev, &iscsi->task_array_info);
Michael Chane1928c82010-12-23 07:43:04 +00001594 cnic_free_id(&cp->cid_tbl, ctx->cid);
1595 } else {
1596 cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
Michael Chan71034ba2009-10-10 13:46:59 +00001597 }
Michael Chane1928c82010-12-23 07:43:04 +00001598
Michael Chan71034ba2009-10-10 13:46:59 +00001599 ctx->cid = 0;
1600}
1601
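/* Reserve a hardware CID for the connection and, for iSCSI, allocate
 * the task array, R2T queue and HQ DMA areas sized by
 * cnic_bnx2x_iscsi_init1().  A partial allocation is unwound through
 * cnic_free_bnx2x_conn_resc().
 */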
1602static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1603{
1604 u32 cid;
1605 int ret, pages;
1606 struct cnic_local *cp = dev->cnic_priv;
1607 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1608 struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1609
Michael Chane1928c82010-12-23 07:43:04 +00001610 if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
1611 cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
1612 if (cid == -1) {
1613 ret = -ENOMEM;
1614 goto error;
1615 }
1616 ctx->cid = cid;
1617 return 0;
1618 }
1619
Michael Chan71034ba2009-10-10 13:46:59 +00001620 cid = cnic_alloc_new_id(&cp->cid_tbl);
1621 if (cid == -1) {
1622 ret = -ENOMEM;
1623 goto error;
1624 }
1625
1626 ctx->cid = cid;
1627 pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
1628
1629 ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
1630 if (ret)
1631 goto error;
1632
1633 pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
1634 ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
1635 if (ret)
1636 goto error;
1637
1638 pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
1639 ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
1640 if (ret)
1641 goto error;
1642
1643 return 0;
1644
1645error:
1646 cnic_free_bnx2x_conn_resc(dev, l5_cid);
1647 return ret;
1648}
1649
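/* Locate the hardware context slot for @cid inside the preallocated
 * context blocks:
 *
 *   blk     = (cid - starting_cid) / cids_per_blk;
 *   off     = (cid - starting_cid) % cids_per_blk;
 *   ctx_map = ctx_arr[blk].mapping + align_off + off * BNX2X_CONTEXT_MEM_SIZE;
 *
 * The DMA address is returned through @ctx_addr, and the slot is zeroed
 * when @init is set.
 */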
1650static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
1651 struct regpair *ctx_addr)
1652{
1653 struct cnic_local *cp = dev->cnic_priv;
1654 struct cnic_eth_dev *ethdev = cp->ethdev;
1655 int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
1656 int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
1657 unsigned long align_off = 0;
1658 dma_addr_t ctx_map;
1659 void *ctx;
1660
1661 if (cp->ctx_align) {
1662 unsigned long mask = cp->ctx_align - 1;
1663
1664 if (cp->ctx_arr[blk].mapping & mask)
1665 align_off = cp->ctx_align -
1666 (cp->ctx_arr[blk].mapping & mask);
1667 }
1668 ctx_map = cp->ctx_arr[blk].mapping + align_off +
1669 (off * BNX2X_CONTEXT_MEM_SIZE);
1670 ctx = cp->ctx_arr[blk].ctx + align_off +
1671 (off * BNX2X_CONTEXT_MEM_SIZE);
1672 if (init)
1673 memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
1674
1675 ctx_addr->lo = ctx_map & 0xffffffff;
1676 ctx_addr->hi = (u64) ctx_map >> 32;
1677 return ctx;
1678}
1679
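/* Build the iSCSI hardware context for an offloaded connection from the
 * OFFLOAD_CONN1/2/3 kwqes: SQ, CQ, RQ, HQ and R2TQ page tables, default
 * burst lengths and the per-storm TCP parameters.
 */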
1680static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1681 u32 num)
1682{
1683 struct cnic_local *cp = dev->cnic_priv;
Michael Chan104a43e2013-09-02 11:42:28 -07001684 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chan71034ba2009-10-10 13:46:59 +00001685 struct iscsi_kwqe_conn_offload1 *req1 =
1686 (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1687 struct iscsi_kwqe_conn_offload2 *req2 =
1688 (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1689 struct iscsi_kwqe_conn_offload3 *req3;
1690 struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
1691 struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1692 u32 cid = ctx->cid;
Michael Chanceb7e1c2010-10-06 03:14:54 +00001693 u32 hw_cid = BNX2X_HW_CID(cp, cid);
Michael Chan71034ba2009-10-10 13:46:59 +00001694 struct iscsi_context *ictx;
1695 struct regpair context_addr;
1696 int i, j, n = 2, n_max;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001697 u8 port = CNIC_PORT(cp);
Michael Chan71034ba2009-10-10 13:46:59 +00001698
1699 ctx->ctx_flags = 0;
1700 if (!req2->num_additional_wqes)
1701 return -EINVAL;
1702
1703 n_max = req2->num_additional_wqes + 2;
1704
1705 ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
1706 if (ictx == NULL)
1707 return -ENOMEM;
1708
1709 req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1710
1711 ictx->xstorm_ag_context.hq_prod = 1;
1712
1713 ictx->xstorm_st_context.iscsi.first_burst_length =
1714 ISCSI_DEF_FIRST_BURST_LEN;
1715 ictx->xstorm_st_context.iscsi.max_send_pdu_length =
1716 ISCSI_DEF_MAX_RECV_SEG_LEN;
1717 ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
1718 req1->sq_page_table_addr_lo;
1719 ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
1720 req1->sq_page_table_addr_hi;
1721 ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
1722 ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
1723 ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
1724 iscsi->hq_info.pgtbl_map & 0xffffffff;
1725 ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
1726 (u64) iscsi->hq_info.pgtbl_map >> 32;
1727 ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
1728 iscsi->hq_info.pgtbl[0];
1729 ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
1730 iscsi->hq_info.pgtbl[1];
1731 ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
1732 iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1733 ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
1734 (u64) iscsi->r2tq_info.pgtbl_map >> 32;
1735 ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
1736 iscsi->r2tq_info.pgtbl[0];
1737 ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
1738 iscsi->r2tq_info.pgtbl[1];
1739 ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
1740 iscsi->task_array_info.pgtbl_map & 0xffffffff;
1741 ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
1742 (u64) iscsi->task_array_info.pgtbl_map >> 32;
1743 ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
1744 BNX2X_ISCSI_PBL_NOT_CACHED;
1745 ictx->xstorm_st_context.iscsi.flags.flags |=
1746 XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
1747 ictx->xstorm_st_context.iscsi.flags.flags |=
1748 XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001749 ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
1750 ETH_P_8021Q;
Michael Chan104a43e2013-09-02 11:42:28 -07001751 if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001752 cp->port_mode == CHIP_2_PORT_MODE) {
1753
1754 port = 0;
1755 }
1756 ictx->xstorm_st_context.common.flags =
1757 1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
1758 ictx->xstorm_st_context.common.flags =
1759 port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
Michael Chan71034ba2009-10-10 13:46:59 +00001760
1761 ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
1762 /* TSTORM requires the base address of RQ DB & not PTE */
1763 ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
1764 req2->rq_page_table_addr_lo & PAGE_MASK;
1765 ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
1766 req2->rq_page_table_addr_hi;
1767 ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
1768 ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
1769 ictx->tstorm_st_context.tcp.flags2 |=
1770 TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001771 ictx->tstorm_st_context.tcp.ooo_support_mode =
1772 TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
Michael Chan71034ba2009-10-10 13:46:59 +00001773
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001774 ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
Michael Chan71034ba2009-10-10 13:46:59 +00001775
1776 ictx->ustorm_st_context.ring.rq.pbl_base.lo =
Michael Chan15971c32009-12-02 15:15:38 +00001777 req2->rq_page_table_addr_lo;
Michael Chan71034ba2009-10-10 13:46:59 +00001778 ictx->ustorm_st_context.ring.rq.pbl_base.hi =
Michael Chan15971c32009-12-02 15:15:38 +00001779 req2->rq_page_table_addr_hi;
Michael Chan71034ba2009-10-10 13:46:59 +00001780 ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
1781 ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
1782 ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
1783 iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1784 ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
1785 (u64) iscsi->r2tq_info.pgtbl_map >> 32;
1786 ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
1787 iscsi->r2tq_info.pgtbl[0];
1788 ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
1789 iscsi->r2tq_info.pgtbl[1];
1790 ictx->ustorm_st_context.ring.cq_pbl_base.lo =
1791 req1->cq_page_table_addr_lo;
1792 ictx->ustorm_st_context.ring.cq_pbl_base.hi =
1793 req1->cq_page_table_addr_hi;
1794 ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
1795 ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
1796 ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
1797 ictx->ustorm_st_context.task_pbe_cache_index =
1798 BNX2X_ISCSI_PBL_NOT_CACHED;
1799 ictx->ustorm_st_context.task_pdu_cache_index =
1800 BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
1801
1802 for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
1803 if (j == 3) {
1804 if (n >= n_max)
1805 break;
1806 req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1807 j = 0;
1808 }
1809 ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
1810 ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
1811 req3->qp_first_pte[j].hi;
1812 ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
1813 req3->qp_first_pte[j].lo;
1814 }
1815
1816 ictx->ustorm_st_context.task_pbl_base.lo =
1817 iscsi->task_array_info.pgtbl_map & 0xffffffff;
1818 ictx->ustorm_st_context.task_pbl_base.hi =
1819 (u64) iscsi->task_array_info.pgtbl_map >> 32;
1820 ictx->ustorm_st_context.tce_phy_addr.lo =
1821 iscsi->task_array_info.pgtbl[0];
1822 ictx->ustorm_st_context.tce_phy_addr.hi =
1823 iscsi->task_array_info.pgtbl[1];
1824 ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1825 ictx->ustorm_st_context.num_cqs = cp->num_cqs;
1826 ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
1827 ictx->ustorm_st_context.negotiated_rx_and_flags |=
1828 ISCSI_DEF_MAX_BURST_LEN;
1829 ictx->ustorm_st_context.negotiated_rx |=
1830 ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
1831 USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
1832
1833 ictx->cstorm_st_context.hq_pbl_base.lo =
1834 iscsi->hq_info.pgtbl_map & 0xffffffff;
1835 ictx->cstorm_st_context.hq_pbl_base.hi =
1836 (u64) iscsi->hq_info.pgtbl_map >> 32;
1837 ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
1838 ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
1839 ictx->cstorm_st_context.task_pbl_base.lo =
1840 iscsi->task_array_info.pgtbl_map & 0xffffffff;
1841 ictx->cstorm_st_context.task_pbl_base.hi =
1842 (u64) iscsi->task_array_info.pgtbl_map >> 32;
1843	/* CSTORM and USTORM initialization differ: CSTORM requires the
1844	 * CQ DB base, not the PTE address */
1845 ictx->cstorm_st_context.cq_db_base.lo =
1846 req1->cq_page_table_addr_lo & PAGE_MASK;
1847 ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
1848 ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1849 ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
1850 for (i = 0; i < cp->num_cqs; i++) {
1851 ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
1852 ISCSI_INITIAL_SN;
1853 ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
1854 ISCSI_INITIAL_SN;
1855 }
1856
1857 ictx->xstorm_ag_context.cdu_reserved =
1858 CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
1859 ISCSI_CONNECTION_TYPE);
1860 ictx->ustorm_ag_context.cdu_usage =
1861 CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
1862 ISCSI_CONNECTION_TYPE);
1863 return 0;
1864
1865}
1866
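/* ISCSI_KWQE_OPCODE_OFFLOAD_CONN1 handler: validate the kwqe chain,
 * allocate the per-connection resources, build the hardware context and
 * report the result (success, CID busy or allocation failure) with an
 * OFFLOAD_CONN KCQE.  Consumes 2 + num_additional_wqes entries.
 */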
1867static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1868 u32 num, int *work)
1869{
1870 struct iscsi_kwqe_conn_offload1 *req1;
1871 struct iscsi_kwqe_conn_offload2 *req2;
1872 struct cnic_local *cp = dev->cnic_priv;
Michael Chanfdf24082010-10-13 14:06:47 +00001873 struct cnic_context *ctx;
Michael Chan71034ba2009-10-10 13:46:59 +00001874 struct iscsi_kcqe kcqe;
1875 struct kcqe *cqes[1];
1876 u32 l5_cid;
Michael Chanfdf24082010-10-13 14:06:47 +00001877 int ret = 0;
Michael Chan71034ba2009-10-10 13:46:59 +00001878
1879 if (num < 2) {
1880 *work = num;
1881 return -EINVAL;
1882 }
1883
1884 req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1885 req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1886 if ((num - 2) < req2->num_additional_wqes) {
1887 *work = num;
1888 return -EINVAL;
1889 }
Joe Perches779bb412010-11-14 17:04:37 +00001890 *work = 2 + req2->num_additional_wqes;
Michael Chan71034ba2009-10-10 13:46:59 +00001891
1892 l5_cid = req1->iscsi_conn_id;
1893 if (l5_cid >= MAX_ISCSI_TBL_SZ)
1894 return -EINVAL;
1895
1896 memset(&kcqe, 0, sizeof(kcqe));
1897 kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
1898 kcqe.iscsi_conn_id = l5_cid;
1899 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
1900
Michael Chanfdf24082010-10-13 14:06:47 +00001901 ctx = &cp->ctx_tbl[l5_cid];
1902 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
1903 kcqe.completion_status =
1904 ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
1905 goto done;
1906 }
1907
Michael Chan71034ba2009-10-10 13:46:59 +00001908 if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
1909 atomic_dec(&cp->iscsi_conn);
Michael Chan71034ba2009-10-10 13:46:59 +00001910 goto done;
1911 }
1912 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
1913 if (ret) {
1914 atomic_dec(&cp->iscsi_conn);
1915 ret = 0;
1916 goto done;
1917 }
1918 ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
1919 if (ret < 0) {
1920 cnic_free_bnx2x_conn_resc(dev, l5_cid);
1921 atomic_dec(&cp->iscsi_conn);
1922 goto done;
1923 }
1924
1925 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
Michael Chanceb7e1c2010-10-06 03:14:54 +00001926 kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);
Michael Chan71034ba2009-10-10 13:46:59 +00001927
1928done:
1929 cqes[0] = (struct kcqe *) &kcqe;
1930 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
Michael Chan23021c22012-01-04 12:12:28 +00001931 return 0;
Michael Chan71034ba2009-10-10 13:46:59 +00001932}
1933
1935static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
1936{
1937 struct cnic_local *cp = dev->cnic_priv;
1938 struct iscsi_kwqe_conn_update *req =
1939 (struct iscsi_kwqe_conn_update *) kwqe;
1940 void *data;
1941 union l5cm_specific_data l5_data;
1942 u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
1943 int ret;
1944
1945 if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
1946 return -EINVAL;
1947
1948 data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
1949 if (!data)
1950 return -ENOMEM;
1951
1952 memcpy(data, kwqe, sizeof(struct kwqe));
1953
1954 ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
1955 req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
1956 return ret;
1957}
1958
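/* Issue a common CFC delete ramrod for the connection's hardware CID and
 * wait up to CNIC_RAMROD_TMO for the completion signalled through
 * ctx->waitq.
 */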
Michael Chana2c9e762010-10-13 14:06:46 +00001959static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
Michael Chan71034ba2009-10-10 13:46:59 +00001960{
1961 struct cnic_local *cp = dev->cnic_priv;
Michael Chan71034ba2009-10-10 13:46:59 +00001962 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
Michael Chana2c9e762010-10-13 14:06:46 +00001963 union l5cm_specific_data l5_data;
1964 int ret;
Michael Chan68d7c1a2011-01-05 15:14:13 +00001965 u32 hw_cid;
Michael Chan71034ba2009-10-10 13:46:59 +00001966
Michael Chan71034ba2009-10-10 13:46:59 +00001967 init_waitqueue_head(&ctx->waitq);
1968 ctx->wait_cond = 0;
1969 memset(&l5_data, 0, sizeof(l5_data));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001970 hw_cid = BNX2X_HW_CID(cp, ctx->cid);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001971
1972 ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
Michael Chan68d7c1a2011-01-05 15:14:13 +00001973 hw_cid, NONE_CONNECTION_TYPE, &l5_data);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001974
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001975 if (ret == 0) {
Michael Chandcc7e3a2011-08-26 09:45:40 +00001976 wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001977 if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
1978 return -EBUSY;
1979 }
Michael Chan71034ba2009-10-10 13:46:59 +00001980
Michael Chandcc7e3a2011-08-26 09:45:40 +00001981 return 0;
Michael Chana2c9e762010-10-13 14:06:46 +00001982}
1983
1984static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
1985{
1986 struct cnic_local *cp = dev->cnic_priv;
1987 struct iscsi_kwqe_conn_destroy *req =
1988 (struct iscsi_kwqe_conn_destroy *) kwqe;
1989 u32 l5_cid = req->reserved0;
1990 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1991 int ret = 0;
1992 struct iscsi_kcqe kcqe;
1993 struct kcqe *cqes[1];
1994
1995 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
1996 goto skip_cfc_delete;
1997
Michael Chanfdf24082010-10-13 14:06:47 +00001998 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
1999 unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;
2000
2001 if (delta > (2 * HZ))
2002 delta = 0;
2003
2004 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2005 queue_delayed_work(cnic_wq, &cp->delete_task, delta);
2006 goto destroy_reply;
2007 }
Michael Chana2c9e762010-10-13 14:06:46 +00002008
2009 ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
2010
Michael Chan71034ba2009-10-10 13:46:59 +00002011skip_cfc_delete:
2012 cnic_free_bnx2x_conn_resc(dev, l5_cid);
2013
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002014 if (!ret) {
2015 atomic_dec(&cp->iscsi_conn);
2016 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2017 }
Michael Chan71034ba2009-10-10 13:46:59 +00002018
Michael Chanfdf24082010-10-13 14:06:47 +00002019destroy_reply:
Michael Chan71034ba2009-10-10 13:46:59 +00002020 memset(&kcqe, 0, sizeof(kcqe));
2021 kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
2022 kcqe.iscsi_conn_id = l5_cid;
2023 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
2024 kcqe.iscsi_conn_context_id = req->context_id;
2025
2026 cqes[0] = (struct kcqe *) &kcqe;
2027 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
2028
Michael Chan23021c22012-01-04 12:12:28 +00002029 return 0;
Michael Chan71034ba2009-10-10 13:46:59 +00002030}
2031
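/* Fill the Xstorm and Tstorm connection buffers for a TCP connect
 * request: context address, MSS, receive buffer, Nagle flag, the
 * pseudo-header checksum computed with csum_ipv6_magic(), and the
 * optional keepalive parameters.
 */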
2032static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
2033 struct l4_kwq_connect_req1 *kwqe1,
2034 struct l4_kwq_connect_req3 *kwqe3,
2035 struct l5cm_active_conn_buffer *conn_buf)
2036{
2037 struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
2038 struct l5cm_xstorm_conn_buffer *xstorm_buf =
2039 &conn_buf->xstorm_conn_buffer;
2040 struct l5cm_tstorm_conn_buffer *tstorm_buf =
2041 &conn_buf->tstorm_conn_buffer;
2042 struct regpair context_addr;
2043 u32 cid = BNX2X_SW_CID(kwqe1->cid);
2044 struct in6_addr src_ip, dst_ip;
2045 int i;
2046 u32 *addrp;
2047
2048 addrp = (u32 *) &conn_addr->local_ip_addr;
2049 for (i = 0; i < 4; i++, addrp++)
2050 src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
2051
2052 addrp = (u32 *) &conn_addr->remote_ip_addr;
2053 for (i = 0; i < 4; i++, addrp++)
2054 dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
2055
2056 cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
2057
2058 xstorm_buf->context_addr.hi = context_addr.hi;
2059 xstorm_buf->context_addr.lo = context_addr.lo;
2060 xstorm_buf->mss = 0xffff;
2061 xstorm_buf->rcv_buf = kwqe3->rcv_buf;
2062 if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
2063 xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
2064 xstorm_buf->pseudo_header_checksum =
2065 swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
2066
Michael Chan71034ba2009-10-10 13:46:59 +00002067 if (kwqe3->ka_timeout) {
2068 tstorm_buf->ka_enable = 1;
2069 tstorm_buf->ka_timeout = kwqe3->ka_timeout;
2070 tstorm_buf->ka_interval = kwqe3->ka_interval;
2071 tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
2072 }
Michael Chan71034ba2009-10-10 13:46:59 +00002073 tstorm_buf->max_rt_time = 0xffffffff;
2074}
2075
2076static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
2077{
2078 struct cnic_local *cp = dev->cnic_priv;
Michael Chan68c64d22012-12-06 10:33:11 +00002079 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chan14203982010-10-06 03:16:06 +00002080 u32 pfid = cp->pfid;
Michael Chan71034ba2009-10-10 13:46:59 +00002081 u8 *mac = dev->mac_addr;
2082
2083 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002084 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
Michael Chan71034ba2009-10-10 13:46:59 +00002085 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002086 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
Michael Chan71034ba2009-10-10 13:46:59 +00002087 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002088 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
Michael Chan71034ba2009-10-10 13:46:59 +00002089 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002090 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
Michael Chan71034ba2009-10-10 13:46:59 +00002091 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002092 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
Michael Chan71034ba2009-10-10 13:46:59 +00002093 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002094 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);
Michael Chan71034ba2009-10-10 13:46:59 +00002095
2096 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002097 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
Michael Chan71034ba2009-10-10 13:46:59 +00002098 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002099 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
Michael Chan71034ba2009-10-10 13:46:59 +00002100 mac[4]);
2101 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002102 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
Michael Chan71034ba2009-10-10 13:46:59 +00002103 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002104 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
Michael Chan71034ba2009-10-10 13:46:59 +00002105 mac[2]);
2106 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002107 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
Michael Chan71034ba2009-10-10 13:46:59 +00002108 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002109 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
Michael Chan71034ba2009-10-10 13:46:59 +00002110 mac[0]);
2111}
2112
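/* L4_KWQE_OPCODE_VALUE_CONNECT1 handler: gather the CONNECT1/2/3 kwqes
 * (three for IPv6, two for IPv4), fill the active connection buffer and
 * submit the L5CM TCP_CONNECT ramrod for the socket's CID.
 */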
Michael Chan71034ba2009-10-10 13:46:59 +00002113static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
2114 u32 num, int *work)
2115{
2116 struct cnic_local *cp = dev->cnic_priv;
Michael Chan68c64d22012-12-06 10:33:11 +00002117 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chan71034ba2009-10-10 13:46:59 +00002118 struct l4_kwq_connect_req1 *kwqe1 =
2119 (struct l4_kwq_connect_req1 *) wqes[0];
2120 struct l4_kwq_connect_req3 *kwqe3;
2121 struct l5cm_active_conn_buffer *conn_buf;
2122 struct l5cm_conn_addr_params *conn_addr;
2123 union l5cm_specific_data l5_data;
2124 u32 l5_cid = kwqe1->pg_cid;
2125 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
2126 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
2127 int ret;
2128
2129 if (num < 2) {
2130 *work = num;
2131 return -EINVAL;
2132 }
2133
2134 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
2135 *work = 3;
2136 else
2137 *work = 2;
2138
2139 if (num < *work) {
2140 *work = num;
2141 return -EINVAL;
2142 }
2143
2144 if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
Joe Perchesddf79b22010-02-17 15:01:54 +00002145 netdev_err(dev->netdev, "conn_buf size too big\n");
Michael Chan71034ba2009-10-10 13:46:59 +00002146 return -ENOMEM;
2147 }
2148 conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2149 if (!conn_buf)
2150 return -ENOMEM;
2151
2152 memset(conn_buf, 0, sizeof(*conn_buf));
2153
2154 conn_addr = &conn_buf->conn_addr_buf;
2155 conn_addr->remote_addr_0 = csk->ha[0];
2156 conn_addr->remote_addr_1 = csk->ha[1];
2157 conn_addr->remote_addr_2 = csk->ha[2];
2158 conn_addr->remote_addr_3 = csk->ha[3];
2159 conn_addr->remote_addr_4 = csk->ha[4];
2160 conn_addr->remote_addr_5 = csk->ha[5];
2161
2162 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
2163 struct l4_kwq_connect_req2 *kwqe2 =
2164 (struct l4_kwq_connect_req2 *) wqes[1];
2165
2166 conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
2167 conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
2168 conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;
2169
2170 conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
2171 conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
2172 conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
2173 conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
2174 }
2175 kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
2176
2177 conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
2178 conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
2179 conn_addr->local_tcp_port = kwqe1->src_port;
2180 conn_addr->remote_tcp_port = kwqe1->dst_port;
2181
2182 conn_addr->pmtu = kwqe3->pmtu;
2183 cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
2184
2185 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002186 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);
Michael Chan71034ba2009-10-10 13:46:59 +00002187
Michael Chan71034ba2009-10-10 13:46:59 +00002188 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
2189 kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2190 if (!ret)
Michael Chan6e0dda02010-10-13 14:06:45 +00002191 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
Michael Chan71034ba2009-10-10 13:46:59 +00002192
2193 return ret;
2194}
2195
2196static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
2197{
2198 struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
2199 union l5cm_specific_data l5_data;
2200 int ret;
2201
2202 memset(&l5_data, 0, sizeof(l5_data));
2203 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
2204 req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2205 return ret;
2206}
2207
2208static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
2209{
2210 struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
2211 union l5cm_specific_data l5_data;
2212 int ret;
2213
2214 memset(&l5_data, 0, sizeof(l5_data));
2215 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
2216 req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2217 return ret;
2218}
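
/* PG offload and update requests are completed locally on bnx2x: no
 * ramrod is submitted, a KCQE is simply echoed back to the L4 ULP.
 */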
2219static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2220{
2221 struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
2222 struct l4_kcq kcqe;
2223 struct kcqe *cqes[1];
2224
2225 memset(&kcqe, 0, sizeof(kcqe));
2226 kcqe.pg_host_opaque = req->host_opaque;
2227 kcqe.pg_cid = req->host_opaque;
2228 kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
2229 cqes[0] = (struct kcqe *) &kcqe;
2230 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2231 return 0;
2232}
2233
2234static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2235{
2236 struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
2237 struct l4_kcq kcqe;
2238 struct kcqe *cqes[1];
2239
2240 memset(&kcqe, 0, sizeof(kcqe));
2241 kcqe.pg_host_opaque = req->pg_host_opaque;
2242 kcqe.pg_cid = req->pg_cid;
2243 kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
2244 cqes[0] = (struct kcqe *) &kcqe;
2245 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2246 return 0;
2247}
2248
Michael Chane1928c82010-12-23 07:43:04 +00002249static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
2250{
2251 struct fcoe_kwqe_stat *req;
2252 struct fcoe_stat_ramrod_params *fcoe_stat;
2253 union l5cm_specific_data l5_data;
2254 struct cnic_local *cp = dev->cnic_priv;
2255 int ret;
2256 u32 cid;
2257
2258 req = (struct fcoe_kwqe_stat *) kwqe;
2259 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2260
2261 fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2262 if (!fcoe_stat)
2263 return -ENOMEM;
2264
2265 memset(fcoe_stat, 0, sizeof(*fcoe_stat));
2266 memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
2267
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002268 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
Michael Chane1928c82010-12-23 07:43:04 +00002269 FCOE_CONNECTION_TYPE, &l5_data);
2270 return ret;
2271}
2272
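/* FCOE_KWQE_OPCODE_INIT1 handler: combine the INIT1/2/3 kwqes into one
 * FCoE init ramrod, point the firmware event queue at kcq2 and submit
 * the INIT_FUNC ramrod on the FCoE init CID.  Always consumes 3 wqes.
 */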
2273static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
2274 u32 num, int *work)
2275{
2276 int ret;
2277 struct cnic_local *cp = dev->cnic_priv;
2278 u32 cid;
2279 struct fcoe_init_ramrod_params *fcoe_init;
2280 struct fcoe_kwqe_init1 *req1;
2281 struct fcoe_kwqe_init2 *req2;
2282 struct fcoe_kwqe_init3 *req3;
2283 union l5cm_specific_data l5_data;
2284
2285 if (num < 3) {
2286 *work = num;
2287 return -EINVAL;
2288 }
2289 req1 = (struct fcoe_kwqe_init1 *) wqes[0];
2290 req2 = (struct fcoe_kwqe_init2 *) wqes[1];
2291 req3 = (struct fcoe_kwqe_init3 *) wqes[2];
2292 if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
2293 *work = 1;
2294 return -EINVAL;
2295 }
2296 if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
2297 *work = 2;
2298 return -EINVAL;
2299 }
2300
2301 if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
2302 netdev_err(dev->netdev, "fcoe_init size too big\n");
2303 return -ENOMEM;
2304 }
2305 fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2306 if (!fcoe_init)
2307 return -ENOMEM;
2308
2309 memset(fcoe_init, 0, sizeof(*fcoe_init));
2310 memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
2311 memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
2312 memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002313 fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
2314 fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
2315 fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
Michael Chane1928c82010-12-23 07:43:04 +00002316
2317 fcoe_init->sb_num = cp->status_blk_num;
2318 fcoe_init->eq_prod = MAX_KCQ_IDX;
2319 fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
2320 cp->kcq2.sw_prod_idx = 0;
2321
2322 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002323 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
Michael Chane1928c82010-12-23 07:43:04 +00002324 FCOE_CONNECTION_TYPE, &l5_data);
2325 *work = 3;
2326 return ret;
2327}
2328
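/* FCOE_KWQE_OPCODE_OFFLOAD_CONN1 handler: consume the four offload
 * kwqes, reserve a CID, seed the CDU fields of the FCoE context and
 * submit the OFFLOAD_CONN ramrod; any failure is reported with a
 * CTX_ALLOC_FAILURE KCQE.
 */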
2329static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2330 u32 num, int *work)
2331{
2332 int ret = 0;
2333 u32 cid = -1, l5_cid;
2334 struct cnic_local *cp = dev->cnic_priv;
2335 struct fcoe_kwqe_conn_offload1 *req1;
2336 struct fcoe_kwqe_conn_offload2 *req2;
2337 struct fcoe_kwqe_conn_offload3 *req3;
2338 struct fcoe_kwqe_conn_offload4 *req4;
2339 struct fcoe_conn_offload_ramrod_params *fcoe_offload;
2340 struct cnic_context *ctx;
2341 struct fcoe_context *fctx;
2342 struct regpair ctx_addr;
2343 union l5cm_specific_data l5_data;
2344 struct fcoe_kcqe kcqe;
2345 struct kcqe *cqes[1];
2346
2347 if (num < 4) {
2348 *work = num;
2349 return -EINVAL;
2350 }
2351 req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
2352 req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
2353 req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
2354 req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
2355
2356 *work = 4;
2357
2358 l5_cid = req1->fcoe_conn_id;
Michael Chandc219a22011-08-26 09:45:39 +00002359 if (l5_cid >= dev->max_fcoe_conn)
Michael Chane1928c82010-12-23 07:43:04 +00002360 goto err_reply;
2361
2362 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2363
2364 ctx = &cp->ctx_tbl[l5_cid];
2365 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2366 goto err_reply;
2367
2368 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
2369 if (ret) {
2370 ret = 0;
2371 goto err_reply;
2372 }
2373 cid = ctx->cid;
2374
2375 fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2376 if (fctx) {
2377 u32 hw_cid = BNX2X_HW_CID(cp, cid);
2378 u32 val;
2379
2380 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
2381 FCOE_CONNECTION_TYPE);
2382 fctx->xstorm_ag_context.cdu_reserved = val;
2383 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
2384 FCOE_CONNECTION_TYPE);
2385 fctx->ustorm_ag_context.cdu_usage = val;
2386 }
2387 if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
2388 netdev_err(dev->netdev, "fcoe_offload size too big\n");
2389 goto err_reply;
2390 }
2391 fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2392 if (!fcoe_offload)
2393 goto err_reply;
2394
2395 memset(fcoe_offload, 0, sizeof(*fcoe_offload));
2396 memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
2397 memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
2398 memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
2399 memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
2400
2401 cid = BNX2X_HW_CID(cp, cid);
2402 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2403 FCOE_CONNECTION_TYPE, &l5_data);
2404 if (!ret)
2405 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2406
2407 return ret;
2408
2409err_reply:
2410 if (cid != -1)
2411 cnic_free_bnx2x_conn_resc(dev, l5_cid);
2412
2413 memset(&kcqe, 0, sizeof(kcqe));
2414 kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
2415 kcqe.fcoe_conn_id = req1->fcoe_conn_id;
2416 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
2417
2418 cqes[0] = (struct kcqe *) &kcqe;
2419 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2420 return ret;
2421}
2422
2423static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2424{
2425 struct fcoe_kwqe_conn_enable_disable *req;
2426 struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2427 union l5cm_specific_data l5_data;
2428 int ret;
2429 u32 cid, l5_cid;
2430 struct cnic_local *cp = dev->cnic_priv;
2431
2432 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2433 cid = req->context_id;
2434 l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2435
2436 if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2437 netdev_err(dev->netdev, "fcoe_enable size too big\n");
2438 return -ENOMEM;
2439 }
2440 fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2441 if (!fcoe_enable)
2442 return -ENOMEM;
2443
2444 memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2445 memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2446 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2447 FCOE_CONNECTION_TYPE, &l5_data);
2448 return ret;
2449}
2450
2451static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2452{
2453 struct fcoe_kwqe_conn_enable_disable *req;
2454 struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2455 union l5cm_specific_data l5_data;
2456 int ret;
2457 u32 cid, l5_cid;
2458 struct cnic_local *cp = dev->cnic_priv;
2459
2460 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2461 cid = req->context_id;
2462 l5_cid = req->conn_id;
Michael Chandc219a22011-08-26 09:45:39 +00002463 if (l5_cid >= dev->max_fcoe_conn)
Michael Chane1928c82010-12-23 07:43:04 +00002464 return -EINVAL;
2465
2466 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2467
2468 if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2469 netdev_err(dev->netdev, "fcoe_disable size too big\n");
2470 return -ENOMEM;
2471 }
2472 fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2473 if (!fcoe_disable)
2474 return -ENOMEM;
2475
2476 memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2477 memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2478 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2479 FCOE_CONNECTION_TYPE, &l5_data);
2480 return ret;
2481}
2482
2483static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2484{
2485 struct fcoe_kwqe_conn_destroy *req;
2486 union l5cm_specific_data l5_data;
2487 int ret;
2488 u32 cid, l5_cid;
2489 struct cnic_local *cp = dev->cnic_priv;
2490 struct cnic_context *ctx;
2491 struct fcoe_kcqe kcqe;
2492 struct kcqe *cqes[1];
2493
2494 req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2495 cid = req->context_id;
2496 l5_cid = req->conn_id;
Michael Chandc219a22011-08-26 09:45:39 +00002497 if (l5_cid >= dev->max_fcoe_conn)
Michael Chane1928c82010-12-23 07:43:04 +00002498 return -EINVAL;
2499
2500 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2501
2502 ctx = &cp->ctx_tbl[l5_cid];
2503
2504 init_waitqueue_head(&ctx->waitq);
2505 ctx->wait_cond = 0;
2506
Michael Chandcc7e3a2011-08-26 09:45:40 +00002507 memset(&kcqe, 0, sizeof(kcqe));
2508 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
Michael Chane1928c82010-12-23 07:43:04 +00002509 memset(&l5_data, 0, sizeof(l5_data));
2510 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2511 FCOE_CONNECTION_TYPE, &l5_data);
2512 if (ret == 0) {
Michael Chandcc7e3a2011-08-26 09:45:40 +00002513 wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
2514 if (ctx->wait_cond)
2515 kcqe.completion_status = 0;
Michael Chane1928c82010-12-23 07:43:04 +00002516 }
2517
Michael Chandcc7e3a2011-08-26 09:45:40 +00002518 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2519 queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));
2520
Michael Chane1928c82010-12-23 07:43:04 +00002521 kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2522 kcqe.fcoe_conn_id = req->conn_id;
2523 kcqe.fcoe_conn_context_id = cid;
2524
2525 cqes[0] = (struct kcqe *) &kcqe;
2526 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2527 return ret;
2528}
2529
Michael Chan74e49bb2011-07-20 14:55:23 +00002530static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
2531{
2532 struct cnic_local *cp = dev->cnic_priv;
2533 u32 i;
2534
2535 for (i = start_cid; i < cp->max_cid_space; i++) {
2536 struct cnic_context *ctx = &cp->ctx_tbl[i];
2537 int j;
2538
2539 while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
2540 msleep(10);
2541
2542 for (j = 0; j < 5; j++) {
2543 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2544 break;
2545 msleep(20);
2546 }
2547
2548 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2549 netdev_warn(dev->netdev, "CID %x not deleted\n",
2550 ctx->cid);
2551 }
2552}
2553
Michael Chane1928c82010-12-23 07:43:04 +00002554static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2555{
2556 struct fcoe_kwqe_destroy *req;
2557 union l5cm_specific_data l5_data;
2558 struct cnic_local *cp = dev->cnic_priv;
2559 int ret;
2560 u32 cid;
2561
Michael Chan74e49bb2011-07-20 14:55:23 +00002562 cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
2563
Michael Chane1928c82010-12-23 07:43:04 +00002564 req = (struct fcoe_kwqe_destroy *) kwqe;
2565 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2566
2567 memset(&l5_data, 0, sizeof(l5_data));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002568 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
Michael Chane1928c82010-12-23 07:43:04 +00002569 FCOE_CONNECTION_TYPE, &l5_data);
2570 return ret;
2571}
2572
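/* Synthesize an error KCQE for a kwqe that could not be submitted (for
 * example after a bnx2x parity error), so the FCoE, iSCSI or L4 ULP can
 * finish its cleanup without waiting for a completion that will never
 * arrive.
 */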
Michael Chan23021c22012-01-04 12:12:28 +00002573static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
2574{
2575 struct cnic_local *cp = dev->cnic_priv;
2576 struct kcqe kcqe;
2577 struct kcqe *cqes[1];
2578 u32 cid;
2579 u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2580 u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
Michael Chan3238a9b2012-02-05 15:24:40 +00002581 u32 kcqe_op;
Michael Chan23021c22012-01-04 12:12:28 +00002582 int ulp_type;
2583
2584 cid = kwqe->kwqe_info0;
2585 memset(&kcqe, 0, sizeof(kcqe));
2586
Michael Chan3238a9b2012-02-05 15:24:40 +00002587 if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
2588 u32 l5_cid = 0;
2589
2590 ulp_type = CNIC_ULP_FCOE;
2591 if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
2592 struct fcoe_kwqe_conn_enable_disable *req;
2593
2594 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2595 kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
2596 cid = req->context_id;
2597 l5_cid = req->conn_id;
2598 } else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
2599 kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
2600 } else {
2601 return;
2602 }
2603 kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
2604 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
Michael Chan8ec3e702012-03-21 15:38:34 +00002605 kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
Michael Chan3238a9b2012-02-05 15:24:40 +00002606 kcqe.kcqe_info2 = cid;
2607 kcqe.kcqe_info0 = l5_cid;
2608
2609 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
Michael Chan23021c22012-01-04 12:12:28 +00002610 ulp_type = CNIC_ULP_ISCSI;
2611 if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
2612 cid = kwqe->kwqe_info1;
2613
2614 kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
2615 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
Michael Chan8ec3e702012-03-21 15:38:34 +00002616 kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
Michael Chan23021c22012-01-04 12:12:28 +00002617 kcqe.kcqe_info2 = cid;
2618 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
2619
2620 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
2621 struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
Michael Chan23021c22012-01-04 12:12:28 +00002622
2623 ulp_type = CNIC_ULP_L4;
2624 if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
2625 kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
2626 else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
2627 kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
2628 else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
2629 kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
2630 else
2631 return;
2632
2633 kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
2634 KCQE_FLAGS_LAYER_MASK_L4;
Michael Chan8ec3e702012-03-21 15:38:34 +00002635 l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
Michael Chan23021c22012-01-04 12:12:28 +00002636 l4kcqe->cid = cid;
2637 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
2638 } else {
2639 return;
2640 }
2641
Joe Perches64699332012-06-04 12:44:16 +00002642 cqes[0] = &kcqe;
Michael Chan23021c22012-01-04 12:12:28 +00002643 cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
2644}
2645
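/* Dispatch a batch of iSCSI/L4 kwqes to the per-opcode handlers above.
 * Multi-wqe opcodes report how many entries they consumed through *work;
 * -EIO/-EAGAIN failures are turned into error KCQEs by
 * cnic_bnx2x_kwqe_err().
 */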
Michael Chane1928c82010-12-23 07:43:04 +00002646static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2647 struct kwqe *wqes[], u32 num_wqes)
Michael Chan71034ba2009-10-10 13:46:59 +00002648{
2649 int i, work, ret;
2650 u32 opcode;
2651 struct kwqe *kwqe;
2652
2653 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2654		return -EAGAIN;		/* bnx2x is down */
2655
2656 for (i = 0; i < num_wqes; ) {
2657 kwqe = wqes[i];
2658 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2659 work = 1;
2660
2661 switch (opcode) {
2662 case ISCSI_KWQE_OPCODE_INIT1:
2663 ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2664 break;
2665 case ISCSI_KWQE_OPCODE_INIT2:
2666 ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2667 break;
2668 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2669 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2670 num_wqes - i, &work);
2671 break;
2672 case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2673 ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2674 break;
2675 case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2676 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2677 break;
2678 case L4_KWQE_OPCODE_VALUE_CONNECT1:
2679 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2680 &work);
2681 break;
2682 case L4_KWQE_OPCODE_VALUE_CLOSE:
2683 ret = cnic_bnx2x_close(dev, kwqe);
2684 break;
2685 case L4_KWQE_OPCODE_VALUE_RESET:
2686 ret = cnic_bnx2x_reset(dev, kwqe);
2687 break;
2688 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2689 ret = cnic_bnx2x_offload_pg(dev, kwqe);
2690 break;
2691 case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2692 ret = cnic_bnx2x_update_pg(dev, kwqe);
2693 break;
2694 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2695 ret = 0;
2696 break;
2697 default:
2698 ret = 0;
Joe Perchesddf79b22010-02-17 15:01:54 +00002699 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2700 opcode);
Michael Chan71034ba2009-10-10 13:46:59 +00002701 break;
2702 }
Michael Chan23021c22012-01-04 12:12:28 +00002703 if (ret < 0) {
Joe Perchesddf79b22010-02-17 15:01:54 +00002704 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2705 opcode);
Michael Chan23021c22012-01-04 12:12:28 +00002706
2707 /* Possibly bnx2x parity error, send completion
2708 * to ulp drivers with error code to speed up
2709 * cleanup and reset recovery.
2710 */
2711 if (ret == -EIO || ret == -EAGAIN)
2712 cnic_bnx2x_kwqe_err(dev, kwqe);
2713 }
Michael Chan71034ba2009-10-10 13:46:59 +00002714 i += work;
2715 }
2716 return 0;
2717}
2718
Michael Chane1928c82010-12-23 07:43:04 +00002719static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2720 struct kwqe *wqes[], u32 num_wqes)
2721{
Michael Chan104a43e2013-09-02 11:42:28 -07002722 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chane1928c82010-12-23 07:43:04 +00002723 int i, work, ret;
2724 u32 opcode;
2725 struct kwqe *kwqe;
2726
2727 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2728		return -EAGAIN;		/* bnx2x is down */
2729
Michael Chan104a43e2013-09-02 11:42:28 -07002730 if (!BNX2X_CHIP_IS_E2_PLUS(bp))
Michael Chane1928c82010-12-23 07:43:04 +00002731 return -EINVAL;
2732
2733 for (i = 0; i < num_wqes; ) {
2734 kwqe = wqes[i];
2735 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2736 work = 1;
2737
2738 switch (opcode) {
2739 case FCOE_KWQE_OPCODE_INIT1:
2740 ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2741 num_wqes - i, &work);
2742 break;
2743 case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2744 ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2745 num_wqes - i, &work);
2746 break;
2747 case FCOE_KWQE_OPCODE_ENABLE_CONN:
2748 ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2749 break;
2750 case FCOE_KWQE_OPCODE_DISABLE_CONN:
2751 ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2752 break;
2753 case FCOE_KWQE_OPCODE_DESTROY_CONN:
2754 ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2755 break;
2756 case FCOE_KWQE_OPCODE_DESTROY:
2757 ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2758 break;
2759 case FCOE_KWQE_OPCODE_STAT:
2760 ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2761 break;
2762 default:
2763 ret = 0;
2764 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2765 opcode);
2766 break;
2767 }
Michael Chan3238a9b2012-02-05 15:24:40 +00002768 if (ret < 0) {
Michael Chane1928c82010-12-23 07:43:04 +00002769 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2770 opcode);
Michael Chan3238a9b2012-02-05 15:24:40 +00002771
2772 /* Possibly bnx2x parity error, send completion
2773 * to ulp drivers with error code to speed up
2774 * cleanup and reset recovery.
2775 */
2776 if (ret == -EIO || ret == -EAGAIN)
2777 cnic_bnx2x_kwqe_err(dev, kwqe);
2778 }
Michael Chane1928c82010-12-23 07:43:04 +00002779 i += work;
2780 }
2781 return 0;
2782}
2783
2784static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2785 u32 num_wqes)
2786{
2787 int ret = -EINVAL;
2788 u32 layer_code;
2789
2790 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2791 return -EAGAIN; /* bnx2x is down */
2792
2793 if (!num_wqes)
2794 return 0;
2795
2796 layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2797 switch (layer_code) {
2798 case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2799 case KWQE_FLAGS_LAYER_MASK_L4:
2800 case KWQE_FLAGS_LAYER_MASK_L2:
2801 ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2802 break;
2803
2804 case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2805 ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2806 break;
2807 }
2808 return ret;
2809}
2810
2811static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2812{
2813 if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2814 return KCQE_FLAGS_LAYER_MASK_L4;
2815
2816 return opflag & KCQE_FLAGS_LAYER_MASK;
2817}
2818
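/* Deliver completed KCQEs to the owning ULP.  Consecutive entries that
 * belong to the same protocol layer are batched into one indicate_kcqes()
 * call, and ramrod completions are counted so the SPQ credit can be
 * returned afterwards via cnic_spq_completion().
 */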
Michael Chana4636962009-06-08 18:14:43 -07002819static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2820{
2821 struct cnic_local *cp = dev->cnic_priv;
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00002822 int i, j, comp = 0;
Michael Chana4636962009-06-08 18:14:43 -07002823
2824 i = 0;
2825 j = 1;
2826 while (num_cqes) {
2827 struct cnic_ulp_ops *ulp_ops;
2828 int ulp_type;
2829 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
Michael Chane1928c82010-12-23 07:43:04 +00002830 u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
Michael Chana4636962009-06-08 18:14:43 -07002831
2832 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00002833 comp++;
Michael Chana4636962009-06-08 18:14:43 -07002834
2835 while (j < num_cqes) {
2836 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2837
Michael Chane1928c82010-12-23 07:43:04 +00002838 if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
Michael Chana4636962009-06-08 18:14:43 -07002839 break;
2840
2841 if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00002842 comp++;
Michael Chana4636962009-06-08 18:14:43 -07002843 j++;
2844 }
2845
2846 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2847 ulp_type = CNIC_ULP_RDMA;
2848 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2849 ulp_type = CNIC_ULP_ISCSI;
Michael Chane1928c82010-12-23 07:43:04 +00002850 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2851 ulp_type = CNIC_ULP_FCOE;
Michael Chana4636962009-06-08 18:14:43 -07002852 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2853 ulp_type = CNIC_ULP_L4;
2854 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2855 goto end;
2856 else {
Joe Perchesddf79b22010-02-17 15:01:54 +00002857 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2858 kcqe_op_flag);
Michael Chana4636962009-06-08 18:14:43 -07002859 goto end;
2860 }
2861
2862 rcu_read_lock();
2863 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2864 if (likely(ulp_ops)) {
2865 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2866 cp->completed_kcq + i, j);
2867 }
2868 rcu_read_unlock();
2869end:
2870 num_cqes -= j;
2871 i += j;
2872 j = 1;
2873 }
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00002874 if (unlikely(comp))
2875 cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
Michael Chana4636962009-06-08 18:14:43 -07002876}
2877
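/* Gather newly produced KCQEs into cp->completed_kcq[], stopping at the
 * hardware producer index or MAX_COMPLETED_KCQE entries, and only up to
 * the last entry without KCQE_FLAGS_NEXT so that chained completions are
 * never split across calls.
 */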
Michael Chan644b9d42010-06-24 14:58:40 +00002878static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
Michael Chana4636962009-06-08 18:14:43 -07002879{
2880 struct cnic_local *cp = dev->cnic_priv;
Michael Chan644b9d42010-06-24 14:58:40 +00002881 u16 i, ri, hw_prod, last;
Michael Chana4636962009-06-08 18:14:43 -07002882 struct kcqe *kcqe;
2883 int kcqe_cnt = 0, last_cnt = 0;
2884
Michael Chan644b9d42010-06-24 14:58:40 +00002885 i = ri = last = info->sw_prod_idx;
Michael Chana4636962009-06-08 18:14:43 -07002886 ri &= MAX_KCQ_IDX;
Michael Chan644b9d42010-06-24 14:58:40 +00002887 hw_prod = *info->hw_prod_idx_ptr;
Michael Chan59e51372011-06-14 01:32:38 +00002888 hw_prod = info->hw_idx(hw_prod);
Michael Chana4636962009-06-08 18:14:43 -07002889
2890 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
Michael Chan644b9d42010-06-24 14:58:40 +00002891 kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
Michael Chana4636962009-06-08 18:14:43 -07002892 cp->completed_kcq[kcqe_cnt++] = kcqe;
Michael Chan59e51372011-06-14 01:32:38 +00002893 i = info->next_idx(i);
Michael Chana4636962009-06-08 18:14:43 -07002894 ri = i & MAX_KCQ_IDX;
2895 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2896 last_cnt = kcqe_cnt;
2897 last = i;
2898 }
2899 }
2900
Michael Chan644b9d42010-06-24 14:58:40 +00002901 info->sw_prod_idx = last;
Michael Chana4636962009-06-08 18:14:43 -07002902 return last_cnt;
2903}
2904
Michael Chan48f753d2010-05-18 11:32:53 +00002905static int cnic_l2_completion(struct cnic_local *cp)
2906{
2907 u16 hw_cons, sw_cons;
Michael Chancd801532010-10-13 14:06:49 +00002908 struct cnic_uio_dev *udev = cp->udev;
Michael Chan48f753d2010-05-18 11:32:53 +00002909 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
Michael Chan2bc40782012-12-06 10:33:09 +00002910 (udev->l2_ring + (2 * BNX2_PAGE_SIZE));
Michael Chan48f753d2010-05-18 11:32:53 +00002911 u32 cmd;
2912 int comp = 0;
2913
2914 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2915 return 0;
2916
2917 hw_cons = *cp->rx_cons_ptr;
2918 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2919 hw_cons++;
2920
2921 sw_cons = cp->rx_cons;
2922 while (sw_cons != hw_cons) {
2923 u8 cqe_fp_flags;
2924
2925 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2926 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2927 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2928 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2929 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2930 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2931 cmd == RAMROD_CMD_ID_ETH_HALT)
2932 comp++;
2933 }
2934 sw_cons = BNX2X_NEXT_RCQE(sw_cons);
2935 }
2936 return comp;
2937}
2938
Michael Chan86b53602009-10-10 13:46:57 +00002939static void cnic_chk_pkt_rings(struct cnic_local *cp)
Michael Chana4636962009-06-08 18:14:43 -07002940{
Michael Chan541a7812010-10-06 03:17:22 +00002941 u16 rx_cons, tx_cons;
Michael Chan48f753d2010-05-18 11:32:53 +00002942 int comp = 0;
Michael Chana4636962009-06-08 18:14:43 -07002943
Michael Chan541a7812010-10-06 03:17:22 +00002944 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
Michael Chan66fee9e2010-06-24 14:58:38 +00002945 return;
2946
Michael Chan541a7812010-10-06 03:17:22 +00002947 rx_cons = *cp->rx_cons_ptr;
2948 tx_cons = *cp->tx_cons_ptr;
Michael Chana4636962009-06-08 18:14:43 -07002949 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
Michael Chan48f753d2010-05-18 11:32:53 +00002950 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2951 comp = cnic_l2_completion(cp);
2952
Michael Chana4636962009-06-08 18:14:43 -07002953 cp->tx_cons = tx_cons;
2954 cp->rx_cons = rx_cons;
Michael Chan71034ba2009-10-10 13:46:59 +00002955
Michael Chancd801532010-10-13 14:06:49 +00002956 if (cp->udev)
2957 uio_event_notify(&cp->udev->cnic_uinfo);
Michael Chana4636962009-06-08 18:14:43 -07002958 }
Michael Chan48f753d2010-05-18 11:32:53 +00002959 if (comp)
2960 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
Michael Chana4636962009-06-08 18:14:43 -07002961}
2962
Michael Chanb177a5d52010-06-24 14:58:41 +00002963static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
Michael Chana4636962009-06-08 18:14:43 -07002964{
Michael Chana4636962009-06-08 18:14:43 -07002965 struct cnic_local *cp = dev->cnic_priv;
Michael Chanb177a5d52010-06-24 14:58:41 +00002966 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
Michael Chana4636962009-06-08 18:14:43 -07002967 int kcqe_cnt;
2968
Michael Chan107c3f42011-03-02 13:00:49 +00002969 /* status block index must be read before reading other fields */
2970 rmb();
Michael Chana4636962009-06-08 18:14:43 -07002971 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2972
Michael Chan644b9d42010-06-24 14:58:40 +00002973 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
Michael Chana4636962009-06-08 18:14:43 -07002974
2975 service_kcqes(dev, kcqe_cnt);
2976
2977 /* Tell compiler that status_blk fields can change. */
2978 barrier();
Michael Chan93736652011-06-08 19:29:32 +00002979 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2980 /* status block index must be read first */
2981 rmb();
2982 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
Michael Chana4636962009-06-08 18:14:43 -07002983 }
2984
Michael Chan644b9d42010-06-24 14:58:40 +00002985 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
Michael Chana4636962009-06-08 18:14:43 -07002986
Michael Chan86b53602009-10-10 13:46:57 +00002987 cnic_chk_pkt_rings(cp);
Michael Chanb177a5d52010-06-24 14:58:41 +00002988
Michael Chana4636962009-06-08 18:14:43 -07002989 return status_idx;
2990}
2991
Michael Chanb177a5d52010-06-24 14:58:41 +00002992static int cnic_service_bnx2(void *data, void *status_blk)
2993{
2994 struct cnic_dev *dev = data;
Michael Chanb177a5d52010-06-24 14:58:41 +00002995
Michael Chaneaaa6e92010-12-23 08:38:30 +00002996 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2997 struct status_block *sblk = status_blk;
2998
2999 return sblk->status_idx;
3000 }
Michael Chanb177a5d52010-06-24 14:58:41 +00003001
3002 return cnic_service_bnx2_queues(dev);
3003}
3004
Michael Chana4636962009-06-08 18:14:43 -07003005static void cnic_service_bnx2_msix(unsigned long data)
3006{
3007 struct cnic_dev *dev = (struct cnic_dev *) data;
3008 struct cnic_local *cp = dev->cnic_priv;
Michael Chana4636962009-06-08 18:14:43 -07003009
Michael Chanb177a5d52010-06-24 14:58:41 +00003010 cp->last_status_idx = cnic_service_bnx2_queues(dev);
Michael Chana4636962009-06-08 18:14:43 -07003011
Michael Chana4636962009-06-08 18:14:43 -07003012 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
3013 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
3014}
3015
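/* Common interrupt path: if the device is up, prefetch the status block
 * and the next KCQ entry, then schedule the tasklet that services the
 * queues.
 */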
Michael Chan66fee9e2010-06-24 14:58:38 +00003016static void cnic_doirq(struct cnic_dev *dev)
3017{
3018 struct cnic_local *cp = dev->cnic_priv;
Michael Chan66fee9e2010-06-24 14:58:38 +00003019
3020 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
Michael Chaneaaa6e92010-12-23 08:38:30 +00003021 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
3022
Michael Chan66fee9e2010-06-24 14:58:38 +00003023 prefetch(cp->status_blk.gen);
Michael Chane6c28892010-06-24 14:58:39 +00003024 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
Michael Chan66fee9e2010-06-24 14:58:38 +00003025
3026 tasklet_schedule(&cp->cnic_irq_task);
3027 }
3028}
3029
Michael Chana4636962009-06-08 18:14:43 -07003030static irqreturn_t cnic_irq(int irq, void *dev_instance)
3031{
3032 struct cnic_dev *dev = dev_instance;
3033 struct cnic_local *cp = dev->cnic_priv;
Michael Chana4636962009-06-08 18:14:43 -07003034
3035 if (cp->ack_int)
3036 cp->ack_int(dev);
3037
Michael Chan66fee9e2010-06-24 14:58:38 +00003038 cnic_doirq(dev);
Michael Chana4636962009-06-08 18:14:43 -07003039
3040 return IRQ_HANDLED;
3041}
3042
Michael Chan71034ba2009-10-10 13:46:59 +00003043static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
3044 u16 index, u8 op, u8 update)
3045{
3046 struct cnic_local *cp = dev->cnic_priv;
3047 u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
3048 COMMAND_REG_INT_ACK);
3049 struct igu_ack_register igu_ack;
3050
3051 igu_ack.status_block_index = index;
3052 igu_ack.sb_id_and_flags =
3053 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
3054 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
3055 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
3056 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
3057
3058 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
3059}
3060
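/* Write an interrupt ack to the IGU for the given status block: index,
 * access segment, enable/disable op and update flag.
 */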
Michael Chanee87a822010-10-13 14:06:51 +00003061static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
3062 u16 index, u8 op, u8 update)
3063{
3064 struct igu_regular cmd_data;
3065 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
3066
3067 cmd_data.sb_id_and_flags =
3068 (index << IGU_REGULAR_SB_INDEX_SHIFT) |
3069 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
3070 (update << IGU_REGULAR_BUPDATE_SHIFT) |
3071 (op << IGU_REGULAR_ENABLE_INT_SHIFT);
 3072
3074 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
3075}
3076
Michael Chan71034ba2009-10-10 13:46:59 +00003077static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
3078{
3079 struct cnic_local *cp = dev->cnic_priv;
3080
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003081 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
Michael Chan71034ba2009-10-10 13:46:59 +00003082 IGU_INT_DISABLE, 0);
3083}
3084
Michael Chanee87a822010-10-13 14:06:51 +00003085static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
3086{
3087 struct cnic_local *cp = dev->cnic_priv;
3088
3089 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
3090 IGU_INT_DISABLE, 0);
3091}
3092
Michael Chan8cc0e022012-09-08 06:01:03 +00003093static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
3094{
3095 struct cnic_local *cp = dev->cnic_priv;
3096
3097 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
3098 IGU_INT_ENABLE, 1);
3099}
3100
3101static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
3102{
3103 struct cnic_local *cp = dev->cnic_priv;
3104
3105 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
3106 IGU_INT_ENABLE, 1);
3107}
3108
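/* Drain one bnx2x KCQ: pass completed KCQEs to service_kcqes() until the
 * hardware producer stops advancing, and return the latest status index.
 */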
Michael Chanb177a5d52010-06-24 14:58:41 +00003109static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
Michael Chan71034ba2009-10-10 13:46:59 +00003110{
Michael Chanb177a5d52010-06-24 14:58:41 +00003111 u32 last_status = *info->status_idx_ptr;
Michael Chan71034ba2009-10-10 13:46:59 +00003112 int kcqe_cnt;
3113
Michael Chan107c3f42011-03-02 13:00:49 +00003114 /* status block index must be read before reading the KCQ */
3115 rmb();
Michael Chanb177a5d52010-06-24 14:58:41 +00003116 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
Michael Chan71034ba2009-10-10 13:46:59 +00003117
3118 service_kcqes(dev, kcqe_cnt);
3119
3120 /* Tell compiler that sblk fields can change. */
3121 barrier();
Michael Chan71034ba2009-10-10 13:46:59 +00003122
Michael Chanb177a5d52010-06-24 14:58:41 +00003123 last_status = *info->status_idx_ptr;
Michael Chan107c3f42011-03-02 13:00:49 +00003124 /* status block index must be read before reading the KCQ */
3125 rmb();
Michael Chan71034ba2009-10-10 13:46:59 +00003126 }
Michael Chanb177a5d52010-06-24 14:58:41 +00003127 return last_status;
3128}
3129
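/* Tasklet bottom half for bnx2x devices.  Services KCQ1, and KCQ2 as well
 * unless FCoE is disabled, looping until the status index stops changing
 * before re-arming the interrupt with the final index.
 */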
3130static void cnic_service_bnx2x_bh(unsigned long data)
3131{
3132 struct cnic_dev *dev = (struct cnic_dev *) data;
3133 struct cnic_local *cp = dev->cnic_priv;
Michael Chan0197b082011-03-02 13:00:50 +00003134 u32 status_idx, new_status_idx;
Michael Chanb177a5d52010-06-24 14:58:41 +00003135
3136 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
3137 return;
3138
Michael Chan0197b082011-03-02 13:00:50 +00003139 while (1) {
3140 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
Michael Chan71034ba2009-10-10 13:46:59 +00003141
Michael Chan0197b082011-03-02 13:00:50 +00003142 CNIC_WR16(dev, cp->kcq1.io_addr,
3143 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
Michael Chane21ba412010-12-23 07:43:03 +00003144
Michael Chan51a8f542012-09-08 06:01:04 +00003145 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE) {
Michael Chan8cc0e022012-09-08 06:01:03 +00003146 cp->arm_int(dev, status_idx);
Michael Chan0197b082011-03-02 13:00:50 +00003147 break;
3148 }
3149
3150 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
3151
3152 if (new_status_idx != status_idx)
3153 continue;
Michael Chane21ba412010-12-23 07:43:03 +00003154
3155 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
3156 MAX_KCQ_IDX);
3157
Michael Chanee87a822010-10-13 14:06:51 +00003158 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
3159 status_idx, IGU_INT_ENABLE, 1);
Michael Chan0197b082011-03-02 13:00:50 +00003160
3161 break;
Michael Chane21ba412010-12-23 07:43:03 +00003162 }
Michael Chan71034ba2009-10-10 13:46:59 +00003163}
3164
3165static int cnic_service_bnx2x(void *data, void *status_blk)
3166{
3167 struct cnic_dev *dev = data;
3168 struct cnic_local *cp = dev->cnic_priv;
Michael Chan71034ba2009-10-10 13:46:59 +00003169
Michael Chan66fee9e2010-06-24 14:58:38 +00003170 if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3171 cnic_doirq(dev);
Michael Chan71034ba2009-10-10 13:46:59 +00003172
Michael Chan66fee9e2010-06-24 14:58:38 +00003173 cnic_chk_pkt_rings(cp);
Michael Chan71034ba2009-10-10 13:46:59 +00003174
3175 return 0;
3176}
3177
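/* Stop a single ULP type.  iSCSI is additionally told the interface is
 * going down via an ISCSI_KEVENT_IF_DOWN netlink message.
 * ULP_F_CALL_PENDING is set for the duration of the cnic_stop callback.
 */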
Dmitry Kravkovfab0dc82011-03-31 17:04:22 -07003178static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
3179{
3180 struct cnic_ulp_ops *ulp_ops;
3181
3182 if (if_type == CNIC_ULP_ISCSI)
3183 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
3184
3185 mutex_lock(&cnic_lock);
3186 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3187 lockdep_is_held(&cnic_lock));
3188 if (!ulp_ops) {
3189 mutex_unlock(&cnic_lock);
3190 return;
3191 }
3192 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3193 mutex_unlock(&cnic_lock);
3194
3195 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3196 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
3197
3198 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3199}
3200
Michael Chana4636962009-06-08 18:14:43 -07003201static void cnic_ulp_stop(struct cnic_dev *dev)
3202{
3203 struct cnic_local *cp = dev->cnic_priv;
3204 int if_type;
3205
Dmitry Kravkovfab0dc82011-03-31 17:04:22 -07003206 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
3207 cnic_ulp_stop_one(cp, if_type);
Michael Chana4636962009-06-08 18:14:43 -07003208}
3209
3210static void cnic_ulp_start(struct cnic_dev *dev)
3211{
3212 struct cnic_local *cp = dev->cnic_priv;
3213 int if_type;
3214
Michael Chana4636962009-06-08 18:14:43 -07003215 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
3216 struct cnic_ulp_ops *ulp_ops;
3217
Michael Chan681dbd72009-08-14 15:49:46 +00003218 mutex_lock(&cnic_lock);
Eric Dumazet13707f92011-01-26 19:28:23 +00003219 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3220 lockdep_is_held(&cnic_lock));
Michael Chan681dbd72009-08-14 15:49:46 +00003221 if (!ulp_ops || !ulp_ops->cnic_start) {
3222 mutex_unlock(&cnic_lock);
Michael Chana4636962009-06-08 18:14:43 -07003223 continue;
Michael Chan681dbd72009-08-14 15:49:46 +00003224 }
3225 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3226 mutex_unlock(&cnic_lock);
Michael Chana4636962009-06-08 18:14:43 -07003227
3228 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3229 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
Michael Chan681dbd72009-08-14 15:49:46 +00003230
3231 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
Michael Chana4636962009-06-08 18:14:43 -07003232 }
Michael Chana4636962009-06-08 18:14:43 -07003233}
3234
Barak Witkowski1d187b32011-12-05 22:41:50 +00003235static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
3236{
3237 struct cnic_local *cp = dev->cnic_priv;
3238 struct cnic_ulp_ops *ulp_ops;
3239 int rc;
3240
3241 mutex_lock(&cnic_lock);
3242 ulp_ops = cnic_ulp_tbl_prot(ulp_type);
3243 if (ulp_ops && ulp_ops->cnic_get_stats)
3244 rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
3245 else
3246 rc = -ENODEV;
3247 mutex_unlock(&cnic_lock);
3248 return rc;
3249}
3250
Michael Chana4636962009-06-08 18:14:43 -07003251static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3252{
3253 struct cnic_dev *dev = data;
Barak Witkowski1d187b32011-12-05 22:41:50 +00003254 int ulp_type = CNIC_ULP_ISCSI;
Michael Chana4636962009-06-08 18:14:43 -07003255
3256 switch (info->cmd) {
3257 case CNIC_CTL_STOP_CMD:
3258 cnic_hold(dev);
Michael Chana4636962009-06-08 18:14:43 -07003259
3260 cnic_ulp_stop(dev);
3261 cnic_stop_hw(dev);
3262
Michael Chana4636962009-06-08 18:14:43 -07003263 cnic_put(dev);
3264 break;
3265 case CNIC_CTL_START_CMD:
3266 cnic_hold(dev);
Michael Chana4636962009-06-08 18:14:43 -07003267
3268 if (!cnic_start_hw(dev))
3269 cnic_ulp_start(dev);
3270
Michael Chana4636962009-06-08 18:14:43 -07003271 cnic_put(dev);
3272 break;
Dmitry Kravkovfab0dc82011-03-31 17:04:22 -07003273 case CNIC_CTL_STOP_ISCSI_CMD: {
3274 struct cnic_local *cp = dev->cnic_priv;
3275 set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3276 queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3277 break;
3278 }
Michael Chan71034ba2009-10-10 13:46:59 +00003279 case CNIC_CTL_COMPLETION_CMD: {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003280 struct cnic_ctl_completion *comp = &info->data.comp;
3281 u32 cid = BNX2X_SW_CID(comp->cid);
Michael Chan71034ba2009-10-10 13:46:59 +00003282 u32 l5_cid;
3283 struct cnic_local *cp = dev->cnic_priv;
3284
Michael Chana2028b232012-06-27 15:08:19 +00003285 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
3286 break;
3287
Michael Chan71034ba2009-10-10 13:46:59 +00003288 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3289 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3290
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003291 if (unlikely(comp->error)) {
3292 set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
3293 netdev_err(dev->netdev,
3294 "CID %x CFC delete comp error %x\n",
3295 cid, comp->error);
3296 }
3297
Michael Chan71034ba2009-10-10 13:46:59 +00003298 ctx->wait_cond = 1;
3299 wake_up(&ctx->waitq);
3300 }
3301 break;
3302 }
Barak Witkowski1d187b32011-12-05 22:41:50 +00003303 case CNIC_CTL_FCOE_STATS_GET_CMD:
3304 ulp_type = CNIC_ULP_FCOE;
3305 /* fall through */
3306 case CNIC_CTL_ISCSI_STATS_GET_CMD:
3307 cnic_hold(dev);
3308 cnic_copy_ulp_stats(dev, ulp_type);
3309 cnic_put(dev);
3310 break;
3311
Michael Chana4636962009-06-08 18:14:43 -07003312 default:
3313 return -EINVAL;
3314 }
3315 return 0;
3316}
3317
3318static void cnic_ulp_init(struct cnic_dev *dev)
3319{
3320 int i;
3321 struct cnic_local *cp = dev->cnic_priv;
3322
Michael Chana4636962009-06-08 18:14:43 -07003323 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3324 struct cnic_ulp_ops *ulp_ops;
3325
Michael Chan7fc1ece2009-08-14 15:49:47 +00003326 mutex_lock(&cnic_lock);
Eric Dumazet13707f92011-01-26 19:28:23 +00003327 ulp_ops = cnic_ulp_tbl_prot(i);
Michael Chan7fc1ece2009-08-14 15:49:47 +00003328 if (!ulp_ops || !ulp_ops->cnic_init) {
3329 mutex_unlock(&cnic_lock);
Michael Chana4636962009-06-08 18:14:43 -07003330 continue;
Michael Chan7fc1ece2009-08-14 15:49:47 +00003331 }
3332 ulp_get(ulp_ops);
3333 mutex_unlock(&cnic_lock);
Michael Chana4636962009-06-08 18:14:43 -07003334
3335 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3336 ulp_ops->cnic_init(dev);
3337
Michael Chan7fc1ece2009-08-14 15:49:47 +00003338 ulp_put(ulp_ops);
Michael Chana4636962009-06-08 18:14:43 -07003339 }
Michael Chana4636962009-06-08 18:14:43 -07003340}
3341
3342static void cnic_ulp_exit(struct cnic_dev *dev)
3343{
3344 int i;
3345 struct cnic_local *cp = dev->cnic_priv;
3346
Michael Chana4636962009-06-08 18:14:43 -07003347 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3348 struct cnic_ulp_ops *ulp_ops;
3349
Michael Chan7fc1ece2009-08-14 15:49:47 +00003350 mutex_lock(&cnic_lock);
Eric Dumazet13707f92011-01-26 19:28:23 +00003351 ulp_ops = cnic_ulp_tbl_prot(i);
Michael Chan7fc1ece2009-08-14 15:49:47 +00003352 if (!ulp_ops || !ulp_ops->cnic_exit) {
3353 mutex_unlock(&cnic_lock);
Michael Chana4636962009-06-08 18:14:43 -07003354 continue;
Michael Chan7fc1ece2009-08-14 15:49:47 +00003355 }
3356 ulp_get(ulp_ops);
3357 mutex_unlock(&cnic_lock);
Michael Chana4636962009-06-08 18:14:43 -07003358
3359 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3360 ulp_ops->cnic_exit(dev);
3361
Michael Chan7fc1ece2009-08-14 15:49:47 +00003362 ulp_put(ulp_ops);
Michael Chana4636962009-06-08 18:14:43 -07003363 }
Michael Chana4636962009-06-08 18:14:43 -07003364}
3365
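/* Build and submit an OFFLOAD_PG KWQE describing the L2 path for this
 * connection: destination MAC from the socket, source MAC from the
 * device, an optional VLAN tag, and the L5 CID as host opaque data.
 */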
3366static int cnic_cm_offload_pg(struct cnic_sock *csk)
3367{
3368 struct cnic_dev *dev = csk->dev;
3369 struct l4_kwq_offload_pg *l4kwqe;
3370 struct kwqe *wqes[1];
3371
3372 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
3373 memset(l4kwqe, 0, sizeof(*l4kwqe));
3374 wqes[0] = (struct kwqe *) l4kwqe;
3375
3376 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
3377 l4kwqe->flags =
3378 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
3379 l4kwqe->l2hdr_nbytes = ETH_HLEN;
3380
3381 l4kwqe->da0 = csk->ha[0];
3382 l4kwqe->da1 = csk->ha[1];
3383 l4kwqe->da2 = csk->ha[2];
3384 l4kwqe->da3 = csk->ha[3];
3385 l4kwqe->da4 = csk->ha[4];
3386 l4kwqe->da5 = csk->ha[5];
3387
3388 l4kwqe->sa0 = dev->mac_addr[0];
3389 l4kwqe->sa1 = dev->mac_addr[1];
3390 l4kwqe->sa2 = dev->mac_addr[2];
3391 l4kwqe->sa3 = dev->mac_addr[3];
3392 l4kwqe->sa4 = dev->mac_addr[4];
3393 l4kwqe->sa5 = dev->mac_addr[5];
3394
3395 l4kwqe->etype = ETH_P_IP;
Eddie Waia9736c02010-02-24 14:42:04 +00003396 l4kwqe->ipid_start = DEF_IPID_START;
Michael Chana4636962009-06-08 18:14:43 -07003397 l4kwqe->host_opaque = csk->l5_cid;
3398
3399 if (csk->vlan_id) {
3400 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
3401 l4kwqe->vlan_tag = csk->vlan_id;
3402 l4kwqe->l2hdr_nbytes += 4;
3403 }
3404
3405 return dev->submit_kwqes(dev, wqes, 1);
3406}
3407
3408static int cnic_cm_update_pg(struct cnic_sock *csk)
3409{
3410 struct cnic_dev *dev = csk->dev;
3411 struct l4_kwq_update_pg *l4kwqe;
3412 struct kwqe *wqes[1];
3413
3414 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
3415 memset(l4kwqe, 0, sizeof(*l4kwqe));
3416 wqes[0] = (struct kwqe *) l4kwqe;
3417
3418 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
3419 l4kwqe->flags =
3420 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
3421 l4kwqe->pg_cid = csk->pg_cid;
3422
3423 l4kwqe->da0 = csk->ha[0];
3424 l4kwqe->da1 = csk->ha[1];
3425 l4kwqe->da2 = csk->ha[2];
3426 l4kwqe->da3 = csk->ha[3];
3427 l4kwqe->da4 = csk->ha[4];
3428 l4kwqe->da5 = csk->ha[5];
3429
3430 l4kwqe->pg_host_opaque = csk->l5_cid;
3431 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
3432
3433 return dev->submit_kwqes(dev, wqes, 1);
3434}
3435
3436static int cnic_cm_upload_pg(struct cnic_sock *csk)
3437{
3438 struct cnic_dev *dev = csk->dev;
3439 struct l4_kwq_upload *l4kwqe;
3440 struct kwqe *wqes[1];
3441
3442 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
3443 memset(l4kwqe, 0, sizeof(*l4kwqe));
3444 wqes[0] = (struct kwqe *) l4kwqe;
3445
3446 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
3447 l4kwqe->flags =
3448 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
3449 l4kwqe->cid = csk->pg_cid;
3450
3451 return dev->submit_kwqes(dev, wqes, 1);
3452}
3453
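/* Build the CONNECT request KWQEs for a connection: req1 and req3 always,
 * plus req2 with the remaining address words for IPv6.  TCP options,
 * keepalive parameters and the MSS derived from the path MTU come from
 * the cnic_sock.
 */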
3454static int cnic_cm_conn_req(struct cnic_sock *csk)
3455{
3456 struct cnic_dev *dev = csk->dev;
3457 struct l4_kwq_connect_req1 *l4kwqe1;
3458 struct l4_kwq_connect_req2 *l4kwqe2;
3459 struct l4_kwq_connect_req3 *l4kwqe3;
3460 struct kwqe *wqes[3];
3461 u8 tcp_flags = 0;
3462 int num_wqes = 2;
3463
3464 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
3465 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
3466 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
3467 memset(l4kwqe1, 0, sizeof(*l4kwqe1));
3468 memset(l4kwqe2, 0, sizeof(*l4kwqe2));
3469 memset(l4kwqe3, 0, sizeof(*l4kwqe3));
3470
3471 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
3472 l4kwqe3->flags =
3473 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
3474 l4kwqe3->ka_timeout = csk->ka_timeout;
3475 l4kwqe3->ka_interval = csk->ka_interval;
3476 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
3477 l4kwqe3->tos = csk->tos;
3478 l4kwqe3->ttl = csk->ttl;
3479 l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
3480 l4kwqe3->pmtu = csk->mtu;
3481 l4kwqe3->rcv_buf = csk->rcv_buf;
3482 l4kwqe3->snd_buf = csk->snd_buf;
3483 l4kwqe3->seed = csk->seed;
3484
3485 wqes[0] = (struct kwqe *) l4kwqe1;
3486 if (test_bit(SK_F_IPV6, &csk->flags)) {
3487 wqes[1] = (struct kwqe *) l4kwqe2;
3488 wqes[2] = (struct kwqe *) l4kwqe3;
3489 num_wqes = 3;
3490
3491 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
3492 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
3493 l4kwqe2->flags =
3494 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
3495 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
3496 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
3497 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
3498 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
3499 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
3500 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
3501 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
3502 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
3503 sizeof(struct tcphdr);
3504 } else {
3505 wqes[1] = (struct kwqe *) l4kwqe3;
3506 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
3507 sizeof(struct tcphdr);
3508 }
3509
3510 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
3511 l4kwqe1->flags =
3512 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
3513 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
3514 l4kwqe1->cid = csk->cid;
3515 l4kwqe1->pg_cid = csk->pg_cid;
3516 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
3517 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
3518 l4kwqe1->src_port = be16_to_cpu(csk->src_port);
3519 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
3520 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
3521 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
3522 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
3523 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
3524 if (csk->tcp_flags & SK_TCP_NAGLE)
3525 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
3526 if (csk->tcp_flags & SK_TCP_TIMESTAMP)
3527 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
3528 if (csk->tcp_flags & SK_TCP_SACK)
3529 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
3530 if (csk->tcp_flags & SK_TCP_SEG_SCALING)
3531 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
3532
3533 l4kwqe1->tcp_flags = tcp_flags;
3534
3535 return dev->submit_kwqes(dev, wqes, num_wqes);
3536}
3537
3538static int cnic_cm_close_req(struct cnic_sock *csk)
3539{
3540 struct cnic_dev *dev = csk->dev;
3541 struct l4_kwq_close_req *l4kwqe;
3542 struct kwqe *wqes[1];
3543
3544 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
3545 memset(l4kwqe, 0, sizeof(*l4kwqe));
3546 wqes[0] = (struct kwqe *) l4kwqe;
3547
3548 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
3549 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
3550 l4kwqe->cid = csk->cid;
3551
3552 return dev->submit_kwqes(dev, wqes, 1);
3553}
3554
3555static int cnic_cm_abort_req(struct cnic_sock *csk)
3556{
3557 struct cnic_dev *dev = csk->dev;
3558 struct l4_kwq_reset_req *l4kwqe;
3559 struct kwqe *wqes[1];
3560
3561 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
3562 memset(l4kwqe, 0, sizeof(*l4kwqe));
3563 wqes[0] = (struct kwqe *) l4kwqe;
3564
3565 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
3566 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
3567 l4kwqe->cid = csk->cid;
3568
3569 return dev->submit_kwqes(dev, wqes, 1);
3570}
3571
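/* Allocate the cnic_sock slot for an L5 CID and initialize it with
 * default TCP parameters.  Fails if the slot is still referenced or an
 * offload is already in progress for that CID.
 */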
3572static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3573 u32 l5_cid, struct cnic_sock **csk, void *context)
3574{
3575 struct cnic_local *cp = dev->cnic_priv;
3576 struct cnic_sock *csk1;
3577
3578 if (l5_cid >= MAX_CM_SK_TBL_SZ)
3579 return -EINVAL;
3580
Michael Chanfdf24082010-10-13 14:06:47 +00003581 if (cp->ctx_tbl) {
3582 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3583
3584 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3585 return -EAGAIN;
3586 }
3587
Michael Chana4636962009-06-08 18:14:43 -07003588 csk1 = &cp->csk_tbl[l5_cid];
3589 if (atomic_read(&csk1->ref_count))
3590 return -EAGAIN;
3591
3592 if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
3593 return -EBUSY;
3594
3595 csk1->dev = dev;
3596 csk1->cid = cid;
3597 csk1->l5_cid = l5_cid;
3598 csk1->ulp_type = ulp_type;
3599 csk1->context = context;
3600
3601 csk1->ka_timeout = DEF_KA_TIMEOUT;
3602 csk1->ka_interval = DEF_KA_INTERVAL;
3603 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
3604 csk1->tos = DEF_TOS;
3605 csk1->ttl = DEF_TTL;
3606 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
3607 csk1->rcv_buf = DEF_RCV_BUF;
3608 csk1->snd_buf = DEF_SND_BUF;
3609 csk1->seed = DEF_SEED;
Eddie Wai6cdcdbb2013-07-28 19:03:57 -07003610 csk1->tcp_flags = 0;
Michael Chana4636962009-06-08 18:14:43 -07003611
3612 *csk = csk1;
3613 return 0;
3614}
3615
3616static void cnic_cm_cleanup(struct cnic_sock *csk)
3617{
3618 if (csk->src_port) {
3619 struct cnic_dev *dev = csk->dev;
3620 struct cnic_local *cp = dev->cnic_priv;
3621
Michael Chan9b093362010-12-23 07:42:56 +00003622 cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
Michael Chana4636962009-06-08 18:14:43 -07003623 csk->src_port = 0;
3624 }
3625}
3626
3627static void cnic_close_conn(struct cnic_sock *csk)
3628{
3629 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
3630 cnic_cm_upload_pg(csk);
3631 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3632 }
3633 cnic_cm_cleanup(csk);
3634}
3635
3636static int cnic_cm_destroy(struct cnic_sock *csk)
3637{
3638 if (!cnic_in_use(csk))
3639 return -EINVAL;
3640
3641 csk_hold(csk);
3642 clear_bit(SK_F_INUSE, &csk->flags);
3643 smp_mb__after_clear_bit();
3644 while (atomic_read(&csk->ref_count) != 1)
3645 msleep(1);
3646 cnic_cm_cleanup(csk);
3647
3648 csk->flags = 0;
3649 csk_put(csk);
3650 return 0;
3651}
3652
3653static inline u16 cnic_get_vlan(struct net_device *dev,
3654 struct net_device **vlan_dev)
3655{
3656 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3657 *vlan_dev = vlan_dev_real_dev(dev);
3658 return vlan_dev_vlan_id(dev);
3659 }
3660 *vlan_dev = dev;
3661 return 0;
3662}
3663
3664static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3665 struct dst_entry **dst)
3666{
Randy Dunlapfaea56c2009-06-12 11:43:48 -07003667#if defined(CONFIG_INET)
Michael Chana4636962009-06-08 18:14:43 -07003668 struct rtable *rt;
3669
David S. Miller78fbfd82011-03-12 00:00:52 -05003670 rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
3671 if (!IS_ERR(rt)) {
Changli Gaod8d1f302010-06-10 23:31:35 -07003672 *dst = &rt->dst;
David S. Miller78fbfd82011-03-12 00:00:52 -05003673 return 0;
3674 }
3675 return PTR_ERR(rt);
Randy Dunlapfaea56c2009-06-12 11:43:48 -07003676#else
3677 return -ENETUNREACH;
3678#endif
Michael Chana4636962009-06-08 18:14:43 -07003679}
3680
3681static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3682 struct dst_entry **dst)
3683{
Randy Dunlapfaea56c2009-06-12 11:43:48 -07003684#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
David S. Miller4c9483b2011-03-12 16:22:43 -05003685 struct flowi6 fl6;
Michael Chana4636962009-06-08 18:14:43 -07003686
David S. Miller4c9483b2011-03-12 16:22:43 -05003687 memset(&fl6, 0, sizeof(fl6));
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +00003688 fl6.daddr = dst_addr->sin6_addr;
David S. Miller4c9483b2011-03-12 16:22:43 -05003689 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
3690 fl6.flowi6_oif = dst_addr->sin6_scope_id;
Michael Chana4636962009-06-08 18:14:43 -07003691
David S. Miller4c9483b2011-03-12 16:22:43 -05003692 *dst = ip6_route_output(&init_net, NULL, &fl6);
RongQing.Li05417432012-02-21 22:10:50 +00003693 if ((*dst)->error) {
3694 dst_release(*dst);
3695 *dst = NULL;
3696 return -ENETUNREACH;
3697 } else
Michael Chana4636962009-06-08 18:14:43 -07003698 return 0;
3699#endif
3700
3701 return -ENETUNREACH;
3702}
3703
3704static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
3705 int ulp_type)
3706{
3707 struct cnic_dev *dev = NULL;
3708 struct dst_entry *dst;
3709 struct net_device *netdev = NULL;
3710 int err = -ENETUNREACH;
3711
3712 if (dst_addr->sin_family == AF_INET)
3713 err = cnic_get_v4_route(dst_addr, &dst);
3714 else if (dst_addr->sin_family == AF_INET6) {
3715 struct sockaddr_in6 *dst_addr6 =
3716 (struct sockaddr_in6 *) dst_addr;
3717
3718 err = cnic_get_v6_route(dst_addr6, &dst);
3719 } else
3720 return NULL;
3721
3722 if (err)
3723 return NULL;
3724
3725 if (!dst->dev)
3726 goto done;
3727
3728 cnic_get_vlan(dst->dev, &netdev);
3729
3730 dev = cnic_from_netdev(netdev);
3731
3732done:
3733 dst_release(dst);
3734 if (dev)
3735 cnic_put(dev);
3736 return dev;
3737}
3738
3739static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3740{
3741 struct cnic_dev *dev = csk->dev;
3742 struct cnic_local *cp = dev->cnic_priv;
3743
3744 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
3745}
3746
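/* Resolve routing information for a connection request: destination and
 * local addresses, VLAN and MTU from the route when it points back to
 * this netdev, and a local port allocated from csk_port_tbl.
 */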
3747static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3748{
3749 struct cnic_dev *dev = csk->dev;
3750 struct cnic_local *cp = dev->cnic_priv;
Michael Chanc76284a2010-02-24 14:42:07 +00003751 int is_v6, rc = 0;
3752 struct dst_entry *dst = NULL;
Michael Chana4636962009-06-08 18:14:43 -07003753 struct net_device *realdev;
Michael Chan9b093362010-12-23 07:42:56 +00003754 __be16 local_port;
3755 u32 port_id;
Michael Chana4636962009-06-08 18:14:43 -07003756
3757 if (saddr->local.v6.sin6_family == AF_INET6 &&
3758 saddr->remote.v6.sin6_family == AF_INET6)
3759 is_v6 = 1;
3760 else if (saddr->local.v4.sin_family == AF_INET &&
3761 saddr->remote.v4.sin_family == AF_INET)
3762 is_v6 = 0;
3763 else
3764 return -EINVAL;
3765
3766 clear_bit(SK_F_IPV6, &csk->flags);
3767
3768 if (is_v6) {
Michael Chana4636962009-06-08 18:14:43 -07003769 set_bit(SK_F_IPV6, &csk->flags);
Michael Chanc76284a2010-02-24 14:42:07 +00003770 cnic_get_v6_route(&saddr->remote.v6, &dst);
Michael Chana4636962009-06-08 18:14:43 -07003771
3772 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3773 sizeof(struct in6_addr));
3774 csk->dst_port = saddr->remote.v6.sin6_port;
3775 local_port = saddr->local.v6.sin6_port;
Michael Chana4636962009-06-08 18:14:43 -07003776
3777 } else {
Michael Chanc76284a2010-02-24 14:42:07 +00003778 cnic_get_v4_route(&saddr->remote.v4, &dst);
Michael Chana4636962009-06-08 18:14:43 -07003779
3780 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3781 csk->dst_port = saddr->remote.v4.sin_port;
3782 local_port = saddr->local.v4.sin_port;
3783 }
3784
Michael Chanc76284a2010-02-24 14:42:07 +00003785 csk->vlan_id = 0;
3786 csk->mtu = dev->netdev->mtu;
3787 if (dst && dst->dev) {
3788 u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3789 if (realdev == dev->netdev) {
3790 csk->vlan_id = vlan;
3791 csk->mtu = dst_mtu(dst);
3792 }
3793 }
Michael Chana4636962009-06-08 18:14:43 -07003794
Michael Chan9b093362010-12-23 07:42:56 +00003795 port_id = be16_to_cpu(local_port);
3796 if (port_id >= CNIC_LOCAL_PORT_MIN &&
3797 port_id < CNIC_LOCAL_PORT_MAX) {
3798 if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3799 port_id = 0;
Michael Chana4636962009-06-08 18:14:43 -07003800 } else
Michael Chan9b093362010-12-23 07:42:56 +00003801 port_id = 0;
Michael Chana4636962009-06-08 18:14:43 -07003802
Michael Chan9b093362010-12-23 07:42:56 +00003803 if (!port_id) {
3804 port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3805 if (port_id == -1) {
Michael Chana4636962009-06-08 18:14:43 -07003806 rc = -ENOMEM;
3807 goto err_out;
3808 }
Michael Chan9b093362010-12-23 07:42:56 +00003809 local_port = cpu_to_be16(port_id);
Michael Chana4636962009-06-08 18:14:43 -07003810 }
3811 csk->src_port = local_port;
3812
Michael Chana4636962009-06-08 18:14:43 -07003813err_out:
3814 dst_release(dst);
3815 return rc;
3816}
3817
3818static void cnic_init_csk_state(struct cnic_sock *csk)
3819{
3820 csk->state = 0;
3821 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3822 clear_bit(SK_F_CLOSING, &csk->flags);
3823}
3824
3825static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3826{
Dmitry Kravkovfab0dc82011-03-31 17:04:22 -07003827 struct cnic_local *cp = csk->dev->cnic_priv;
Michael Chana4636962009-06-08 18:14:43 -07003828 int err = 0;
3829
Dmitry Kravkovfab0dc82011-03-31 17:04:22 -07003830 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3831 return -EOPNOTSUPP;
3832
Michael Chana4636962009-06-08 18:14:43 -07003833 if (!cnic_in_use(csk))
3834 return -EINVAL;
3835
3836 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3837 return -EINVAL;
3838
3839 cnic_init_csk_state(csk);
3840
3841 err = cnic_get_route(csk, saddr);
3842 if (err)
3843 goto err_out;
3844
3845 err = cnic_resolve_addr(csk, saddr);
3846 if (!err)
3847 return 0;
3848
3849err_out:
3850 clear_bit(SK_F_CONNECT_START, &csk->flags);
3851 return err;
3852}
3853
3854static int cnic_cm_abort(struct cnic_sock *csk)
3855{
3856 struct cnic_local *cp = csk->dev->cnic_priv;
Michael Chan7b34a462010-06-15 08:57:03 +00003857 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
Michael Chana4636962009-06-08 18:14:43 -07003858
3859 if (!cnic_in_use(csk))
3860 return -EINVAL;
3861
3862 if (cnic_abort_prep(csk))
3863 return cnic_cm_abort_req(csk);
3864
 3865	 /* Getting here means that we haven't started the connect, or the
Eddie Wai0d650ec2012-12-05 10:10:15 +00003866 * connect was not successful, or it has been reset by the target.
Michael Chana4636962009-06-08 18:14:43 -07003867 */
3868
Michael Chana4636962009-06-08 18:14:43 -07003869 cp->close_conn(csk, opcode);
Eddie Wai0d650ec2012-12-05 10:10:15 +00003870 if (csk->state != opcode) {
3871 /* Wait for remote reset sequence to complete */
3872 while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3873 msleep(1);
3874
Michael Chan7b34a462010-06-15 08:57:03 +00003875 return -EALREADY;
Eddie Wai0d650ec2012-12-05 10:10:15 +00003876 }
Michael Chana4636962009-06-08 18:14:43 -07003877
3878 return 0;
3879}
3880
3881static int cnic_cm_close(struct cnic_sock *csk)
3882{
3883 if (!cnic_in_use(csk))
3884 return -EINVAL;
3885
3886 if (cnic_close_prep(csk)) {
3887 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3888 return cnic_cm_close_req(csk);
Michael Chaned99daa52010-06-15 08:57:00 +00003889 } else {
Eddie Wai0d650ec2012-12-05 10:10:15 +00003890 /* Wait for remote reset sequence to complete */
3891 while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3892 msleep(1);
3893
Michael Chaned99daa52010-06-15 08:57:00 +00003894 return -EALREADY;
Michael Chana4636962009-06-08 18:14:43 -07003895 }
3896 return 0;
3897}
3898
3899static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3900 u8 opcode)
3901{
3902 struct cnic_ulp_ops *ulp_ops;
3903 int ulp_type = csk->ulp_type;
3904
3905 rcu_read_lock();
3906 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3907 if (ulp_ops) {
3908 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3909 ulp_ops->cm_connect_complete(csk);
3910 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3911 ulp_ops->cm_close_complete(csk);
3912 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3913 ulp_ops->cm_remote_abort(csk);
3914 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3915 ulp_ops->cm_abort_complete(csk);
3916 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3917 ulp_ops->cm_remote_close(csk);
3918 }
3919 rcu_read_unlock();
3920}
3921
3922static int cnic_cm_set_pg(struct cnic_sock *csk)
3923{
3924 if (cnic_offld_prep(csk)) {
3925 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3926 cnic_cm_update_pg(csk);
3927 else
3928 cnic_cm_offload_pg(csk);
3929 }
3930 return 0;
3931}
3932
3933static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3934{
3935 struct cnic_local *cp = dev->cnic_priv;
3936 u32 l5_cid = kcqe->pg_host_opaque;
3937 u8 opcode = kcqe->op_code;
3938 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3939
3940 csk_hold(csk);
3941 if (!cnic_in_use(csk))
3942 goto done;
3943
3944 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3945 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3946 goto done;
3947 }
Eddie Waia9736c02010-02-24 14:42:04 +00003948 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3949 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3950 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3951 cnic_cm_upcall(cp, csk,
3952 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3953 goto done;
3954 }
3955
Michael Chana4636962009-06-08 18:14:43 -07003956 csk->pg_cid = kcqe->pg_cid;
3957 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3958 cnic_cm_conn_req(csk);
3959
3960done:
3961 csk_put(csk);
3962}
3963
Michael Chane1928c82010-12-23 07:43:04 +00003964static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3965{
3966 struct cnic_local *cp = dev->cnic_priv;
3967 struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3968 u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3969 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3970
3971 ctx->timestamp = jiffies;
3972 ctx->wait_cond = 1;
3973 wake_up(&ctx->waitq);
3974}
3975
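/* Dispatch a single L4/L5 KCQE: FCoE terminate and PG offload/update
 * completions are handled specially; everything else is mapped to a
 * cnic_sock and drives the connection state machine and ULP upcalls.
 */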
Michael Chana4636962009-06-08 18:14:43 -07003976static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3977{
3978 struct cnic_local *cp = dev->cnic_priv;
3979 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3980 u8 opcode = l4kcqe->op_code;
3981 u32 l5_cid;
3982 struct cnic_sock *csk;
3983
Michael Chane1928c82010-12-23 07:43:04 +00003984 if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
3985 cnic_process_fcoe_term_conn(dev, kcqe);
3986 return;
3987 }
Michael Chana4636962009-06-08 18:14:43 -07003988 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3989 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3990 cnic_cm_process_offld_pg(dev, l4kcqe);
3991 return;
3992 }
3993
3994 l5_cid = l4kcqe->conn_id;
3995 if (opcode & 0x80)
3996 l5_cid = l4kcqe->cid;
3997 if (l5_cid >= MAX_CM_SK_TBL_SZ)
3998 return;
3999
4000 csk = &cp->csk_tbl[l5_cid];
4001 csk_hold(csk);
4002
4003 if (!cnic_in_use(csk)) {
4004 csk_put(csk);
4005 return;
4006 }
4007
4008 switch (opcode) {
Eddie Waia9736c02010-02-24 14:42:04 +00004009 case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
4010 if (l4kcqe->status != 0) {
4011 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4012 cnic_cm_upcall(cp, csk,
4013 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
4014 }
4015 break;
Michael Chana4636962009-06-08 18:14:43 -07004016 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
4017 if (l4kcqe->status == 0)
4018 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
Michael Chan8ec3e702012-03-21 15:38:34 +00004019 else if (l4kcqe->status ==
4020 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
Jeffrey Huang0cb1f4b2012-02-08 17:33:56 +00004021 set_bit(SK_F_HW_ERR, &csk->flags);
Michael Chana4636962009-06-08 18:14:43 -07004022
4023 smp_mb__before_clear_bit();
4024 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4025 cnic_cm_upcall(cp, csk, opcode);
4026 break;
4027
Eddie Wai28e3a8f2013-07-28 19:03:59 -07004028 case L5CM_RAMROD_CMD_ID_CLOSE: {
4029 struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
4030
4031 if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) {
4032 netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
4033 l4kcqe->status, l5kcqe->completion_status);
Eddie Wai7bc910f2012-06-27 15:08:22 +00004034 opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
4035 /* Fall through */
4036 } else {
4037 break;
4038 }
Eddie Wai28e3a8f2013-07-28 19:03:59 -07004039 }
Michael Chana4636962009-06-08 18:14:43 -07004040 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
Michael Chana4636962009-06-08 18:14:43 -07004041 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4042 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
Michael Chan71034ba2009-10-10 13:46:59 +00004043 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4044 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
Michael Chan8ec3e702012-03-21 15:38:34 +00004045 if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
Michael Chan23021c22012-01-04 12:12:28 +00004046 set_bit(SK_F_HW_ERR, &csk->flags);
4047
Michael Chana4636962009-06-08 18:14:43 -07004048 cp->close_conn(csk, opcode);
4049 break;
4050
4051 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
Michael Chan101c40c2011-06-08 19:29:33 +00004052 /* after we already sent CLOSE_REQ */
4053 if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
4054 !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
4055 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
4056 cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
4057 else
4058 cnic_cm_upcall(cp, csk, opcode);
Michael Chana4636962009-06-08 18:14:43 -07004059 break;
4060 }
4061 csk_put(csk);
4062}
4063
4064static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
4065{
4066 struct cnic_dev *dev = data;
4067 int i;
4068
4069 for (i = 0; i < num; i++)
4070 cnic_cm_process_kcqe(dev, kcqe[i]);
4071}
4072
4073static struct cnic_ulp_ops cm_ulp_ops = {
4074 .indicate_kcqes = cnic_cm_indicate_kcqe,
4075};
4076
4077static void cnic_cm_free_mem(struct cnic_dev *dev)
4078{
4079 struct cnic_local *cp = dev->cnic_priv;
4080
4081 kfree(cp->csk_tbl);
4082 cp->csk_tbl = NULL;
4083 cnic_free_id_tbl(&cp->csk_port_tbl);
4084}
4085
4086static int cnic_cm_alloc_mem(struct cnic_dev *dev)
4087{
4088 struct cnic_local *cp = dev->cnic_priv;
Eddie Wai11f23aa2011-06-08 19:29:34 +00004089 u32 port_id;
Michael Chana4636962009-06-08 18:14:43 -07004090
4091 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
4092 GFP_KERNEL);
4093 if (!cp->csk_tbl)
4094 return -ENOMEM;
4095
Akinobu Mitae00adf32013-05-07 16:18:15 -07004096 port_id = prandom_u32();
Eddie Wai11f23aa2011-06-08 19:29:34 +00004097 port_id %= CNIC_LOCAL_PORT_RANGE;
Michael Chana4636962009-06-08 18:14:43 -07004098 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
Eddie Wai11f23aa2011-06-08 19:29:34 +00004099 CNIC_LOCAL_PORT_MIN, port_id)) {
Michael Chana4636962009-06-08 18:14:43 -07004100 cnic_cm_free_mem(dev);
4101 return -ENOMEM;
4102 }
4103 return 0;
4104}
4105
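/* Decide whether a close/reset related event should move the socket into
 * the closing state.  Returns 1 when the event is accepted and
 * SK_F_CLOSING was newly set.
 */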
4106static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
4107{
Michael Chan943189f2010-06-15 08:57:02 +00004108 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
4109 /* Unsolicited RESET_COMP or RESET_RECEIVED */
4110 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
4111 csk->state = opcode;
Michael Chana1e621b2010-06-15 08:57:01 +00004112 }
Michael Chan943189f2010-06-15 08:57:02 +00004113
4114 /* 1. If event opcode matches the expected event in csk->state
Michael Chan101c40c2011-06-08 19:29:33 +00004115 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
4116 * event
Michael Chan7b34a462010-06-15 08:57:03 +00004117 * 3. If the expected event is 0, meaning the connection was never
 4118	 * established, we accept the opcode from cm_abort.
Michael Chan943189f2010-06-15 08:57:02 +00004119 */
Michael Chan7b34a462010-06-15 08:57:03 +00004120 if (opcode == csk->state || csk->state == 0 ||
Michael Chan101c40c2011-06-08 19:29:33 +00004121 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
4122 csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
Michael Chan7b34a462010-06-15 08:57:03 +00004123 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
4124 if (csk->state == 0)
4125 csk->state = opcode;
Michael Chana4636962009-06-08 18:14:43 -07004126 return 1;
Michael Chan7b34a462010-06-15 08:57:03 +00004127 }
Michael Chana4636962009-06-08 18:14:43 -07004128 }
4129 return 0;
4130}
4131
4132static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
4133{
4134 struct cnic_dev *dev = csk->dev;
4135 struct cnic_local *cp = dev->cnic_priv;
4136
Michael Chana1e621b2010-06-15 08:57:01 +00004137 if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
4138 cnic_cm_upcall(cp, csk, opcode);
4139 return;
4140 }
4141
Michael Chana4636962009-06-08 18:14:43 -07004142 clear_bit(SK_F_CONNECT_START, &csk->flags);
Eddie Wai66883e92010-02-24 14:42:05 +00004143 cnic_close_conn(csk);
Michael Chan7b34a462010-06-15 08:57:03 +00004144 csk->state = opcode;
Eddie Wai66883e92010-02-24 14:42:05 +00004145 cnic_cm_upcall(cp, csk, opcode);
Michael Chana4636962009-06-08 18:14:43 -07004146}
4147
4148static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
4149{
4150}
4151
4152static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
4153{
4154 u32 seed;
4155
Akinobu Mitae00adf32013-05-07 16:18:15 -07004156 seed = prandom_u32();
Michael Chana4636962009-06-08 18:14:43 -07004157 cnic_ctx_wr(dev, 45, 0, seed);
4158 return 0;
4159}
4160
Michael Chan71034ba2009-10-10 13:46:59 +00004161static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
4162{
4163 struct cnic_dev *dev = csk->dev;
4164 struct cnic_local *cp = dev->cnic_priv;
4165 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
4166 union l5cm_specific_data l5_data;
4167 u32 cmd = 0;
4168 int close_complete = 0;
4169
4170 switch (opcode) {
4171 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4172 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4173 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
Michael Chan7b34a462010-06-15 08:57:03 +00004174 if (cnic_ready_to_close(csk, opcode)) {
Michael Chan23021c22012-01-04 12:12:28 +00004175 if (test_bit(SK_F_HW_ERR, &csk->flags))
4176 close_complete = 1;
4177 else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
Michael Chan7b34a462010-06-15 08:57:03 +00004178 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
4179 else
4180 close_complete = 1;
4181 }
Michael Chan71034ba2009-10-10 13:46:59 +00004182 break;
4183 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4184 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
4185 break;
4186 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4187 close_complete = 1;
4188 break;
4189 }
4190 if (cmd) {
4191 memset(&l5_data, 0, sizeof(l5_data));
4192
4193 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
4194 &l5_data);
4195 } else if (close_complete) {
4196 ctx->timestamp = jiffies;
4197 cnic_close_conn(csk);
4198 cnic_cm_upcall(cp, csk, csk->state);
4199 }
4200}
4201
4202static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4203{
Michael Chanfdf24082010-10-13 14:06:47 +00004204 struct cnic_local *cp = dev->cnic_priv;
Michael Chanfdf24082010-10-13 14:06:47 +00004205
4206 if (!cp->ctx_tbl)
4207 return;
4208
4209 if (!netif_running(dev->netdev))
4210 return;
4211
Michael Chan74e49bb2011-07-20 14:55:23 +00004212 cnic_bnx2x_delete_wait(dev, 0);
Michael Chanfdf24082010-10-13 14:06:47 +00004213
4214 cancel_delayed_work(&cp->delete_task);
4215 flush_workqueue(cnic_wq);
4216
4217 if (atomic_read(&cp->iscsi_conn) != 0)
4218 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
4219 atomic_read(&cp->iscsi_conn));
Michael Chan71034ba2009-10-10 13:46:59 +00004220}
4221
4222static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4223{
4224 struct cnic_local *cp = dev->cnic_priv;
Michael Chan68c64d22012-12-06 10:33:11 +00004225 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chan14203982010-10-06 03:16:06 +00004226 u32 pfid = cp->pfid;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004227 u32 port = CNIC_PORT(cp);
Michael Chan71034ba2009-10-10 13:46:59 +00004228
4229 cnic_init_bnx2x_mac(dev);
Eddie Waib3bd2d62013-07-28 19:03:58 -07004230 cnic_bnx2x_set_tcp_options(dev, 0, 1);
Michael Chan71034ba2009-10-10 13:46:59 +00004231
4232 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00004233 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
Michael Chan71034ba2009-10-10 13:46:59 +00004234
4235 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004236 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
Michael Chan71034ba2009-10-10 13:46:59 +00004237 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004238 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
Michael Chan71034ba2009-10-10 13:46:59 +00004239 DEF_MAX_DA_COUNT);
4240
4241 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00004242 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
Michael Chan71034ba2009-10-10 13:46:59 +00004243 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00004244 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
Michael Chan71034ba2009-10-10 13:46:59 +00004245 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00004246 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
Michael Chan71034ba2009-10-10 13:46:59 +00004247 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00004248 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
Michael Chan71034ba2009-10-10 13:46:59 +00004249
Michael Chan14203982010-10-06 03:16:06 +00004250 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00004251 DEF_MAX_CWND);
4252 return 0;
4253}
4254
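/* Delayed work that stops iSCSI when requested and destroys connections
 * whose contexts are marked for deletion, deferring any context younger
 * than two seconds and re-queuing itself while work remains.
 */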
Michael Chanfdf24082010-10-13 14:06:47 +00004255static void cnic_delete_task(struct work_struct *work)
4256{
4257 struct cnic_local *cp;
4258 struct cnic_dev *dev;
4259 u32 i;
4260 int need_resched = 0;
4261
4262 cp = container_of(work, struct cnic_local, delete_task.work);
4263 dev = cp->dev;
4264
Dmitry Kravkovfab0dc82011-03-31 17:04:22 -07004265 if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
4266 struct drv_ctl_info info;
4267
Dmitry Kravkovfab0dc82011-03-31 17:04:22 -07004268 cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
Dmitry Kravkovfab0dc82011-03-31 17:04:22 -07004269
4270 info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
4271 cp->ethdev->drv_ctl(dev->netdev, &info);
4272 }
4273
Michael Chanfdf24082010-10-13 14:06:47 +00004274 for (i = 0; i < cp->max_cid_space; i++) {
4275 struct cnic_context *ctx = &cp->ctx_tbl[i];
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004276 int err;
Michael Chanfdf24082010-10-13 14:06:47 +00004277
4278 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
4279 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4280 continue;
4281
4282 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4283 need_resched = 1;
4284 continue;
4285 }
4286
4287 if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4288 continue;
4289
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004290 err = cnic_bnx2x_destroy_ramrod(dev, i);
Michael Chanfdf24082010-10-13 14:06:47 +00004291
4292 cnic_free_bnx2x_conn_resc(dev, i);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004293 if (!err) {
4294 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4295 atomic_dec(&cp->iscsi_conn);
Michael Chanfdf24082010-10-13 14:06:47 +00004296
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004297 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4298 }
Michael Chanfdf24082010-10-13 14:06:47 +00004299 }
4300
4301 if (need_resched)
4302 queue_delayed_work(cnic_wq, &cp->delete_task,
4303 msecs_to_jiffies(10));
4304
4305}
4306
Michael Chana4636962009-06-08 18:14:43 -07004307static int cnic_cm_open(struct cnic_dev *dev)
4308{
4309 struct cnic_local *cp = dev->cnic_priv;
4310 int err;
4311
4312 err = cnic_cm_alloc_mem(dev);
4313 if (err)
4314 return err;
4315
4316 err = cp->start_cm(dev);
4317
4318 if (err)
4319 goto err_out;
4320
Michael Chanfdf24082010-10-13 14:06:47 +00004321 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4322
Michael Chana4636962009-06-08 18:14:43 -07004323 dev->cm_create = cnic_cm_create;
4324 dev->cm_destroy = cnic_cm_destroy;
4325 dev->cm_connect = cnic_cm_connect;
4326 dev->cm_abort = cnic_cm_abort;
4327 dev->cm_close = cnic_cm_close;
4328 dev->cm_select_dev = cnic_cm_select_dev;
4329
4330 cp->ulp_handle[CNIC_ULP_L4] = dev;
4331 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4332 return 0;
4333
4334err_out:
4335 cnic_cm_free_mem(dev);
4336 return err;
4337}
4338
4339static int cnic_cm_shutdown(struct cnic_dev *dev)
4340{
4341 struct cnic_local *cp = dev->cnic_priv;
4342 int i;
4343
Michael Chana4636962009-06-08 18:14:43 -07004344 if (!cp->csk_tbl)
4345 return 0;
4346
4347 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4348 struct cnic_sock *csk = &cp->csk_tbl[i];
4349
4350 clear_bit(SK_F_INUSE, &csk->flags);
4351 cnic_cm_cleanup(csk);
4352 }
4353 cnic_cm_free_mem(dev);
4354
4355 return 0;
4356}
4357
4358static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4359{
Michael Chana4636962009-06-08 18:14:43 -07004360 u32 cid_addr;
4361 int i;
4362
Michael Chana4636962009-06-08 18:14:43 -07004363 cid_addr = GET_CID_ADDR(cid);
4364
4365 for (i = 0; i < CTX_SIZE; i += 4)
4366 cnic_ctx_wr(dev, cid_addr, i, 0);
4367}
4368
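/* Program the 5709 host page table with the context memory blocks and
 * poll for each write request to complete.  No-op on other chips.
 */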
4369static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4370{
4371 struct cnic_local *cp = dev->cnic_priv;
4372 int ret = 0, i;
4373 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4374
Michael Chan4ce45e02012-12-06 10:33:10 +00004375 if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
Michael Chana4636962009-06-08 18:14:43 -07004376 return 0;
4377
4378 for (i = 0; i < cp->ctx_blks; i++) {
4379 int j;
4380 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4381 u32 val;
4382
Michael Chan2bc40782012-12-06 10:33:09 +00004383 memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE);
Michael Chana4636962009-06-08 18:14:43 -07004384
4385 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4386 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4387 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4388 (u64) cp->ctx_arr[i].mapping >> 32);
4389 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4390 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4391 for (j = 0; j < 10; j++) {
4392
4393 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4394 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4395 break;
4396 udelay(5);
4397 }
4398 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
4399 ret = -EBUSY;
4400 break;
4401 }
4402 }
4403 return ret;
4404}
4405
4406static void cnic_free_irq(struct cnic_dev *dev)
4407{
4408 struct cnic_local *cp = dev->cnic_priv;
4409 struct cnic_eth_dev *ethdev = cp->ethdev;
4410
4411 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4412 cp->disable_int_sync(dev);
Michael Chan6e0dc642010-10-13 14:06:44 +00004413 tasklet_kill(&cp->cnic_irq_task);
Michael Chana4636962009-06-08 18:14:43 -07004414 free_irq(ethdev->irq_arr[0].vector, dev);
4415 }
4416}
4417
Michael Chan6e0dc642010-10-13 14:06:44 +00004418static int cnic_request_irq(struct cnic_dev *dev)
4419{
4420 struct cnic_local *cp = dev->cnic_priv;
4421 struct cnic_eth_dev *ethdev = cp->ethdev;
4422 int err;
4423
4424 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4425 if (err)
4426 tasklet_disable(&cp->cnic_irq_task);
4427
4428 return err;
4429}
4430
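/* Set up bnx2 interrupt handling.  In MSI-X mode the status block is
 * programmed for one-shot mode and the vector is requested; in both modes
 * coalescing is forced until the completion producer index reads back
 * as zero.
 */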
Michael Chana4636962009-06-08 18:14:43 -07004431static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4432{
4433 struct cnic_local *cp = dev->cnic_priv;
4434 struct cnic_eth_dev *ethdev = cp->ethdev;
4435
4436 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4437 int err, i = 0;
4438 int sblk_num = cp->status_blk_num;
4439 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4440 BNX2_HC_SB_CONFIG_1;
4441
4442 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4443
4444 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4445 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4446 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4447
Michael Chana4dde3a2010-02-24 14:42:08 +00004448 cp->last_status_idx = cp->status_blk.bnx2->status_idx;
Joe Perches164165d2009-11-19 09:30:10 +00004449 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
Michael Chana4636962009-06-08 18:14:43 -07004450 (unsigned long) dev);
Michael Chan6e0dc642010-10-13 14:06:44 +00004451 err = cnic_request_irq(dev);
4452 if (err)
Michael Chana4636962009-06-08 18:14:43 -07004453 return err;
Michael Chan6e0dc642010-10-13 14:06:44 +00004454
Michael Chana4dde3a2010-02-24 14:42:08 +00004455 while (cp->status_blk.bnx2->status_completion_producer_index &&
Michael Chana4636962009-06-08 18:14:43 -07004456 i < 10) {
4457 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4458 1 << (11 + sblk_num));
4459 udelay(10);
4460 i++;
4461 barrier();
4462 }
Michael Chana4dde3a2010-02-24 14:42:08 +00004463 if (cp->status_blk.bnx2->status_completion_producer_index) {
Michael Chana4636962009-06-08 18:14:43 -07004464 cnic_free_irq(dev);
4465 goto failed;
4466 }
4467
4468 } else {
Michael Chana4dde3a2010-02-24 14:42:08 +00004469 struct status_block *sblk = cp->status_blk.gen;
Michael Chana4636962009-06-08 18:14:43 -07004470 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4471 int i = 0;
4472
4473 while (sblk->status_completion_producer_index && i < 10) {
4474 CNIC_WR(dev, BNX2_HC_COMMAND,
4475 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4476 udelay(10);
4477 i++;
4478 barrier();
4479 }
4480 if (sblk->status_completion_producer_index)
4481 goto failed;
4482
4483 }
4484 return 0;
4485
4486failed:
Joe Perchesddf79b22010-02-17 15:01:54 +00004487 netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
Michael Chana4636962009-06-08 18:14:43 -07004488 return -EBUSY;
4489}
4490
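/* Re-enable the bnx2 interrupt by acking the last seen status index
 * (only needed when running in MSI-X mode).
 */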
4491static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4492{
4493 struct cnic_local *cp = dev->cnic_priv;
4494 struct cnic_eth_dev *ethdev = cp->ethdev;
4495
4496 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4497 return;
4498
4499 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4500 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4501}
4502
4503static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4504{
4505 struct cnic_local *cp = dev->cnic_priv;
4506 struct cnic_eth_dev *ethdev = cp->ethdev;
4507
4508 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4509 return;
4510
4511 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4512 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4513 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4514 synchronize_irq(ethdev->irq_arr[0].vector);
4515}
4516
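/* Set up the L2 tx ring used by the uio interface: pick the tx CID
 * (20, or the TSS CID in MSI-X mode), program its context, and point
 * every BD at the single uio transmit buffer.
 */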
4517static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4518{
4519 struct cnic_local *cp = dev->cnic_priv;
4520 struct cnic_eth_dev *ethdev = cp->ethdev;
Michael Chancd801532010-10-13 14:06:49 +00004521 struct cnic_uio_dev *udev = cp->udev;
Michael Chana4636962009-06-08 18:14:43 -07004522 u32 cid_addr, tx_cid, sb_id;
4523 u32 val, offset0, offset1, offset2, offset3;
4524 int i;
Michael Chan2bc40782012-12-06 10:33:09 +00004525 struct bnx2_tx_bd *txbd;
Michael Chancd801532010-10-13 14:06:49 +00004526 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
Michael Chana4dde3a2010-02-24 14:42:08 +00004527 struct status_block *s_blk = cp->status_blk.gen;
Michael Chana4636962009-06-08 18:14:43 -07004528
4529 sb_id = cp->status_blk_num;
4530 tx_cid = 20;
Michael Chana4636962009-06-08 18:14:43 -07004531 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4532 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
Michael Chana4dde3a2010-02-24 14:42:08 +00004533 struct status_block_msix *sblk = cp->status_blk.bnx2;
Michael Chana4636962009-06-08 18:14:43 -07004534
4535 tx_cid = TX_TSS_CID + sb_id - 1;
Michael Chana4636962009-06-08 18:14:43 -07004536 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4537 (TX_TSS_CID << 7));
4538 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4539 }
4540 cp->tx_cons = *cp->tx_cons_ptr;
4541
4542 cid_addr = GET_CID_ADDR(tx_cid);
Michael Chan4ce45e02012-12-06 10:33:10 +00004543 if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
Michael Chana4636962009-06-08 18:14:43 -07004544 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4545
4546 for (i = 0; i < PHY_CTX_SIZE; i += 4)
4547 cnic_ctx_wr(dev, cid_addr2, i, 0);
4548
4549 offset0 = BNX2_L2CTX_TYPE_XI;
4550 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4551 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4552 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4553 } else {
Michael Chanb58ffb42010-05-27 16:31:41 -07004554 cnic_init_context(dev, tx_cid);
4555 cnic_init_context(dev, tx_cid + 1);
4556
Michael Chana4636962009-06-08 18:14:43 -07004557 offset0 = BNX2_L2CTX_TYPE;
4558 offset1 = BNX2_L2CTX_CMD_TYPE;
4559 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4560 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4561 }
4562 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4563 cnic_ctx_wr(dev, cid_addr, offset0, val);
4564
4565 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4566 cnic_ctx_wr(dev, cid_addr, offset1, val);
4567
Joe Perches43d620c2011-06-16 19:08:06 +00004568 txbd = udev->l2_ring;
Michael Chana4636962009-06-08 18:14:43 -07004569
Michael Chancd801532010-10-13 14:06:49 +00004570 buf_map = udev->l2_buf_map;
Michael Chan2bc40782012-12-06 10:33:09 +00004571 for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) {
Michael Chana4636962009-06-08 18:14:43 -07004572 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4573 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4574 }
Michael Chancd801532010-10-13 14:06:49 +00004575 val = (u64) ring_map >> 32;
Michael Chana4636962009-06-08 18:14:43 -07004576 cnic_ctx_wr(dev, cid_addr, offset2, val);
4577 txbd->tx_bd_haddr_hi = val;
4578
Michael Chancd801532010-10-13 14:06:49 +00004579 val = (u64) ring_map & 0xffffffff;
Michael Chana4636962009-06-08 18:14:43 -07004580 cnic_ctx_wr(dev, cid_addr, offset3, val);
4581 txbd->tx_bd_haddr_lo = val;
4582}
4583
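/* Set up the L2 rx ring (CID 2) used by the uio interface: program the
 * rx context and BD chain, then update the RXP flood scratch register.
 */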
4584static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4585{
4586 struct cnic_local *cp = dev->cnic_priv;
4587 struct cnic_eth_dev *ethdev = cp->ethdev;
Michael Chancd801532010-10-13 14:06:49 +00004588 struct cnic_uio_dev *udev = cp->udev;
Michael Chana4636962009-06-08 18:14:43 -07004589 u32 cid_addr, sb_id, val, coal_reg, coal_val;
4590 int i;
Michael Chan2bc40782012-12-06 10:33:09 +00004591 struct bnx2_rx_bd *rxbd;
Michael Chana4dde3a2010-02-24 14:42:08 +00004592 struct status_block *s_blk = cp->status_blk.gen;
Michael Chancd801532010-10-13 14:06:49 +00004593 dma_addr_t ring_map = udev->l2_ring_map;
Michael Chana4636962009-06-08 18:14:43 -07004594
4595 sb_id = cp->status_blk_num;
4596 cnic_init_context(dev, 2);
4597 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
4598 coal_reg = BNX2_HC_COMMAND;
4599 coal_val = CNIC_RD(dev, coal_reg);
4600 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
Michael Chana4dde3a2010-02-24 14:42:08 +00004601 struct status_block_msix *sblk = cp->status_blk.bnx2;
Michael Chana4636962009-06-08 18:14:43 -07004602
4603 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
4604 coal_reg = BNX2_HC_COALESCE_NOW;
4605 coal_val = 1 << (11 + sb_id);
4606 }
4607 i = 0;
4608 while (!(*cp->rx_cons_ptr != 0) && i < 10) {
4609 CNIC_WR(dev, coal_reg, coal_val);
4610 udelay(10);
4611 i++;
4612 barrier();
4613 }
4614 cp->rx_cons = *cp->rx_cons_ptr;
4615
4616 cid_addr = GET_CID_ADDR(2);
4617 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4618 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4619 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4620
4621 if (sb_id == 0)
Michael Chand0549382009-10-28 03:41:59 -07004622 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
Michael Chana4636962009-06-08 18:14:43 -07004623 else
Michael Chand0549382009-10-28 03:41:59 -07004624 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
Michael Chana4636962009-06-08 18:14:43 -07004625 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4626
Michael Chan2bc40782012-12-06 10:33:09 +00004627 rxbd = udev->l2_ring + BNX2_PAGE_SIZE;
4628 for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
Michael Chana4636962009-06-08 18:14:43 -07004629 dma_addr_t buf_map;
4630 int n = (i % cp->l2_rx_ring_size) + 1;
4631
Michael Chancd801532010-10-13 14:06:49 +00004632 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
Michael Chana4636962009-06-08 18:14:43 -07004633 rxbd->rx_bd_len = cp->l2_single_buf_size;
4634 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4635 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4636 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4637 }
Michael Chan2bc40782012-12-06 10:33:09 +00004638 val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
Michael Chana4636962009-06-08 18:14:43 -07004639 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4640 rxbd->rx_bd_haddr_hi = val;
4641
Michael Chan2bc40782012-12-06 10:33:09 +00004642 val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
Michael Chana4636962009-06-08 18:14:43 -07004643 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4644 rxbd->rx_bd_haddr_lo = val;
4645
4646 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4647 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
4648}
4649
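/* Shut down the L2 rx ring by submitting an L2 FLUSH kwqe to the device. */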
4650static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4651{
4652 struct kwqe *wqes[1], l2kwqe;
4653
4654 memset(&l2kwqe, 0, sizeof(l2kwqe));
4655 wqes[0] = &l2kwqe;
Michael Chane1928c82010-12-23 07:43:04 +00004656 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
Michael Chana4636962009-06-08 18:14:43 -07004657 (L2_KWQE_OPCODE_VALUE_FLUSH <<
4658 KWQE_OPCODE_SHIFT) | 2;
4659 dev->submit_kwqes(dev, wqes, 1);
4660}
4661
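/* Read the iSCSI MAC address from shared memory and program the EMAC
 * match and RPM sort registers accordingly.
 */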
4662static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4663{
4664 struct cnic_local *cp = dev->cnic_priv;
4665 u32 val;
4666
4667 val = cp->func << 2;
4668
4669 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4670
4671 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4672 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
4673 dev->mac_addr[0] = (u8) (val >> 8);
4674 dev->mac_addr[1] = (u8) val;
4675
4676 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4677
4678 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4679 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
4680 dev->mac_addr[2] = (u8) (val >> 24);
4681 dev->mac_addr[3] = (u8) (val >> 16);
4682 dev->mac_addr[4] = (u8) (val >> 8);
4683 dev->mac_addr[5] = (u8) val;
4684
4685 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4686
4687 val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
Michael Chan4ce45e02012-12-06 10:33:10 +00004688 if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
Michael Chana4636962009-06-08 18:14:43 -07004689 val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
4690
4691 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4692 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4693 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
4694}
4695
4696static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4697{
4698 struct cnic_local *cp = dev->cnic_priv;
4699 struct cnic_eth_dev *ethdev = cp->ethdev;
Michael Chana4dde3a2010-02-24 14:42:08 +00004700 struct status_block *sblk = cp->status_blk.gen;
Michael Chane6c28892010-06-24 14:58:39 +00004701 u32 val, kcq_cid_addr, kwq_cid_addr;
Michael Chana4636962009-06-08 18:14:43 -07004702 int err;
4703
4704 cnic_set_bnx2_mac(dev);
4705
4706 val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4707 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
Michael Chan2bc40782012-12-06 10:33:09 +00004708 if (BNX2_PAGE_BITS > 12)
Michael Chana4636962009-06-08 18:14:43 -07004709 val |= (12 - 8) << 4;
4710 else
Michael Chan2bc40782012-12-06 10:33:09 +00004711 val |= (BNX2_PAGE_BITS - 8) << 4;
Michael Chana4636962009-06-08 18:14:43 -07004712
4713 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4714
4715 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4716 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4717 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4718
4719 err = cnic_setup_5709_context(dev, 1);
4720 if (err)
4721 return err;
4722
4723 cnic_init_context(dev, KWQ_CID);
4724 cnic_init_context(dev, KCQ_CID);
4725
Michael Chane6c28892010-06-24 14:58:39 +00004726 kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
Michael Chana4636962009-06-08 18:14:43 -07004727 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
4728
4729 cp->max_kwq_idx = MAX_KWQ_IDX;
4730 cp->kwq_prod_idx = 0;
4731 cp->kwq_con_idx = 0;
Michael Chan1f1332a2010-05-18 11:32:52 +00004732 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
Michael Chana4636962009-06-08 18:14:43 -07004733
Michael Chan4ce45e02012-12-06 10:33:10 +00004734 if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708)
Michael Chana4636962009-06-08 18:14:43 -07004735 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4736 else
4737 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4738
4739 /* Initialize the kernel work queue context. */
4740 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
Michael Chan2bc40782012-12-06 10:33:09 +00004741 (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
Michael Chane6c28892010-06-24 14:58:39 +00004742 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
Michael Chana4636962009-06-08 18:14:43 -07004743
Michael Chan2bc40782012-12-06 10:33:09 +00004744 val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
Michael Chane6c28892010-06-24 14:58:39 +00004745 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
Michael Chana4636962009-06-08 18:14:43 -07004746
Michael Chan2bc40782012-12-06 10:33:09 +00004747 val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
Michael Chane6c28892010-06-24 14:58:39 +00004748 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
Michael Chana4636962009-06-08 18:14:43 -07004749
4750 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
Michael Chane6c28892010-06-24 14:58:39 +00004751 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
Michael Chana4636962009-06-08 18:14:43 -07004752
4753 val = (u32) cp->kwq_info.pgtbl_map;
Michael Chane6c28892010-06-24 14:58:39 +00004754 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
Michael Chana4636962009-06-08 18:14:43 -07004755
Michael Chane6c28892010-06-24 14:58:39 +00004756 kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
4757 cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
Michael Chana4636962009-06-08 18:14:43 -07004758
Michael Chane6c28892010-06-24 14:58:39 +00004759 cp->kcq1.sw_prod_idx = 0;
4760 cp->kcq1.hw_prod_idx_ptr =
Joe Perches64699332012-06-04 12:44:16 +00004761 &sblk->status_completion_producer_index;
Michael Chane6c28892010-06-24 14:58:39 +00004762
Joe Perches64699332012-06-04 12:44:16 +00004763 cp->kcq1.status_idx_ptr = &sblk->status_idx;
Michael Chana4636962009-06-08 18:14:43 -07004764
4765 /* Initialize the kernel completion queue context. */
4766 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
Michael Chan2bc40782012-12-06 10:33:09 +00004767 (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
Michael Chane6c28892010-06-24 14:58:39 +00004768 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
Michael Chana4636962009-06-08 18:14:43 -07004769
Michael Chan2bc40782012-12-06 10:33:09 +00004770 val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
Michael Chane6c28892010-06-24 14:58:39 +00004771 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
Michael Chana4636962009-06-08 18:14:43 -07004772
Michael Chan2bc40782012-12-06 10:33:09 +00004773 val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
Michael Chane6c28892010-06-24 14:58:39 +00004774 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
Michael Chana4636962009-06-08 18:14:43 -07004775
Michael Chane6c28892010-06-24 14:58:39 +00004776 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
4777 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
Michael Chana4636962009-06-08 18:14:43 -07004778
Michael Chane6c28892010-06-24 14:58:39 +00004779 val = (u32) cp->kcq1.dma.pgtbl_map;
4780 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
Michael Chana4636962009-06-08 18:14:43 -07004781
4782 cp->int_num = 0;
4783 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
Michael Chane6c28892010-06-24 14:58:39 +00004784 struct status_block_msix *msblk = cp->status_blk.bnx2;
Michael Chana4636962009-06-08 18:14:43 -07004785 u32 sb_id = cp->status_blk_num;
Michael Chand0549382009-10-28 03:41:59 -07004786 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
Michael Chana4636962009-06-08 18:14:43 -07004787
Michael Chane6c28892010-06-24 14:58:39 +00004788 cp->kcq1.hw_prod_idx_ptr =
Joe Perches64699332012-06-04 12:44:16 +00004789 &msblk->status_completion_producer_index;
4790 cp->kcq1.status_idx_ptr = &msblk->status_idx;
4791 cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
Michael Chana4636962009-06-08 18:14:43 -07004792 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
Michael Chane6c28892010-06-24 14:58:39 +00004793 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4794 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
Michael Chana4636962009-06-08 18:14:43 -07004795 }
4796
4797 /* Enable Command Scheduler notification when we write to the
4798 * host producer index of the kernel contexts. */
4799 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4800
4801 /* Enable Command Scheduler notification when we write to either
4802 * the Send Queue or Receive Queue producer indexes of the kernel
4803 * bypass contexts. */
4804 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4805 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4806
4807 /* Notify COM when the driver posts an application buffer. */
4808 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4809
4810 /* Set the CP and COM doorbells. These two processors poll the
4811 * doorbell for a non-zero value before running. This must be done
4812 * after setting up the kernel queue contexts. */
4813 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4814 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4815
4816 cnic_init_bnx2_tx_ring(dev);
4817 cnic_init_bnx2_rx_ring(dev);
4818
4819 err = cnic_init_bnx2_irq(dev);
4820 if (err) {
Joe Perchesddf79b22010-02-17 15:01:54 +00004821 netdev_err(dev->netdev, "cnic_init_irq failed\n");
Michael Chana4636962009-06-08 18:14:43 -07004822 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4823 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4824 return err;
4825 }
4826
Michael Chanad9b4352013-01-23 03:21:52 +00004827 ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
4828
Michael Chana4636962009-06-08 18:14:43 -07004829 return 0;
4830}
4831
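/* Write the (aligned) context block addresses into the device context
 * table, starting at the offset reserved for cnic.
 */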
Michael Chan71034ba2009-10-10 13:46:59 +00004832static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4833{
4834 struct cnic_local *cp = dev->cnic_priv;
4835 struct cnic_eth_dev *ethdev = cp->ethdev;
4836 u32 start_offset = ethdev->ctx_tbl_offset;
4837 int i;
4838
4839 for (i = 0; i < cp->ctx_blks; i++) {
4840 struct cnic_ctx *ctx = &cp->ctx_arr[i];
4841 dma_addr_t map = ctx->mapping;
4842
4843 if (cp->ctx_align) {
4844 unsigned long mask = cp->ctx_align - 1;
4845
4846 map = (map + mask) & ~mask;
4847 }
4848
4849 cnic_ctx_tbl_wr(dev, start_offset + i, map);
4850 }
4851}
4852
4853static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4854{
4855 struct cnic_local *cp = dev->cnic_priv;
4856 struct cnic_eth_dev *ethdev = cp->ethdev;
4857 int err = 0;
4858
Joe Perches164165d2009-11-19 09:30:10 +00004859 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
Michael Chan71034ba2009-10-10 13:46:59 +00004860 (unsigned long) dev);
Michael Chan6e0dc642010-10-13 14:06:44 +00004861 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4862 err = cnic_request_irq(dev);
4863
Michael Chan71034ba2009-10-10 13:46:59 +00004864 return err;
4865}
4866
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004867static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4868 u16 sb_id, u8 sb_index,
4869 u8 disable)
4870{
Michael Chan68c64d22012-12-06 10:33:11 +00004871 struct bnx2x *bp = netdev_priv(dev->netdev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004872
4873 u32 addr = BAR_CSTRORM_INTMEM +
4874 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4875 offsetof(struct hc_status_block_data_e1x, index_data) +
4876 sizeof(struct hc_index_data)*sb_index +
4877 offsetof(struct hc_index_data, flags);
4878 u16 flags = CNIC_RD16(dev, addr);
4879 /* clear and set */
4880 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4881 flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4882 HC_INDEX_DATA_HC_ENABLED);
4883 CNIC_WR16(dev, addr, flags);
4884}
4885
Michael Chan71034ba2009-10-10 13:46:59 +00004886static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4887{
4888 struct cnic_local *cp = dev->cnic_priv;
Michael Chan68c64d22012-12-06 10:33:11 +00004889 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chan71034ba2009-10-10 13:46:59 +00004890 u8 sb_id = cp->status_blk_num;
Michael Chan71034ba2009-10-10 13:46:59 +00004891
4892 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004893 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4894 offsetof(struct hc_status_block_data_e1x, index_data) +
4895 sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004896 offsetof(struct hc_index_data, timeout), 64 / 4);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004897 cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
Michael Chan71034ba2009-10-10 13:46:59 +00004898}
4899
4900static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4901{
4902}
4903
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004904static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4905 struct client_init_ramrod_data *data)
Michael Chan71034ba2009-10-10 13:46:59 +00004906{
4907 struct cnic_local *cp = dev->cnic_priv;
Michael Chan104a43e2013-09-02 11:42:28 -07004908 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chancd801532010-10-13 14:06:49 +00004909 struct cnic_uio_dev *udev = cp->udev;
4910 union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
4911 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004912 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
Michael Chan71034ba2009-10-10 13:46:59 +00004913 int i;
Michael Chan5159fdc2010-12-23 07:42:59 +00004914 u32 cli = cp->ethdev->iscsi_l2_client_id;
Michael Chan71034ba2009-10-10 13:46:59 +00004915 u32 val;
4916
Michael Chan2bc40782012-12-06 10:33:09 +00004917 memset(txbd, 0, BNX2_PAGE_SIZE);
Michael Chan71034ba2009-10-10 13:46:59 +00004918
Michael Chancd801532010-10-13 14:06:49 +00004919 buf_map = udev->l2_buf_map;
Michael Chan2bc40782012-12-06 10:33:09 +00004920 for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
Michael Chan71034ba2009-10-10 13:46:59 +00004921 struct eth_tx_start_bd *start_bd = &txbd->start_bd;
Yuval Mintz96bed4b2012-10-01 03:46:19 +00004922 struct eth_tx_parse_bd_e1x *pbd_e1x =
4923 &((txbd + 1)->parse_bd_e1x);
4924 struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
Michael Chan71034ba2009-10-10 13:46:59 +00004925 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
4926
4927 start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4928 start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4929 reg_bd->addr_hi = start_bd->addr_hi;
4930 reg_bd->addr_lo = start_bd->addr_lo + 0x10;
4931 start_bd->nbytes = cpu_to_le16(0x10);
4932 start_bd->nbd = cpu_to_le16(3);
4933 start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Yuval Mintz96bed4b2012-10-01 03:46:19 +00004934 start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
Michael Chan71034ba2009-10-10 13:46:59 +00004935 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
4936
Michael Chan104a43e2013-09-02 11:42:28 -07004937 if (BNX2X_CHIP_IS_E2_PLUS(bp))
Yuval Mintz96bed4b2012-10-01 03:46:19 +00004938 pbd_e2->parsing_data = (UNICAST_ADDRESS <<
Michael Chan4ce45e02012-12-06 10:33:10 +00004939 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
Yuval Mintz96bed4b2012-10-01 03:46:19 +00004940 else
Michael Chan4ce45e02012-12-06 10:33:10 +00004941 pbd_e1x->global_data = (UNICAST_ADDRESS <<
Yuval Mintz96bed4b2012-10-01 03:46:19 +00004942 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
Michael Chan71034ba2009-10-10 13:46:59 +00004943 }
Michael Chan71034ba2009-10-10 13:46:59 +00004944
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004945 val = (u64) ring_map >> 32;
Michael Chan71034ba2009-10-10 13:46:59 +00004946 txbd->next_bd.addr_hi = cpu_to_le32(val);
4947
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004948 data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
Michael Chan71034ba2009-10-10 13:46:59 +00004949
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004950 val = (u64) ring_map & 0xffffffff;
Michael Chan71034ba2009-10-10 13:46:59 +00004951 txbd->next_bd.addr_lo = cpu_to_le32(val);
4952
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004953 data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
Michael Chan71034ba2009-10-10 13:46:59 +00004954
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004955 /* Other ramrod params */
4956 data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
4957 data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
Michael Chan71034ba2009-10-10 13:46:59 +00004958
4959 /* reset xstorm per-client statistics */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004960 if (cli < MAX_STAT_COUNTER_ID) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004961 data->general.statistics_zero_flg = 1;
4962 data->general.statistics_en_flg = 1;
4963 data->general.statistics_counter_id = cli;
Dmitry Kravkov6b2a5412010-06-23 11:57:09 -07004964 }
Michael Chan71034ba2009-10-10 13:46:59 +00004965
4966 cp->tx_cons_ptr =
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004967 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
Michael Chan71034ba2009-10-10 13:46:59 +00004968}
4969
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004970static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4971 struct client_init_ramrod_data *data)
Michael Chan71034ba2009-10-10 13:46:59 +00004972{
4973 struct cnic_local *cp = dev->cnic_priv;
Michael Chan104a43e2013-09-02 11:42:28 -07004974 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chancd801532010-10-13 14:06:49 +00004975 struct cnic_uio_dev *udev = cp->udev;
4976 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
Michael Chan2bc40782012-12-06 10:33:09 +00004977 BNX2_PAGE_SIZE);
Michael Chan71034ba2009-10-10 13:46:59 +00004978 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
Michael Chan2bc40782012-12-06 10:33:09 +00004979 (udev->l2_ring + (2 * BNX2_PAGE_SIZE));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004980 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
Michael Chan71034ba2009-10-10 13:46:59 +00004981 int i;
Michael Chan5159fdc2010-12-23 07:42:59 +00004982 u32 cli = cp->ethdev->iscsi_l2_client_id;
Michael Chan104a43e2013-09-02 11:42:28 -07004983 int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
Michael Chan71034ba2009-10-10 13:46:59 +00004984 u32 val;
Michael Chancd801532010-10-13 14:06:49 +00004985 dma_addr_t ring_map = udev->l2_ring_map;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004986
4987 /* General data */
4988 data->general.client_id = cli;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004989 data->general.activate_flg = 1;
4990 data->general.sp_client_id = cli;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004991 data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
4992 data->general.func_id = cp->pfid;
Michael Chan71034ba2009-10-10 13:46:59 +00004993
4994 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
4995 dma_addr_t buf_map;
4996 int n = (i % cp->l2_rx_ring_size) + 1;
4997
Michael Chancd801532010-10-13 14:06:49 +00004998 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
Michael Chan71034ba2009-10-10 13:46:59 +00004999 rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
5000 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
5001 }
Michael Chan71034ba2009-10-10 13:46:59 +00005002
Michael Chan2bc40782012-12-06 10:33:09 +00005003 val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
Michael Chan71034ba2009-10-10 13:46:59 +00005004 rxbd->addr_hi = cpu_to_le32(val);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005005 data->rx.bd_page_base.hi = cpu_to_le32(val);
Michael Chan71034ba2009-10-10 13:46:59 +00005006
Michael Chan2bc40782012-12-06 10:33:09 +00005007 val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
Michael Chan71034ba2009-10-10 13:46:59 +00005008 rxbd->addr_lo = cpu_to_le32(val);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005009 data->rx.bd_page_base.lo = cpu_to_le32(val);
Michael Chan71034ba2009-10-10 13:46:59 +00005010
5011 rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
Michael Chan2bc40782012-12-06 10:33:09 +00005012 val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32;
Michael Chan71034ba2009-10-10 13:46:59 +00005013 rxcqe->addr_hi = cpu_to_le32(val);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005014 data->rx.cqe_page_base.hi = cpu_to_le32(val);
Michael Chan71034ba2009-10-10 13:46:59 +00005015
Michael Chan2bc40782012-12-06 10:33:09 +00005016 val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 0xffffffff;
Michael Chan71034ba2009-10-10 13:46:59 +00005017 rxcqe->addr_lo = cpu_to_le32(val);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005018 data->rx.cqe_page_base.lo = cpu_to_le32(val);
Michael Chan71034ba2009-10-10 13:46:59 +00005019
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005020 /* Other ramrod params */
5021 data->rx.client_qzone_id = cl_qzone_id;
5022 data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
5023 data->rx.status_block_id = BNX2X_DEF_SB_ID;
Michael Chan71034ba2009-10-10 13:46:59 +00005024
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005025 data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
Michael Chan71034ba2009-10-10 13:46:59 +00005026
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005027 data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005028 data->rx.outer_vlan_removal_enable_flg = 1;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005029 data->rx.silent_vlan_removal_flg = 1;
5030 data->rx.silent_vlan_value = 0;
5031 data->rx.silent_vlan_mask = 0xffff;
Michael Chan71034ba2009-10-10 13:46:59 +00005032
5033 cp->rx_cons_ptr =
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005034 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
Michael Chan5159fdc2010-12-23 07:42:59 +00005035 cp->rx_cons = *cp->rx_cons_ptr;
Michael Chan71034ba2009-10-10 13:46:59 +00005036}
5037
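/* Hook the KCQ1 (iSCSI) and, on E2 and newer chips, KCQ2 (FCoE)
 * producer/consumer pointers up to the bnx2x status block.
 */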
Michael Chane21ba412010-12-23 07:43:03 +00005038static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
5039{
5040 struct cnic_local *cp = dev->cnic_priv;
Michael Chan68c64d22012-12-06 10:33:11 +00005041 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chane21ba412010-12-23 07:43:03 +00005042 u32 pfid = cp->pfid;
5043
5044 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
5045 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
5046 cp->kcq1.sw_prod_idx = 0;
5047
Michael Chan104a43e2013-09-02 11:42:28 -07005048 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
Michael Chane21ba412010-12-23 07:43:03 +00005049 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5050
5051 cp->kcq1.hw_prod_idx_ptr =
5052 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
5053 cp->kcq1.status_idx_ptr =
5054 &sb->sb.running_index[SM_RX_ID];
5055 } else {
5056 struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
5057
5058 cp->kcq1.hw_prod_idx_ptr =
5059 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
5060 cp->kcq1.status_idx_ptr =
5061 &sb->sb.running_index[SM_RX_ID];
5062 }
5063
Michael Chan104a43e2013-09-02 11:42:28 -07005064 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
Michael Chane21ba412010-12-23 07:43:03 +00005065 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5066
5067 cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
5068 USTORM_FCOE_EQ_PROD_OFFSET(pfid);
5069 cp->kcq2.sw_prod_idx = 0;
5070 cp->kcq2.hw_prod_idx_ptr =
5071 &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
5072 cp->kcq2.status_idx_ptr =
5073 &sb->sb.running_index[SM_RX_ID];
5074 }
5075}
5076
Michael Chan71034ba2009-10-10 13:46:59 +00005077static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
5078{
5079 struct cnic_local *cp = dev->cnic_priv;
Michael Chan68c64d22012-12-06 10:33:11 +00005080 struct bnx2x *bp = netdev_priv(dev->netdev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005081 struct cnic_eth_dev *ethdev = cp->ethdev;
Michael Chan68c64d22012-12-06 10:33:11 +00005082 int func, ret;
Michael Chan14203982010-10-06 03:16:06 +00005083 u32 pfid;
Michael Chan71034ba2009-10-10 13:46:59 +00005084
Michael Chana9e0a4f2012-01-04 12:12:27 +00005085 dev->stats_addr = ethdev->addr_drv_info_to_mcp;
Michael Chan68c64d22012-12-06 10:33:11 +00005086 cp->port_mode = bp->common.chip_port_mode;
5087 cp->pfid = bp->pfid;
5088 cp->func = bp->pf_num;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005089
Michael Chan68c64d22012-12-06 10:33:11 +00005090 func = CNIC_FUNC(cp);
Michael Chan14203982010-10-06 03:16:06 +00005091 pfid = cp->pfid;
5092
Michael Chan71034ba2009-10-10 13:46:59 +00005093 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
Eddie Wai11f23aa2011-06-08 19:29:34 +00005094 cp->iscsi_start_cid, 0);
Michael Chan71034ba2009-10-10 13:46:59 +00005095
5096 if (ret)
5097 return -ENOMEM;
5098
Michael Chan104a43e2013-09-02 11:42:28 -07005099 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
Michael Chandc219a22011-08-26 09:45:39 +00005100 ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
Eddie Wai11f23aa2011-06-08 19:29:34 +00005101 cp->fcoe_start_cid, 0);
Michael Chane1928c82010-12-23 07:43:04 +00005102
5103 if (ret)
5104 return -ENOMEM;
5105 }
5106
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005107 cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
5108
Michael Chane21ba412010-12-23 07:43:03 +00005109 cnic_init_bnx2x_kcq(dev);
Michael Chan71034ba2009-10-10 13:46:59 +00005110
Michael Chan71034ba2009-10-10 13:46:59 +00005111 /* Only 1 EQ */
Michael Chane6c28892010-06-24 14:58:39 +00005112 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
Michael Chan71034ba2009-10-10 13:46:59 +00005113 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005114 CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
Michael Chan71034ba2009-10-10 13:46:59 +00005115 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005116 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
Michael Chane6c28892010-06-24 14:58:39 +00005117 cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
Michael Chan71034ba2009-10-10 13:46:59 +00005118 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005119 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
Michael Chane6c28892010-06-24 14:58:39 +00005120 (u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
Michael Chan71034ba2009-10-10 13:46:59 +00005121 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005122 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
Michael Chane6c28892010-06-24 14:58:39 +00005123 cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
Michael Chan71034ba2009-10-10 13:46:59 +00005124 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005125 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
Michael Chane6c28892010-06-24 14:58:39 +00005126 (u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
Michael Chan71034ba2009-10-10 13:46:59 +00005127 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005128 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
Michael Chan71034ba2009-10-10 13:46:59 +00005129 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005130 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
Michael Chan71034ba2009-10-10 13:46:59 +00005131 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005132 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005133 HC_INDEX_ISCSI_EQ_CONS);
Michael Chan71034ba2009-10-10 13:46:59 +00005134
Michael Chan71034ba2009-10-10 13:46:59 +00005135 CNIC_WR(dev, BAR_USTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005136 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00005137 cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
5138 CNIC_WR(dev, BAR_USTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005139 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
Michael Chan71034ba2009-10-10 13:46:59 +00005140 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
5141
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005142 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
5143 TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
5144
Michael Chan71034ba2009-10-10 13:46:59 +00005145 cnic_setup_bnx2x_context(dev);
5146
Michael Chan71034ba2009-10-10 13:46:59 +00005147 ret = cnic_init_bnx2x_irq(dev);
5148 if (ret)
5149 return ret;
5150
Michael Chanad9b4352013-01-23 03:21:52 +00005151 ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
Michael Chan71034ba2009-10-10 13:46:59 +00005152 return 0;
5153}
5154
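/* Bring up the L2 rings for the uio interface; on bnx2x devices this
 * issues a CLIENT_SETUP ramrod and waits for it to complete.
 */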
Michael Chan86b53602009-10-10 13:46:57 +00005155static void cnic_init_rings(struct cnic_dev *dev)
5156{
Michael Chan541a7812010-10-06 03:17:22 +00005157 struct cnic_local *cp = dev->cnic_priv;
Michael Chan68c64d22012-12-06 10:33:11 +00005158 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chancd801532010-10-13 14:06:49 +00005159 struct cnic_uio_dev *udev = cp->udev;
Michael Chan541a7812010-10-06 03:17:22 +00005160
5161 if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5162 return;
5163
Michael Chan86b53602009-10-10 13:46:57 +00005164 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5165 cnic_init_bnx2_tx_ring(dev);
5166 cnic_init_bnx2_rx_ring(dev);
Michael Chan541a7812010-10-06 03:17:22 +00005167 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
Michael Chan71034ba2009-10-10 13:46:59 +00005168 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
Michael Chan5159fdc2010-12-23 07:42:59 +00005169 u32 cli = cp->ethdev->iscsi_l2_client_id;
5170 u32 cid = cp->ethdev->iscsi_l2_cid;
Michael Chan68d7c1a2011-01-05 15:14:13 +00005171 u32 cl_qzone_id;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005172 struct client_init_ramrod_data *data;
Michael Chan71034ba2009-10-10 13:46:59 +00005173 union l5cm_specific_data l5_data;
5174 struct ustorm_eth_rx_producers rx_prods = {0};
Michael Chane1dd8832011-07-13 17:24:19 +00005175 u32 off, i, *cid_ptr;
Michael Chan71034ba2009-10-10 13:46:59 +00005176
5177 rx_prods.bd_prod = 0;
5178 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
5179 barrier();
5180
Michael Chan104a43e2013-09-02 11:42:28 -07005181 cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005182
Michael Chanc7596b72009-12-02 15:15:35 +00005183 off = BAR_USTRORM_INTMEM +
Michael Chan104a43e2013-09-02 11:42:28 -07005184 (BNX2X_CHIP_IS_E2_PLUS(bp) ?
Michael Chanee87a822010-10-13 14:06:51 +00005185 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
5186 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
Michael Chan71034ba2009-10-10 13:46:59 +00005187
5188 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
Michael Chanc7596b72009-12-02 15:15:35 +00005189 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
Michael Chan71034ba2009-10-10 13:46:59 +00005190
Michael Chan48f753d2010-05-18 11:32:53 +00005191 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5192
Michael Chancd801532010-10-13 14:06:49 +00005193 data = udev->l2_buf;
Michael Chane1dd8832011-07-13 17:24:19 +00005194 cid_ptr = udev->l2_buf + 12;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005195
5196 memset(data, 0, sizeof(*data));
5197
5198 cnic_init_bnx2x_tx_ring(dev, data);
5199 cnic_init_bnx2x_rx_ring(dev, data);
5200
Michael Chancd801532010-10-13 14:06:49 +00005201 l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
5202 l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005203
Michael Chan541a7812010-10-06 03:17:22 +00005204 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5205
Michael Chan71034ba2009-10-10 13:46:59 +00005206 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
Michael Chan68d7c1a2011-01-05 15:14:13 +00005207 cid, ETH_CONNECTION_TYPE, &l5_data);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005208
Michael Chan48f753d2010-05-18 11:32:53 +00005209 i = 0;
5210 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5211 ++i < 10)
5212 msleep(1);
5213
5214 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5215 netdev_err(dev->netdev,
5216 "iSCSI CLIENT_SETUP did not complete\n");
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00005217 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
Michael Chan5159fdc2010-12-23 07:42:59 +00005218 cnic_ring_ctl(dev, cid, cli, 1);
Michael Chane1dd8832011-07-13 17:24:19 +00005219 *cid_ptr = cid;
Michael Chan86b53602009-10-10 13:46:57 +00005220 }
5221}
5222
5223static void cnic_shutdown_rings(struct cnic_dev *dev)
5224{
Michael Chan541a7812010-10-06 03:17:22 +00005225 struct cnic_local *cp = dev->cnic_priv;
Michael Chane1dd8832011-07-13 17:24:19 +00005226 struct cnic_uio_dev *udev = cp->udev;
5227 void *rx_ring;
Michael Chan541a7812010-10-06 03:17:22 +00005228
5229 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5230 return;
5231
Michael Chan86b53602009-10-10 13:46:57 +00005232 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5233 cnic_shutdown_bnx2_rx_ring(dev);
Michael Chan71034ba2009-10-10 13:46:59 +00005234 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
Michael Chan5159fdc2010-12-23 07:42:59 +00005235 u32 cli = cp->ethdev->iscsi_l2_client_id;
5236 u32 cid = cp->ethdev->iscsi_l2_cid;
Michael Chan8b065b62009-12-02 15:15:36 +00005237 union l5cm_specific_data l5_data;
Michael Chan48f753d2010-05-18 11:32:53 +00005238 int i;
Michael Chan71034ba2009-10-10 13:46:59 +00005239
Michael Chan5159fdc2010-12-23 07:42:59 +00005240 cnic_ring_ctl(dev, cid, cli, 0);
Michael Chan8b065b62009-12-02 15:15:36 +00005241
Michael Chan48f753d2010-05-18 11:32:53 +00005242 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5243
Michael Chan8b065b62009-12-02 15:15:36 +00005244 l5_data.phy_address.lo = cli;
5245 l5_data.phy_address.hi = 0;
5246 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
Michael Chan5159fdc2010-12-23 07:42:59 +00005247 cid, ETH_CONNECTION_TYPE, &l5_data);
Michael Chan48f753d2010-05-18 11:32:53 +00005248 i = 0;
5249 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5250 ++i < 10)
5251 msleep(1);
5252
5253 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5254 netdev_err(dev->netdev,
5255 "iSCSI CLIENT_HALT did not complete\n");
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00005256 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
Michael Chan1bcdc322009-12-10 15:40:57 +00005257
5258 memset(&l5_data, 0, sizeof(l5_data));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005259 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
Michael Chan68d7c1a2011-01-05 15:14:13 +00005260 cid, NONE_CONNECTION_TYPE, &l5_data);
Michael Chan1bcdc322009-12-10 15:40:57 +00005261 msleep(10);
Michael Chan86b53602009-10-10 13:46:57 +00005262 }
Michael Chan541a7812010-10-06 03:17:22 +00005263 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
Michael Chan2bc40782012-12-06 10:33:09 +00005264 rx_ring = udev->l2_ring + BNX2_PAGE_SIZE;
5265 memset(rx_ring, 0, BNX2_PAGE_SIZE);
Michael Chan86b53602009-10-10 13:46:57 +00005266}
5267
Michael Chana3059b12009-08-14 15:49:44 +00005268static int cnic_register_netdev(struct cnic_dev *dev)
5269{
5270 struct cnic_local *cp = dev->cnic_priv;
5271 struct cnic_eth_dev *ethdev = cp->ethdev;
5272 int err;
5273
5274 if (!ethdev)
5275 return -ENODEV;
5276
5277 if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
5278 return 0;
5279
5280 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
5281 if (err)
Joe Perchesddf79b22010-02-17 15:01:54 +00005282 netdev_err(dev->netdev, "register_cnic failed\n");
Michael Chana3059b12009-08-14 15:49:44 +00005283
Michael Chan9e9402e2013-08-02 11:28:23 -07005284 /* Read iSCSI config again. On some bnx2x devices, iSCSI config
5285 * can change after firmware is downloaded.
5286 */
5287 dev->max_iscsi_conn = ethdev->max_iscsi_conn;
5288 if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
5289 dev->max_iscsi_conn = 0;
5290
Michael Chana3059b12009-08-14 15:49:44 +00005291 return err;
5292}
5293
5294static void cnic_unregister_netdev(struct cnic_dev *dev)
5295{
5296 struct cnic_local *cp = dev->cnic_priv;
5297 struct cnic_eth_dev *ethdev = cp->ethdev;
5298
5299 if (!ethdev)
5300 return;
5301
5302 ethdev->drv_unregister_cnic(dev->netdev);
5303}
5304
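/* Common bring-up path: allocate resources, start the chip-specific
 * hardware, open the connection manager, and enable interrupts.
 */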
Michael Chana4636962009-06-08 18:14:43 -07005305static int cnic_start_hw(struct cnic_dev *dev)
5306{
5307 struct cnic_local *cp = dev->cnic_priv;
5308 struct cnic_eth_dev *ethdev = cp->ethdev;
5309 int err;
5310
5311 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
5312 return -EALREADY;
5313
Michael Chana4636962009-06-08 18:14:43 -07005314 dev->regview = ethdev->io_base;
Michael Chana4636962009-06-08 18:14:43 -07005315 pci_dev_get(dev->pcidev);
5316 cp->func = PCI_FUNC(dev->pcidev->devfn);
Michael Chana4dde3a2010-02-24 14:42:08 +00005317 cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
Michael Chana4636962009-06-08 18:14:43 -07005318 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
5319
5320 err = cp->alloc_resc(dev);
5321 if (err) {
Joe Perchesddf79b22010-02-17 15:01:54 +00005322 netdev_err(dev->netdev, "allocate resource failure\n");
Michael Chana4636962009-06-08 18:14:43 -07005323 goto err1;
5324 }
5325
5326 err = cp->start_hw(dev);
5327 if (err)
5328 goto err1;
5329
5330 err = cnic_cm_open(dev);
5331 if (err)
5332 goto err1;
5333
5334 set_bit(CNIC_F_CNIC_UP, &dev->flags);
5335
5336 cp->enable_int(dev);
5337
5338 return 0;
5339
5340err1:
Michael Chana4636962009-06-08 18:14:43 -07005341 cp->free_resc(dev);
5342 pci_dev_put(dev->pcidev);
Michael Chana4636962009-06-08 18:14:43 -07005343 return err;
5344}
5345
5346static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
5347{
Michael Chana4636962009-06-08 18:14:43 -07005348 cnic_disable_bnx2_int_sync(dev);
5349
5350 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
5351 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
5352
5353 cnic_init_context(dev, KWQ_CID);
5354 cnic_init_context(dev, KCQ_CID);
5355
5356 cnic_setup_5709_context(dev, 0);
5357 cnic_free_irq(dev);
5358
Michael Chana4636962009-06-08 18:14:43 -07005359 cnic_free_resc(dev);
5360}
5361
Michael Chan71034ba2009-10-10 13:46:59 +00005362
5363static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5364{
5365 struct cnic_local *cp = dev->cnic_priv;
Michael Chan68c64d22012-12-06 10:33:11 +00005366 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chancaa9e932012-12-05 10:10:14 +00005367 u32 hc_index = HC_INDEX_ISCSI_EQ_CONS;
5368 u32 sb_id = cp->status_blk_num;
5369 u32 idx_off, syn_off;
Michael Chan71034ba2009-10-10 13:46:59 +00005370
5371 cnic_free_irq(dev);
Michael Chancaa9e932012-12-05 10:10:14 +00005372
Michael Chan104a43e2013-09-02 11:42:28 -07005373 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
Michael Chancaa9e932012-12-05 10:10:14 +00005374 idx_off = offsetof(struct hc_status_block_e2, index_values) +
5375 (hc_index * sizeof(u16));
5376
5377 syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id);
5378 } else {
5379 idx_off = offsetof(struct hc_status_block_e1x, index_values) +
5380 (hc_index * sizeof(u16));
5381
5382 syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id);
5383 }
5384 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
5385 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
5386 idx_off, 0);
5387
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005388 *cp->kcq1.hw_prod_idx_ptr = 0;
Michael Chan4e9c4fd2009-12-10 15:40:58 +00005389 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005390 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
Michael Chane6c28892010-06-24 14:58:39 +00005391 CNIC_WR16(dev, cp->kcq1.io_addr, 0);
Michael Chan71034ba2009-10-10 13:46:59 +00005392 cnic_free_resc(dev);
5393}
5394
Michael Chana4636962009-06-08 18:14:43 -07005395static void cnic_stop_hw(struct cnic_dev *dev)
5396{
5397 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5398 struct cnic_local *cp = dev->cnic_priv;
Michael Chan48f753d2010-05-18 11:32:53 +00005399 int i = 0;
Michael Chana4636962009-06-08 18:14:43 -07005400
Michael Chan48f753d2010-05-18 11:32:53 +00005401 /* Need to wait for the ring shutdown event to complete
5402 * before clearing the CNIC_UP flag.
5403 */
Michael Chan82346a72012-09-08 06:01:05 +00005404 while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
Michael Chan48f753d2010-05-18 11:32:53 +00005405 msleep(100);
5406 i++;
5407 }
Michael Chana3ceeeb2010-10-13 14:06:50 +00005408 cnic_shutdown_rings(dev);
Michael Chana2028b232012-06-27 15:08:19 +00005409 cp->stop_cm(dev);
Michael Chanad9b4352013-01-23 03:21:52 +00005410 cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ;
Michael Chana4636962009-06-08 18:14:43 -07005411 clear_bit(CNIC_F_CNIC_UP, &dev->flags);
Eric Dumazet2cfa5a02011-11-23 07:09:32 +00005412 RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
Michael Chana4636962009-06-08 18:14:43 -07005413 synchronize_rcu();
5414 cnic_cm_shutdown(dev);
5415 cp->stop_hw(dev);
5416 pci_dev_put(dev->pcidev);
5417 }
5418}
5419
5420static void cnic_free_dev(struct cnic_dev *dev)
5421{
5422 int i = 0;
5423
5424 while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
5425 msleep(100);
5426 i++;
5427 }
5428 if (atomic_read(&dev->ref_count) != 0)
Joe Perchesddf79b22010-02-17 15:01:54 +00005429 netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
Michael Chana4636962009-06-08 18:14:43 -07005430
Joe Perchesddf79b22010-02-17 15:01:54 +00005431 netdev_info(dev->netdev, "Removed CNIC device\n");
Michael Chana4636962009-06-08 18:14:43 -07005432 dev_put(dev->netdev);
5433 kfree(dev);
5434}
5435
5436static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
5437 struct pci_dev *pdev)
5438{
5439 struct cnic_dev *cdev;
5440 struct cnic_local *cp;
5441 int alloc_size;
5442
5443 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
5444
Joe Perchesb2adaca2013-02-03 17:43:58 +00005445 cdev = kzalloc(alloc_size, GFP_KERNEL);
5446 if (cdev == NULL)
Michael Chana4636962009-06-08 18:14:43 -07005447 return NULL;
Michael Chana4636962009-06-08 18:14:43 -07005448
5449 cdev->netdev = dev;
5450 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
5451 cdev->register_device = cnic_register_device;
5452 cdev->unregister_device = cnic_unregister_device;
5453 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
5454
5455 cp = cdev->cnic_priv;
5456 cp->dev = cdev;
Michael Chana4636962009-06-08 18:14:43 -07005457 cp->l2_single_buf_size = 0x400;
5458 cp->l2_rx_ring_size = 3;
5459
5460 spin_lock_init(&cp->cnic_ulp_lock);
5461
Joe Perchesddf79b22010-02-17 15:01:54 +00005462 netdev_info(dev, "Added CNIC device\n");
Michael Chana4636962009-06-08 18:14:43 -07005463
5464 return cdev;
5465}
5466
5467static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5468{
5469 struct pci_dev *pdev;
5470 struct cnic_dev *cdev;
5471 struct cnic_local *cp;
Michael Chan4bd9b0ff2012-12-06 10:33:12 +00005472 struct bnx2 *bp = netdev_priv(dev);
Michael Chana4636962009-06-08 18:14:43 -07005473 struct cnic_eth_dev *ethdev = NULL;
Michael Chana4636962009-06-08 18:14:43 -07005474
Michael Chan4bd9b0ff2012-12-06 10:33:12 +00005475 if (bp->cnic_probe)
5476 ethdev = (bp->cnic_probe)(dev);
5477
Michael Chana4636962009-06-08 18:14:43 -07005478 if (!ethdev)
5479 return NULL;
5480
5481 pdev = ethdev->pdev;
5482 if (!pdev)
5483 return NULL;
5484
5485 dev_hold(dev);
5486 pci_dev_get(pdev);
Sergei Shtylyovff938e42011-02-28 11:57:33 -08005487 if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
5488 pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
5489 (pdev->revision < 0x10)) {
5490 pci_dev_put(pdev);
5491 goto cnic_err;
Michael Chana4636962009-06-08 18:14:43 -07005492 }
5493 pci_dev_put(pdev);
5494
5495 cdev = cnic_alloc_dev(dev, pdev);
5496 if (cdev == NULL)
5497 goto cnic_err;
5498
5499 set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
5500 cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
5501
5502 cp = cdev->cnic_priv;
5503 cp->ethdev = ethdev;
5504 cdev->pcidev = pdev;
Michael Chanee87a822010-10-13 14:06:51 +00005505 cp->chip_id = ethdev->chip_id;
Michael Chana4636962009-06-08 18:14:43 -07005506
Michael Chan7625eb22011-06-08 19:29:36 +00005507 cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5508
Michael Chana4636962009-06-08 18:14:43 -07005509 cp->cnic_ops = &cnic_bnx2_ops;
5510 cp->start_hw = cnic_start_bnx2_hw;
5511 cp->stop_hw = cnic_stop_bnx2_hw;
5512 cp->setup_pgtbl = cnic_setup_page_tbl;
5513 cp->alloc_resc = cnic_alloc_bnx2_resc;
5514 cp->free_resc = cnic_free_resc;
5515 cp->start_cm = cnic_cm_init_bnx2_hw;
5516 cp->stop_cm = cnic_cm_stop_bnx2_hw;
5517 cp->enable_int = cnic_enable_bnx2_int;
5518 cp->disable_int_sync = cnic_disable_bnx2_int_sync;
5519 cp->close_conn = cnic_close_bnx2_conn;
Michael Chana4636962009-06-08 18:14:43 -07005520 return cdev;
5521
5522cnic_err:
5523 dev_put(dev);
5524 return NULL;
5525}
5526
Michael Chan71034ba2009-10-10 13:46:59 +00005527static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5528{
5529 struct pci_dev *pdev;
5530 struct cnic_dev *cdev;
5531 struct cnic_local *cp;
Michael Chan4bd9b0ff2012-12-06 10:33:12 +00005532 struct bnx2x *bp = netdev_priv(dev);
Michael Chan71034ba2009-10-10 13:46:59 +00005533 struct cnic_eth_dev *ethdev = NULL;
Michael Chan71034ba2009-10-10 13:46:59 +00005534
Michael Chan4bd9b0ff2012-12-06 10:33:12 +00005535 if (bp->cnic_probe)
5536 ethdev = bp->cnic_probe(dev);
5537
Michael Chan71034ba2009-10-10 13:46:59 +00005538 if (!ethdev)
5539 return NULL;
5540
5541 pdev = ethdev->pdev;
5542 if (!pdev)
5543 return NULL;
5544
5545 dev_hold(dev);
5546 cdev = cnic_alloc_dev(dev, pdev);
5547 if (cdev == NULL) {
5548 dev_put(dev);
5549 return NULL;
5550 }
5551
5552 set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
5553 cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
5554
5555 cp = cdev->cnic_priv;
5556 cp->ethdev = ethdev;
5557 cdev->pcidev = pdev;
Michael Chanee87a822010-10-13 14:06:51 +00005558 cp->chip_id = ethdev->chip_id;
Michael Chan71034ba2009-10-10 13:46:59 +00005559
Barak Witkowski1d187b32011-12-05 22:41:50 +00005560 cdev->stats_addr = ethdev->addr_drv_info_to_mcp;
5561
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00005562 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5563 cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
Michael Chan104a43e2013-09-02 11:42:28 -07005564 if (CNIC_SUPPORTS_FCOE(bp)) {
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00005565 cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
Bhanu Prakash Gollapudi0eb43b42013-04-22 19:22:30 +00005566 cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
5567 }
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00005568
Michael Chandc219a22011-08-26 09:45:39 +00005569 if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
5570 cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
5571
Vladislav Zolotarov2ba45142011-01-31 14:39:17 +00005572 memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
5573
Michael Chan71034ba2009-10-10 13:46:59 +00005574 cp->cnic_ops = &cnic_bnx2x_ops;
5575 cp->start_hw = cnic_start_bnx2x_hw;
5576 cp->stop_hw = cnic_stop_bnx2x_hw;
5577 cp->setup_pgtbl = cnic_setup_page_tbl_le;
5578 cp->alloc_resc = cnic_alloc_bnx2x_resc;
5579 cp->free_resc = cnic_free_resc;
5580 cp->start_cm = cnic_cm_init_bnx2x_hw;
5581 cp->stop_cm = cnic_cm_stop_bnx2x_hw;
5582 cp->enable_int = cnic_enable_bnx2x_int;
5583 cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
Michael Chan104a43e2013-09-02 11:42:28 -07005584 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
Michael Chanee87a822010-10-13 14:06:51 +00005585 cp->ack_int = cnic_ack_bnx2x_e2_msix;
Michael Chan8cc0e022012-09-08 06:01:03 +00005586 cp->arm_int = cnic_arm_bnx2x_e2_msix;
5587 } else {
Michael Chanee87a822010-10-13 14:06:51 +00005588 cp->ack_int = cnic_ack_bnx2x_msix;
Michael Chan8cc0e022012-09-08 06:01:03 +00005589 cp->arm_int = cnic_arm_bnx2x_msix;
5590 }
Michael Chan71034ba2009-10-10 13:46:59 +00005591 cp->close_conn = cnic_close_bnx2x_conn;
Michael Chan71034ba2009-10-10 13:46:59 +00005592 return cdev;
5593}
5594
Michael Chana4636962009-06-08 18:14:43 -07005595static struct cnic_dev *is_cnic_dev(struct net_device *dev)
5596{
5597 struct ethtool_drvinfo drvinfo;
5598 struct cnic_dev *cdev = NULL;
5599
5600 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
5601 memset(&drvinfo, 0, sizeof(drvinfo));
5602 dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
5603
5604 if (!strcmp(drvinfo.driver, "bnx2"))
5605 cdev = init_bnx2_cnic(dev);
Michael Chan71034ba2009-10-10 13:46:59 +00005606 if (!strcmp(drvinfo.driver, "bnx2x"))
5607 cdev = init_bnx2x_cnic(dev);
Michael Chana4636962009-06-08 18:14:43 -07005608 if (cdev) {
5609 write_lock(&cnic_dev_lock);
5610 list_add(&cdev->list, &cnic_dev_list);
5611 write_unlock(&cnic_dev_lock);
5612 }
5613 }
5614 return cdev;
5615}
5616
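/* Pass a netdev event (and optional VLAN id) to every registered ULP
 * that implements indicate_netevent.
 */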
static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
                              u16 vlan_id)
{
        int if_type;

        rcu_read_lock();
        for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
                struct cnic_ulp_ops *ulp_ops;
                void *ctx;

                ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
                if (!ulp_ops || !ulp_ops->indicate_netevent)
                        continue;

                ctx = cp->ulp_handle[if_type];

                ulp_ops->indicate_netevent(ctx, event, vlan_id);
        }
        rcu_read_unlock();
}

/* netdev event handler */
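/*
 * NETDEV_REGISTER may hot-plug a new cnic device; NETDEV_UP registers with
 * the netdev and starts the hardware; NETDEV_GOING_DOWN stops the ULPs and
 * the hardware; NETDEV_UNREGISTER removes the device from the list and
 * frees it.  Events on a VLAN interface are forwarded to the real device's
 * ULPs with the VLAN id attached.
 */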
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
                             void *ptr)
{
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
        struct cnic_dev *dev;
        int new_dev = 0;

        dev = cnic_from_netdev(netdev);

        if (!dev && event == NETDEV_REGISTER) {
                /* Check for the hot-plug device */
                dev = is_cnic_dev(netdev);
                if (dev) {
                        new_dev = 1;
                        cnic_hold(dev);
                }
        }
        if (dev) {
                struct cnic_local *cp = dev->cnic_priv;

                if (new_dev)
                        cnic_ulp_init(dev);
                else if (event == NETDEV_UNREGISTER)
                        cnic_ulp_exit(dev);

                if (event == NETDEV_UP) {
                        if (cnic_register_netdev(dev) != 0) {
                                cnic_put(dev);
                                goto done;
                        }
                        if (!cnic_start_hw(dev))
                                cnic_ulp_start(dev);
                }

                cnic_rcv_netevent(cp, event, 0);

                if (event == NETDEV_GOING_DOWN) {
                        cnic_ulp_stop(dev);
                        cnic_stop_hw(dev);
                        cnic_unregister_netdev(dev);
                } else if (event == NETDEV_UNREGISTER) {
                        write_lock(&cnic_dev_lock);
                        list_del_init(&dev->list);
                        write_unlock(&cnic_dev_lock);

                        cnic_put(dev);
                        cnic_free_dev(dev);
                        goto done;
                }
                cnic_put(dev);
        } else {
                struct net_device *realdev;
                u16 vid;

                vid = cnic_get_vlan(netdev, &realdev);
                if (realdev) {
                        dev = cnic_from_netdev(realdev);
                        if (dev) {
                                vid |= VLAN_TAG_PRESENT;
                                cnic_rcv_netevent(dev->cnic_priv, event, vid);
                                cnic_put(dev);
                        }
                }
        }
done:
        return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
        .notifier_call = cnic_netdev_event
};

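/* Free any UIO devices still left on the global cnic_udev_list. */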
static void cnic_release(void)
{
        struct cnic_uio_dev *udev;

        while (!list_empty(&cnic_udev_list)) {
                udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
                                  list);
                cnic_free_uio(udev);
        }
}

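/* Module init: register the netdevice notifier, then create the
 * single-threaded workqueue used for deferred cnic work.  Each step is
 * unwound if a later one fails.
 */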
static int __init cnic_init(void)
{
        int rc = 0;

        pr_info("%s", version);

        rc = register_netdevice_notifier(&cnic_netdev_notifier);
        if (rc) {
                cnic_release();
                return rc;
        }

        cnic_wq = create_singlethread_workqueue("cnic_wq");
        if (!cnic_wq) {
                cnic_release();
                unregister_netdevice_notifier(&cnic_netdev_notifier);
                return -ENOMEM;
        }

        return 0;
}

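/* Module exit: unregister the notifier, release any remaining UIO devices,
 * and destroy the workqueue.
 */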
static void __exit cnic_exit(void)
{
        unregister_netdevice_notifier(&cnic_netdev_notifier);
        cnic_release();
        destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);