/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#define BCM_CNIC 1
#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
#include "cnic.h"
#include "cnic_defs.h"

#define CNIC_MODULE_NAME	"cnic"

static char version[] =
	"Broadcom NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

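/*
 * Userspace access to the cnic uio device is exclusive: udev->uio_dev
 * is -1 while the device is unopened and holds the opener's minor
 * number otherwise, so a second open fails with -EBUSY.  Each open
 * also tears down and re-initializes the L2 rings so the new owner
 * starts from a clean ring state.
 */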
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

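/*
 * The accessors below never touch the hardware directly.  Each one
 * packages its arguments into a struct drv_ctl_info and hands it to
 * the ethdev's drv_ctl() callback, letting the owning bnx2/bnx2x
 * netdriver perform the actual register, context-memory or L2 ring
 * operation on cnic's behalf.
 */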
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct fcoe_capabilities *fcoe_cap =
		&info.data.register_data.fcoe_features;

	if (reg) {
		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
	} else {
		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
	}

	info.data.ulp_type = ulp_type;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	if (!cp->ctx_tbl)
		return -EINVAL;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	/* The netlink send can fail transiently (e.g. no listener is
	 * ready yet), so PATH_REQ is retried a few times; IF_DOWN is
	 * sent only once.
	 */
	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return rc;
}

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			csk->vlan_id = path_resp->vlan_id;

			memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

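/*
 * SK_F_OFFLD_SCHED doubles as a simple sleeping lock in the *_prep()
 * helpers below: close and abort spin (with msleep) until they own
 * the bit, which serializes them against an offload request already
 * in flight on the same socket.
 */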
static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		pr_warn("%s: Failed waiting for ref count to go to zero\n",
			__func__);
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

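/*
 * A ULP (bnx2i for iSCSI, bnx2fc for FCoE) registers once globally
 * with cnic_register_driver() and then binds to each individual cnic
 * device through cnic_register_device() below.  Both unbind paths
 * poll for up to two seconds (20 x 100 ms) for in-flight upcalls and
 * RCU readers to drain before warning and giving up.
 */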
static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	cnic_ulp_ctl(dev, ulp_type, true);

	return 0;
}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_FCOE)
		dev->fcoe_cap = NULL;

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	cnic_ulp_ctl(dev, ulp_type, false);

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

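/*
 * Simple bitmap ID allocator used for per-connection IDs.  A worked
 * example for cnic_alloc_new_id() below: with start = 16, max = 8,
 * next = 6 and bits 6-7 already set, the forward search fails, the
 * scan wraps around and claims bit 0, next becomes (0 + 1) & 7 = 1,
 * and the function returns 16 + 0.  The "& (max - 1)" wrap assumes
 * max is a power of 2.
 */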
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
			    u32 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

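/*
 * cnic_alloc_dma() below allocates the page array and, when asked for
 * a page table, fills it through cp->setup_pgtbl, which points at one
 * of the two layout variants above: high dword first for bnx2-style
 * firmware, low dword first (the _le variant) where the bnx2x
 * firmware expects little-endian entries.
 */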
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    CNIC_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
			  ~(CNIC_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
{
	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}
}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	__cnic_free_uio_rings(udev);

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
		if (udev->uio_dev == -1)
			__cnic_free_uio_rings(udev);
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = CNIC_PAGE_SIZE;
		cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   CNIC_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

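/*
 * KCQ index helpers.  bnx2 consumes the full contiguous index range,
 * while on bnx2x the last KCQE slot of every page is a bd_chain_next
 * pointer to the following page, so the bnx2x variants skip an index
 * whenever (idx & MAX_KCQE_CNT) == MAX_KCQE_CNT; with 32-byte KCQEs
 * on 4 KiB pages that is every 128th slot, giving the sequence
 * ... 125, 126, 128 ...
 */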
static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}

static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
			  bool use_pg_tbl)
{
	int err, i, use_page_tbl = 0;
	struct kcqe **kcq;

	if (use_pg_tbl)
		use_page_tbl = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	info->next_idx = cnic_bnx2_next_idx;
	info->hw_idx = cnic_bnx2_hw_idx;
	if (use_pg_tbl)
		return 0;

	info->next_idx = cnic_bnx2x_next_idx;
	info->hw_idx = cnic_bnx2x_hw_idx;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}

static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
{
	struct cnic_local *cp = udev->dev->cnic_priv;

	if (udev->l2_ring)
		return 0;

	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		return -ENOMEM;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf) {
		__cnic_free_uio_rings(udev);
		return -ENOMEM;
	}

	return 0;
}

static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			if (__cnic_alloc_uio_rings(udev, pages)) {
				udev->dev = NULL;
				return -ENOMEM;
			}
			cp->udev = udev;
			return 0;
		}
	}

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;

	if (__cnic_alloc_uio_rings(udev, pages))
		goto err_udev;

	list_add(&udev->list, &cnic_udev_list);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;

 err_udev:
	kfree(udev);
	return -ENOMEM;
}

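/*
 * A cnic_uio_dev is keyed by the underlying PCI device and may
 * outlive any single cnic_dev bound to it: if userspace still holds
 * the uio node when the netdev goes away, the udev stays on
 * cnic_udev_list and is re-attached above the next time the same PCI
 * function registers.
 */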
static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
						     TX_MAX_TSS_RINGS + 1);
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					CNIC_PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);

		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			CNIC_PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}

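/*
 * Seen from userspace (the iscsiuio daemon, for instance), the uio
 * device registered above exposes four mappings: BAR0 registers
 * (mem[0]), the status block (mem[1]), the L2 rings (mem[2]) and the
 * L2 buffer area (mem[3]).
 */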
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!CHIP_IS_E1(bp))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

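/*
 * On E1 (57710) chips cp->ctx_align is set and each context block's
 * DMA address must be aligned to the block size.  An unaligned
 * allocation above frees everything, grows ctx_blk_size by one
 * alignment step and restarts the loop from i = 0 (via i = -1;
 * continue), trading some memory for an aligned layout.
 */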
static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		cp->max_cid_space += dev->max_fcoe_conn;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		CNIC_PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
	if (ret)
		goto error;

	if (CNIC_SUPPORTS_FCOE(bp)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
		if (ret)
			goto error;
	}

	pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		return 0;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

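/*
 * Each connection owns a CNIC_KWQ16_DATA_SIZE slice of the kwq_16
 * DMA pages for out-of-band kwqe parameters: with n slices per page,
 * connection i lives at offset (i % n) * CNIC_KWQ16_DATA_SIZE inside
 * page i / n, which is exactly how the carving loop above advances j.
 */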
/* Free kwq entries: with max_kwq_idx acting as a power-of-2 mask
 * (e.g. 255), a producer index of 300 and a consumer index of 100
 * leave (300 - 100) & 255 = 200 entries in flight and 255 - 200 = 55
 * slots free.
 */
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}

static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

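/*
 * bnx2x devices use 16-byte kwqes whose payload travels out of band:
 * cnic_get_kwqe_16_data() below returns the connection's preallocated
 * DMA slice and records its bus address in l5_data, which
 * cnic_submit_kwqe_16() then points the slow-path queue entry at.
 */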
static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(bp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return ret;
}

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}

static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
				       int en_tcp_dack)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (time_stamps) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}
	if (en_tcp_dack)
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
}

static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = bp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
	hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
1528
Eddie Waib3bd2d62013-07-28 19:03:58 -07001529 cnic_bnx2x_set_tcp_options(dev,
1530 req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
1531 req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);
1532
Michael Chan71034ba2009-10-10 13:46:59 +00001533 return 0;
1534}
1535
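/* Handle the iSCSI INIT2 KWQE: write the error bitmaps and the CQ
 * sequence-number window into storm RAM, then complete the INIT
 * request back to the iSCSI ULP with a synthesized KCQE.
 */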
1536static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
1537{
1538 struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
Michael Chan68c64d22012-12-06 10:33:11 +00001539 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chana5b3c4a2013-09-02 11:42:31 -07001540 u32 pfid = bp->pfid;
Michael Chan71034ba2009-10-10 13:46:59 +00001541 struct iscsi_kcqe kcqe;
1542 struct kcqe *cqes[1];
1543
1544 memset(&kcqe, 0, sizeof(kcqe));
1545 if (!dev->max_iscsi_conn) {
1546 kcqe.completion_status =
1547 ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
1548 goto done;
1549 }
1550
1551 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001552 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
Michael Chan71034ba2009-10-10 13:46:59 +00001553 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001554 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
Michael Chan71034ba2009-10-10 13:46:59 +00001555 req2->error_bit_map[1]);
1556
1557 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001558 USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
Michael Chan71034ba2009-10-10 13:46:59 +00001559 CNIC_WR(dev, BAR_USTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001560 USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
Michael Chan71034ba2009-10-10 13:46:59 +00001561 CNIC_WR(dev, BAR_USTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001562 USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
Michael Chan71034ba2009-10-10 13:46:59 +00001563 req2->error_bit_map[1]);
1564
1565 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001566 CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
Michael Chan71034ba2009-10-10 13:46:59 +00001567
1568 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1569
1570done:
1571 kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
1572 cqes[0] = (struct kcqe *) &kcqe;
1573 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1574
1575 return 0;
1576}
1577
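/* Release the per-connection resources set up by
 * cnic_alloc_bnx2x_conn_resc() and return the CID to its table.
 */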
1578static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1579{
1580 struct cnic_local *cp = dev->cnic_priv;
1581 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1582
1583 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
1584 struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1585
1586 cnic_free_dma(dev, &iscsi->hq_info);
1587 cnic_free_dma(dev, &iscsi->r2tq_info);
1588 cnic_free_dma(dev, &iscsi->task_array_info);
Michael Chane1928c82010-12-23 07:43:04 +00001589 cnic_free_id(&cp->cid_tbl, ctx->cid);
1590 } else {
1591 cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
Michael Chan71034ba2009-10-10 13:46:59 +00001592 }
Michael Chane1928c82010-12-23 07:43:04 +00001593
Michael Chan71034ba2009-10-10 13:46:59 +00001594 ctx->cid = 0;
1595}
1596
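/* Allocate per-connection resources: a CID from the appropriate
 * table plus, for iSCSI, the task array, R2TQ and HQ DMA rings.
 * FCoE connections only need the CID.  On failure everything
 * allocated so far is released.
 */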
1597static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1598{
1599 u32 cid;
1600 int ret, pages;
1601 struct cnic_local *cp = dev->cnic_priv;
1602 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1603 struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1604
Michael Chane1928c82010-12-23 07:43:04 +00001605 if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
1606 cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
1607 if (cid == -1) {
1608 ret = -ENOMEM;
1609 goto error;
1610 }
1611 ctx->cid = cid;
1612 return 0;
1613 }
1614
Michael Chan71034ba2009-10-10 13:46:59 +00001615 cid = cnic_alloc_new_id(&cp->cid_tbl);
1616 if (cid == -1) {
1617 ret = -ENOMEM;
1618 goto error;
1619 }
1620
1621 ctx->cid = cid;
Michael Chanbe1fefc2014-03-17 19:19:07 -08001622 pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;
Michael Chan71034ba2009-10-10 13:46:59 +00001623
1624 ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
1625 if (ret)
1626 goto error;
1627
Michael Chanbe1fefc2014-03-17 19:19:07 -08001628 pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
Michael Chan71034ba2009-10-10 13:46:59 +00001629 ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
1630 if (ret)
1631 goto error;
1632
Michael Chanbe1fefc2014-03-17 19:19:07 -08001633 pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
Michael Chan71034ba2009-10-10 13:46:59 +00001634 ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
1635 if (ret)
1636 goto error;
1637
1638 return 0;
1639
1640error:
1641 cnic_free_bnx2x_conn_resc(dev, l5_cid);
1642 return ret;
1643}
1644
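/* Return a virtual pointer to the context memory for @cid and
 * report its bus address through @ctx_addr, honouring the
 * context-block alignment.  The context is zeroed when @init is
 * set.
 */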
1645static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
1646 struct regpair *ctx_addr)
1647{
1648 struct cnic_local *cp = dev->cnic_priv;
1649 struct cnic_eth_dev *ethdev = cp->ethdev;
1650 int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
1651 int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
1652 unsigned long align_off = 0;
1653 dma_addr_t ctx_map;
1654 void *ctx;
1655
1656 if (cp->ctx_align) {
1657 unsigned long mask = cp->ctx_align - 1;
1658
1659 if (cp->ctx_arr[blk].mapping & mask)
1660 align_off = cp->ctx_align -
1661 (cp->ctx_arr[blk].mapping & mask);
1662 }
1663 ctx_map = cp->ctx_arr[blk].mapping + align_off +
1664 (off * BNX2X_CONTEXT_MEM_SIZE);
1665 ctx = cp->ctx_arr[blk].ctx + align_off +
1666 (off * BNX2X_CONTEXT_MEM_SIZE);
1667 if (init)
1668 memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
1669
1670 ctx_addr->lo = ctx_map & 0xffffffff;
1671 ctx_addr->hi = (u64) ctx_map >> 32;
1672 return ctx;
1673}
1674
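/* Build the iSCSI connection context from the OFFLOAD_CONN1/2/3
 * KWQEs: program the X/T/U/C storm sections (SQ, HQ, R2TQ and CQ
 * page tables plus TCP defaults) and the CDU validation words for
 * the hardware CID.
 */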
1675static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1676 u32 num)
1677{
1678 struct cnic_local *cp = dev->cnic_priv;
Michael Chan104a43e2013-09-02 11:42:28 -07001679 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chan71034ba2009-10-10 13:46:59 +00001680 struct iscsi_kwqe_conn_offload1 *req1 =
1681 (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1682 struct iscsi_kwqe_conn_offload2 *req2 =
1683 (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1684 struct iscsi_kwqe_conn_offload3 *req3;
1685 struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
1686 struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1687 u32 cid = ctx->cid;
Michael Chan5e65789f2013-09-02 11:42:29 -07001688 u32 hw_cid = BNX2X_HW_CID(bp, cid);
Michael Chan71034ba2009-10-10 13:46:59 +00001689 struct iscsi_context *ictx;
1690 struct regpair context_addr;
1691 int i, j, n = 2, n_max;
Michael Chan5bf945a2013-09-02 11:42:30 -07001692 u8 port = BP_PORT(bp);
Michael Chan71034ba2009-10-10 13:46:59 +00001693
1694 ctx->ctx_flags = 0;
1695 if (!req2->num_additional_wqes)
1696 return -EINVAL;
1697
1698 n_max = req2->num_additional_wqes + 2;
1699
1700 ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
1701 if (ictx == NULL)
1702 return -ENOMEM;
1703
1704 req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1705
1706 ictx->xstorm_ag_context.hq_prod = 1;
1707
1708 ictx->xstorm_st_context.iscsi.first_burst_length =
1709 ISCSI_DEF_FIRST_BURST_LEN;
1710 ictx->xstorm_st_context.iscsi.max_send_pdu_length =
1711 ISCSI_DEF_MAX_RECV_SEG_LEN;
1712 ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
1713 req1->sq_page_table_addr_lo;
1714 ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
1715 req1->sq_page_table_addr_hi;
1716 ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
1717 ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
1718 ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
1719 iscsi->hq_info.pgtbl_map & 0xffffffff;
1720 ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
1721 (u64) iscsi->hq_info.pgtbl_map >> 32;
1722 ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
1723 iscsi->hq_info.pgtbl[0];
1724 ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
1725 iscsi->hq_info.pgtbl[1];
1726 ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
1727 iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1728 ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
1729 (u64) iscsi->r2tq_info.pgtbl_map >> 32;
1730 ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
1731 iscsi->r2tq_info.pgtbl[0];
1732 ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
1733 iscsi->r2tq_info.pgtbl[1];
1734 ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
1735 iscsi->task_array_info.pgtbl_map & 0xffffffff;
1736 ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
1737 (u64) iscsi->task_array_info.pgtbl_map >> 32;
1738 ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
1739 BNX2X_ISCSI_PBL_NOT_CACHED;
1740 ictx->xstorm_st_context.iscsi.flags.flags |=
1741 XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
1742 ictx->xstorm_st_context.iscsi.flags.flags |=
1743 XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001744 ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
1745 ETH_P_8021Q;
Michael Chan104a43e2013-09-02 11:42:28 -07001746 if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
Michael Chan5bf945a2013-09-02 11:42:30 -07001747 bp->common.chip_port_mode == CHIP_2_PORT_MODE) {
1749 port = 0;
1750 }
1751 ictx->xstorm_st_context.common.flags =
1752 1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
1753	ictx->xstorm_st_context.common.flags |=
1754 port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
Michael Chan71034ba2009-10-10 13:46:59 +00001755
1756 ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
1757	/* TSTORM requires the base address of the RQ DB, not the PTE */
1758 ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
Michael Chanbe1fefc2014-03-17 19:19:07 -08001759 req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
Michael Chan71034ba2009-10-10 13:46:59 +00001760 ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
1761 req2->rq_page_table_addr_hi;
1762 ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
1763 ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
1764 ictx->tstorm_st_context.tcp.flags2 |=
1765 TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001766 ictx->tstorm_st_context.tcp.ooo_support_mode =
1767 TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
Michael Chan71034ba2009-10-10 13:46:59 +00001768
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001769 ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
Michael Chan71034ba2009-10-10 13:46:59 +00001770
1771 ictx->ustorm_st_context.ring.rq.pbl_base.lo =
Michael Chan15971c32009-12-02 15:15:38 +00001772 req2->rq_page_table_addr_lo;
Michael Chan71034ba2009-10-10 13:46:59 +00001773 ictx->ustorm_st_context.ring.rq.pbl_base.hi =
Michael Chan15971c32009-12-02 15:15:38 +00001774 req2->rq_page_table_addr_hi;
Michael Chan71034ba2009-10-10 13:46:59 +00001775 ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
1776 ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
1777 ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
1778 iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1779 ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
1780 (u64) iscsi->r2tq_info.pgtbl_map >> 32;
1781 ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
1782 iscsi->r2tq_info.pgtbl[0];
1783 ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
1784 iscsi->r2tq_info.pgtbl[1];
1785 ictx->ustorm_st_context.ring.cq_pbl_base.lo =
1786 req1->cq_page_table_addr_lo;
1787 ictx->ustorm_st_context.ring.cq_pbl_base.hi =
1788 req1->cq_page_table_addr_hi;
1789 ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
1790 ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
1791 ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
1792 ictx->ustorm_st_context.task_pbe_cache_index =
1793 BNX2X_ISCSI_PBL_NOT_CACHED;
1794 ictx->ustorm_st_context.task_pdu_cache_index =
1795 BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
1796
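	/* CQs after the first take their initial PBEs from the
	 * OFFLOAD_CONN3 WQEs: the first CONN3 WQE also carried the RQ
	 * PTE above, and each further WQE carries three more PTEs.
	 */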
1797 for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
1798 if (j == 3) {
1799 if (n >= n_max)
1800 break;
1801 req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1802 j = 0;
1803 }
1804 ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
1805 ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
1806 req3->qp_first_pte[j].hi;
1807 ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
1808 req3->qp_first_pte[j].lo;
1809 }
1810
1811 ictx->ustorm_st_context.task_pbl_base.lo =
1812 iscsi->task_array_info.pgtbl_map & 0xffffffff;
1813 ictx->ustorm_st_context.task_pbl_base.hi =
1814 (u64) iscsi->task_array_info.pgtbl_map >> 32;
1815 ictx->ustorm_st_context.tce_phy_addr.lo =
1816 iscsi->task_array_info.pgtbl[0];
1817 ictx->ustorm_st_context.tce_phy_addr.hi =
1818 iscsi->task_array_info.pgtbl[1];
1819 ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1820 ictx->ustorm_st_context.num_cqs = cp->num_cqs;
1821 ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
1822 ictx->ustorm_st_context.negotiated_rx_and_flags |=
1823 ISCSI_DEF_MAX_BURST_LEN;
1824 ictx->ustorm_st_context.negotiated_rx |=
1825 ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
1826 USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
1827
1828 ictx->cstorm_st_context.hq_pbl_base.lo =
1829 iscsi->hq_info.pgtbl_map & 0xffffffff;
1830 ictx->cstorm_st_context.hq_pbl_base.hi =
1831 (u64) iscsi->hq_info.pgtbl_map >> 32;
1832 ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
1833 ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
1834 ictx->cstorm_st_context.task_pbl_base.lo =
1835 iscsi->task_array_info.pgtbl_map & 0xffffffff;
1836 ictx->cstorm_st_context.task_pbl_base.hi =
1837 (u64) iscsi->task_array_info.pgtbl_map >> 32;
1838	/* CSTORM and USTORM initialization differ: CSTORM requires the
1839	 * CQ DB base address, not the PTE address. */
1840 ictx->cstorm_st_context.cq_db_base.lo =
Michael Chanbe1fefc2014-03-17 19:19:07 -08001841 req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
Michael Chan71034ba2009-10-10 13:46:59 +00001842 ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
1843 ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1844 ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
1845 for (i = 0; i < cp->num_cqs; i++) {
1846 ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
1847 ISCSI_INITIAL_SN;
1848 ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
1849 ISCSI_INITIAL_SN;
1850 }
1851
1852 ictx->xstorm_ag_context.cdu_reserved =
1853 CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
1854 ISCSI_CONNECTION_TYPE);
1855 ictx->ustorm_ag_context.cdu_usage =
1856 CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
1857 ISCSI_CONNECTION_TYPE);
1858 return 0;
1859
1860}
1861
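/* Handle an OFFLOAD_CONN1 request and its companion WQEs:
 * validate the L5 CID, enforce the connection limit, allocate the
 * connection resources and program the context.  The outcome is
 * always reported to the ULP as an OFFLOAD_CONN KCQE, and *work
 * returns how many WQEs were consumed.
 */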
1862static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1863 u32 num, int *work)
1864{
1865 struct iscsi_kwqe_conn_offload1 *req1;
1866 struct iscsi_kwqe_conn_offload2 *req2;
1867 struct cnic_local *cp = dev->cnic_priv;
Michael Chan5e65789f2013-09-02 11:42:29 -07001868 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chanfdf24082010-10-13 14:06:47 +00001869 struct cnic_context *ctx;
Michael Chan71034ba2009-10-10 13:46:59 +00001870 struct iscsi_kcqe kcqe;
1871 struct kcqe *cqes[1];
1872 u32 l5_cid;
Michael Chanfdf24082010-10-13 14:06:47 +00001873 int ret = 0;
Michael Chan71034ba2009-10-10 13:46:59 +00001874
1875 if (num < 2) {
1876 *work = num;
1877 return -EINVAL;
1878 }
1879
1880 req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1881 req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1882 if ((num - 2) < req2->num_additional_wqes) {
1883 *work = num;
1884 return -EINVAL;
1885 }
Joe Perches779bb412010-11-14 17:04:37 +00001886 *work = 2 + req2->num_additional_wqes;
Michael Chan71034ba2009-10-10 13:46:59 +00001887
1888 l5_cid = req1->iscsi_conn_id;
1889 if (l5_cid >= MAX_ISCSI_TBL_SZ)
1890 return -EINVAL;
1891
1892 memset(&kcqe, 0, sizeof(kcqe));
1893 kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
1894 kcqe.iscsi_conn_id = l5_cid;
1895 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
1896
Michael Chanfdf24082010-10-13 14:06:47 +00001897 ctx = &cp->ctx_tbl[l5_cid];
1898 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
1899 kcqe.completion_status =
1900 ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
1901 goto done;
1902 }
1903
Michael Chan71034ba2009-10-10 13:46:59 +00001904 if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
1905 atomic_dec(&cp->iscsi_conn);
Michael Chan71034ba2009-10-10 13:46:59 +00001906 goto done;
1907 }
1908 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
1909 if (ret) {
1910 atomic_dec(&cp->iscsi_conn);
1911 ret = 0;
1912 goto done;
1913 }
1914 ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
1915 if (ret < 0) {
1916 cnic_free_bnx2x_conn_resc(dev, l5_cid);
1917 atomic_dec(&cp->iscsi_conn);
1918 goto done;
1919 }
1920
1921 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
Michael Chan5e65789f2013-09-02 11:42:29 -07001922 kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);
Michael Chan71034ba2009-10-10 13:46:59 +00001923
1924done:
1925 cqes[0] = (struct kcqe *) &kcqe;
1926 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
Michael Chan23021c22012-01-04 12:12:28 +00001927 return 0;
Michael Chan71034ba2009-10-10 13:46:59 +00001928}
1929
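/* Handle an UPDATE_CONN KWQE by copying it into a kwqe-16 data
 * buffer and submitting an UPDATE_CONN ramrod for the context.
 */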
1931static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
1932{
1933 struct cnic_local *cp = dev->cnic_priv;
1934 struct iscsi_kwqe_conn_update *req =
1935 (struct iscsi_kwqe_conn_update *) kwqe;
1936 void *data;
1937 union l5cm_specific_data l5_data;
1938 u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
1939 int ret;
1940
1941 if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
1942 return -EINVAL;
1943
1944 data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
1945 if (!data)
1946 return -ENOMEM;
1947
1948 memcpy(data, kwqe, sizeof(struct kwqe));
1949
1950 ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
1951 req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
1952 return ret;
1953}
1954
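/* Submit a CFC delete ramrod for the connection's hardware CID
 * and wait, with a timeout, on ctx->waitq for the completion
 * event (signalled elsewhere when the ramrod completes).  -EBUSY
 * is returned if the CID reported an error.
 */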
Michael Chana2c9e762010-10-13 14:06:46 +00001955static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
Michael Chan71034ba2009-10-10 13:46:59 +00001956{
1957 struct cnic_local *cp = dev->cnic_priv;
Michael Chan5e65789f2013-09-02 11:42:29 -07001958 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chan71034ba2009-10-10 13:46:59 +00001959 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
Michael Chana2c9e762010-10-13 14:06:46 +00001960 union l5cm_specific_data l5_data;
1961 int ret;
Michael Chan68d7c1a2011-01-05 15:14:13 +00001962 u32 hw_cid;
Michael Chan71034ba2009-10-10 13:46:59 +00001963
Michael Chan71034ba2009-10-10 13:46:59 +00001964 init_waitqueue_head(&ctx->waitq);
1965 ctx->wait_cond = 0;
1966 memset(&l5_data, 0, sizeof(l5_data));
Michael Chan5e65789f2013-09-02 11:42:29 -07001967 hw_cid = BNX2X_HW_CID(bp, ctx->cid);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001968
1969 ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
Michael Chan68d7c1a2011-01-05 15:14:13 +00001970 hw_cid, NONE_CONNECTION_TYPE, &l5_data);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001971
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001972 if (ret == 0) {
Michael Chandcc7e3a2011-08-26 09:45:40 +00001973 wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001974 if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
1975 return -EBUSY;
1976 }
Michael Chan71034ba2009-10-10 13:46:59 +00001977
Michael Chandcc7e3a2011-08-26 09:45:40 +00001978 return 0;
Michael Chana2c9e762010-10-13 14:06:46 +00001979}
1980
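/* Handle a DESTROY_CONN KWQE.  If the context was used within the
 * last two seconds, the CFC delete is deferred to the delete_task
 * worker; otherwise the delete ramrod runs inline.  A
 * DESTROY_CONN KCQE is returned to the ULP either way.
 */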
1981static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
1982{
1983 struct cnic_local *cp = dev->cnic_priv;
1984 struct iscsi_kwqe_conn_destroy *req =
1985 (struct iscsi_kwqe_conn_destroy *) kwqe;
1986 u32 l5_cid = req->reserved0;
1987 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1988 int ret = 0;
1989 struct iscsi_kcqe kcqe;
1990 struct kcqe *cqes[1];
1991
1992 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
1993 goto skip_cfc_delete;
1994
Michael Chanfdf24082010-10-13 14:06:47 +00001995 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
1996 unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;
1997
1998 if (delta > (2 * HZ))
1999 delta = 0;
2000
2001 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2002 queue_delayed_work(cnic_wq, &cp->delete_task, delta);
2003 goto destroy_reply;
2004 }
Michael Chana2c9e762010-10-13 14:06:46 +00002005
2006 ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
2007
Michael Chan71034ba2009-10-10 13:46:59 +00002008skip_cfc_delete:
2009 cnic_free_bnx2x_conn_resc(dev, l5_cid);
2010
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002011 if (!ret) {
2012 atomic_dec(&cp->iscsi_conn);
2013 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2014 }
Michael Chan71034ba2009-10-10 13:46:59 +00002015
Michael Chanfdf24082010-10-13 14:06:47 +00002016destroy_reply:
Michael Chan71034ba2009-10-10 13:46:59 +00002017 memset(&kcqe, 0, sizeof(kcqe));
2018 kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
2019 kcqe.iscsi_conn_id = l5_cid;
2020 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
2021 kcqe.iscsi_conn_context_id = req->context_id;
2022
2023 cqes[0] = (struct kcqe *) &kcqe;
2024 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
2025
Michael Chan23021c22012-01-04 12:12:28 +00002026 return 0;
Michael Chan71034ba2009-10-10 13:46:59 +00002027}
2028
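/* Fill the XSTORM/TSTORM sections of the active-connection buffer
 * for a TCP connect: context address, MSS, receive buffer,
 * optional keep-alive parameters and the pseudo-header checksum
 * seed, computed over the in6_addr form of both addresses.
 */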
2029static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
2030 struct l4_kwq_connect_req1 *kwqe1,
2031 struct l4_kwq_connect_req3 *kwqe3,
2032 struct l5cm_active_conn_buffer *conn_buf)
2033{
2034 struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
2035 struct l5cm_xstorm_conn_buffer *xstorm_buf =
2036 &conn_buf->xstorm_conn_buffer;
2037 struct l5cm_tstorm_conn_buffer *tstorm_buf =
2038 &conn_buf->tstorm_conn_buffer;
2039 struct regpair context_addr;
2040 u32 cid = BNX2X_SW_CID(kwqe1->cid);
2041 struct in6_addr src_ip, dst_ip;
2042 int i;
2043 u32 *addrp;
2044
2045 addrp = (u32 *) &conn_addr->local_ip_addr;
2046 for (i = 0; i < 4; i++, addrp++)
2047 src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
2048
2049 addrp = (u32 *) &conn_addr->remote_ip_addr;
2050 for (i = 0; i < 4; i++, addrp++)
2051 dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
2052
2053 cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
2054
2055 xstorm_buf->context_addr.hi = context_addr.hi;
2056 xstorm_buf->context_addr.lo = context_addr.lo;
2057 xstorm_buf->mss = 0xffff;
2058 xstorm_buf->rcv_buf = kwqe3->rcv_buf;
2059 if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
2060 xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
2061 xstorm_buf->pseudo_header_checksum =
2062 swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
2063
Michael Chan71034ba2009-10-10 13:46:59 +00002064 if (kwqe3->ka_timeout) {
2065 tstorm_buf->ka_enable = 1;
2066 tstorm_buf->ka_timeout = kwqe3->ka_timeout;
2067 tstorm_buf->ka_interval = kwqe3->ka_interval;
2068 tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
2069 }
Michael Chan71034ba2009-10-10 13:46:59 +00002070 tstorm_buf->max_rt_time = 0xffffffff;
2071}
2072
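/* Program the local MAC address for iSCSI offload: XSTORM takes
 * the bytes individually while TSTORM takes them as LSB/MID/MSB
 * pairs in reverse byte order.
 */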
2073static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
2074{
Michael Chan68c64d22012-12-06 10:33:11 +00002075 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chana5b3c4a2013-09-02 11:42:31 -07002076 u32 pfid = bp->pfid;
Michael Chan71034ba2009-10-10 13:46:59 +00002077 u8 *mac = dev->mac_addr;
2078
2079 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002080 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
Michael Chan71034ba2009-10-10 13:46:59 +00002081 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002082 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
Michael Chan71034ba2009-10-10 13:46:59 +00002083 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002084 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
Michael Chan71034ba2009-10-10 13:46:59 +00002085 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002086 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
Michael Chan71034ba2009-10-10 13:46:59 +00002087 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002088 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
Michael Chan71034ba2009-10-10 13:46:59 +00002089 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002090 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);
Michael Chan71034ba2009-10-10 13:46:59 +00002091
2092 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002093 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
Michael Chan71034ba2009-10-10 13:46:59 +00002094 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002095 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
Michael Chan71034ba2009-10-10 13:46:59 +00002096 mac[4]);
2097 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002098 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
Michael Chan71034ba2009-10-10 13:46:59 +00002099 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002100 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
Michael Chan71034ba2009-10-10 13:46:59 +00002101 mac[2]);
2102 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002103 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
Michael Chan71034ba2009-10-10 13:46:59 +00002104 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002105 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
Michael Chan71034ba2009-10-10 13:46:59 +00002106 mac[0]);
2107}
2108
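/* Handle an L4 CONNECT request, which spans two WQEs for IPv4 and
 * three for IPv6: marshal the addresses and TCP parameters into a
 * kwqe-16 buffer and submit a TCP_CONNECT ramrod.
 */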
Michael Chan71034ba2009-10-10 13:46:59 +00002109static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
2110 u32 num, int *work)
2111{
2112 struct cnic_local *cp = dev->cnic_priv;
Michael Chan68c64d22012-12-06 10:33:11 +00002113 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chan71034ba2009-10-10 13:46:59 +00002114 struct l4_kwq_connect_req1 *kwqe1 =
2115 (struct l4_kwq_connect_req1 *) wqes[0];
2116 struct l4_kwq_connect_req3 *kwqe3;
2117 struct l5cm_active_conn_buffer *conn_buf;
2118 struct l5cm_conn_addr_params *conn_addr;
2119 union l5cm_specific_data l5_data;
2120 u32 l5_cid = kwqe1->pg_cid;
2121 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
2122 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
2123 int ret;
2124
2125 if (num < 2) {
2126 *work = num;
2127 return -EINVAL;
2128 }
2129
2130 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
2131 *work = 3;
2132 else
2133 *work = 2;
2134
2135 if (num < *work) {
2136 *work = num;
2137 return -EINVAL;
2138 }
2139
2140 if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
Joe Perchesddf79b22010-02-17 15:01:54 +00002141 netdev_err(dev->netdev, "conn_buf size too big\n");
Michael Chan71034ba2009-10-10 13:46:59 +00002142 return -ENOMEM;
2143 }
2144 conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2145 if (!conn_buf)
2146 return -ENOMEM;
2147
2148 memset(conn_buf, 0, sizeof(*conn_buf));
2149
2150 conn_addr = &conn_buf->conn_addr_buf;
2151 conn_addr->remote_addr_0 = csk->ha[0];
2152 conn_addr->remote_addr_1 = csk->ha[1];
2153 conn_addr->remote_addr_2 = csk->ha[2];
2154 conn_addr->remote_addr_3 = csk->ha[3];
2155 conn_addr->remote_addr_4 = csk->ha[4];
2156 conn_addr->remote_addr_5 = csk->ha[5];
2157
2158 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
2159 struct l4_kwq_connect_req2 *kwqe2 =
2160 (struct l4_kwq_connect_req2 *) wqes[1];
2161
2162 conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
2163 conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
2164 conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;
2165
2166 conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
2167 conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
2168 conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
2169 conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
2170 }
2171 kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
2172
2173 conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
2174 conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
2175 conn_addr->local_tcp_port = kwqe1->src_port;
2176 conn_addr->remote_tcp_port = kwqe1->dst_port;
2177
2178 conn_addr->pmtu = kwqe3->pmtu;
2179 cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
2180
2181 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
Michael Chana5b3c4a2013-09-02 11:42:31 -07002182 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);
Michael Chan71034ba2009-10-10 13:46:59 +00002183
Michael Chan71034ba2009-10-10 13:46:59 +00002184 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
2185 kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2186 if (!ret)
Michael Chan6e0dda02010-10-13 14:06:45 +00002187 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
Michael Chan71034ba2009-10-10 13:46:59 +00002188
2189 return ret;
2190}
2191
2192static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
2193{
2194 struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
2195 union l5cm_specific_data l5_data;
2196 int ret;
2197
2198 memset(&l5_data, 0, sizeof(l5_data));
2199 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
2200 req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2201 return ret;
2202}
2203
2204static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
2205{
2206 struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
2207 union l5cm_specific_data l5_data;
2208 int ret;
2209
2210 memset(&l5_data, 0, sizeof(l5_data));
2211 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
2212 req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2213 return ret;
2214}
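
/* bnx2x keeps no firmware state for PG entries, so OFFLOAD_PG and
 * UPDATE_PG are acknowledged immediately with synthesized KCQEs.
 */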
2215static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2216{
2217 struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
2218 struct l4_kcq kcqe;
2219 struct kcqe *cqes[1];
2220
2221 memset(&kcqe, 0, sizeof(kcqe));
2222 kcqe.pg_host_opaque = req->host_opaque;
2223 kcqe.pg_cid = req->host_opaque;
2224 kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
2225 cqes[0] = (struct kcqe *) &kcqe;
2226 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2227 return 0;
2228}
2229
2230static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2231{
2232 struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
2233 struct l4_kcq kcqe;
2234 struct kcqe *cqes[1];
2235
2236 memset(&kcqe, 0, sizeof(kcqe));
2237 kcqe.pg_host_opaque = req->pg_host_opaque;
2238 kcqe.pg_cid = req->pg_cid;
2239 kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
2240 cqes[0] = (struct kcqe *) &kcqe;
2241 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2242 return 0;
2243}
2244
Michael Chane1928c82010-12-23 07:43:04 +00002245static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
2246{
2247 struct fcoe_kwqe_stat *req;
2248 struct fcoe_stat_ramrod_params *fcoe_stat;
2249 union l5cm_specific_data l5_data;
2250 struct cnic_local *cp = dev->cnic_priv;
Michael Chan5e65789f2013-09-02 11:42:29 -07002251 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chane1928c82010-12-23 07:43:04 +00002252 int ret;
2253 u32 cid;
2254
2255 req = (struct fcoe_kwqe_stat *) kwqe;
Michael Chan5e65789f2013-09-02 11:42:29 -07002256 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
Michael Chane1928c82010-12-23 07:43:04 +00002257
2258 fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2259 if (!fcoe_stat)
2260 return -ENOMEM;
2261
2262 memset(fcoe_stat, 0, sizeof(*fcoe_stat));
2263 memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
2264
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002265 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
Michael Chane1928c82010-12-23 07:43:04 +00002266 FCOE_CONNECTION_TYPE, &l5_data);
2267 return ret;
2268}
2269
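/* Handle the three-WQE FCoE INIT sequence: check the opcode
 * order, copy the requests into a ramrod buffer along with the
 * KCQ2 event-queue page table, and fire the INIT_FUNC ramrod.
 */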
2270static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
2271 u32 num, int *work)
2272{
2273 int ret;
2274 struct cnic_local *cp = dev->cnic_priv;
Michael Chan5e65789f2013-09-02 11:42:29 -07002275 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chane1928c82010-12-23 07:43:04 +00002276 u32 cid;
2277 struct fcoe_init_ramrod_params *fcoe_init;
2278 struct fcoe_kwqe_init1 *req1;
2279 struct fcoe_kwqe_init2 *req2;
2280 struct fcoe_kwqe_init3 *req3;
2281 union l5cm_specific_data l5_data;
2282
2283 if (num < 3) {
2284 *work = num;
2285 return -EINVAL;
2286 }
2287 req1 = (struct fcoe_kwqe_init1 *) wqes[0];
2288 req2 = (struct fcoe_kwqe_init2 *) wqes[1];
2289 req3 = (struct fcoe_kwqe_init3 *) wqes[2];
2290 if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
2291 *work = 1;
2292 return -EINVAL;
2293 }
2294 if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
2295 *work = 2;
2296 return -EINVAL;
2297 }
2298
2299 if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
2300 netdev_err(dev->netdev, "fcoe_init size too big\n");
2301 return -ENOMEM;
2302 }
2303 fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2304 if (!fcoe_init)
2305 return -ENOMEM;
2306
2307 memset(fcoe_init, 0, sizeof(*fcoe_init));
2308 memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
2309 memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
2310 memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002311 fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
2312 fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
2313 fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
Michael Chane1928c82010-12-23 07:43:04 +00002314
2315 fcoe_init->sb_num = cp->status_blk_num;
2316 fcoe_init->eq_prod = MAX_KCQ_IDX;
2317 fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
2318 cp->kcq2.sw_prod_idx = 0;
2319
Michael Chan5e65789f2013-09-02 11:42:29 -07002320 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002321 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
Michael Chane1928c82010-12-23 07:43:04 +00002322 FCOE_CONNECTION_TYPE, &l5_data);
2323 *work = 3;
2324 return ret;
2325}
2326
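/* Handle an FCoE OFFLOAD_CONN request, which always spans four
 * WQEs: allocate a CID, set the CDU validation words in the
 * connection context and submit the offload ramrod.  Failures are
 * reported to the FCoE ULP with a CTX_ALLOC_FAILURE KCQE.
 */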
2327static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2328 u32 num, int *work)
2329{
2330 int ret = 0;
2331 u32 cid = -1, l5_cid;
2332 struct cnic_local *cp = dev->cnic_priv;
Michael Chan5e65789f2013-09-02 11:42:29 -07002333 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chane1928c82010-12-23 07:43:04 +00002334 struct fcoe_kwqe_conn_offload1 *req1;
2335 struct fcoe_kwqe_conn_offload2 *req2;
2336 struct fcoe_kwqe_conn_offload3 *req3;
2337 struct fcoe_kwqe_conn_offload4 *req4;
2338 struct fcoe_conn_offload_ramrod_params *fcoe_offload;
2339 struct cnic_context *ctx;
2340 struct fcoe_context *fctx;
2341 struct regpair ctx_addr;
2342 union l5cm_specific_data l5_data;
2343 struct fcoe_kcqe kcqe;
2344 struct kcqe *cqes[1];
2345
2346 if (num < 4) {
2347 *work = num;
2348 return -EINVAL;
2349 }
2350 req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
2351 req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
2352 req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
2353 req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
2354
2355 *work = 4;
2356
2357 l5_cid = req1->fcoe_conn_id;
Michael Chandc219a22011-08-26 09:45:39 +00002358 if (l5_cid >= dev->max_fcoe_conn)
Michael Chane1928c82010-12-23 07:43:04 +00002359 goto err_reply;
2360
2361 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2362
2363 ctx = &cp->ctx_tbl[l5_cid];
2364 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2365 goto err_reply;
2366
2367 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
2368 if (ret) {
2369 ret = 0;
2370 goto err_reply;
2371 }
2372 cid = ctx->cid;
2373
2374 fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2375 if (fctx) {
Michael Chan5e65789f2013-09-02 11:42:29 -07002376 u32 hw_cid = BNX2X_HW_CID(bp, cid);
Michael Chane1928c82010-12-23 07:43:04 +00002377 u32 val;
2378
2379 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
2380 FCOE_CONNECTION_TYPE);
2381 fctx->xstorm_ag_context.cdu_reserved = val;
2382 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
2383 FCOE_CONNECTION_TYPE);
2384 fctx->ustorm_ag_context.cdu_usage = val;
2385 }
2386 if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
2387 netdev_err(dev->netdev, "fcoe_offload size too big\n");
2388 goto err_reply;
2389 }
2390 fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2391 if (!fcoe_offload)
2392 goto err_reply;
2393
2394 memset(fcoe_offload, 0, sizeof(*fcoe_offload));
2395 memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
2396 memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
2397 memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
2398 memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
2399
Michael Chan5e65789f2013-09-02 11:42:29 -07002400 cid = BNX2X_HW_CID(bp, cid);
Michael Chane1928c82010-12-23 07:43:04 +00002401 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2402 FCOE_CONNECTION_TYPE, &l5_data);
2403 if (!ret)
2404 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2405
2406 return ret;
2407
2408err_reply:
2409 if (cid != -1)
2410 cnic_free_bnx2x_conn_resc(dev, l5_cid);
2411
2412 memset(&kcqe, 0, sizeof(kcqe));
2413 kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
2414 kcqe.fcoe_conn_id = req1->fcoe_conn_id;
2415 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
2416
2417 cqes[0] = (struct kcqe *) &kcqe;
2418 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2419 return ret;
2420}
2421
2422static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2423{
2424 struct fcoe_kwqe_conn_enable_disable *req;
2425 struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2426 union l5cm_specific_data l5_data;
2427 int ret;
2428 u32 cid, l5_cid;
2429 struct cnic_local *cp = dev->cnic_priv;
2430
2431 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2432 cid = req->context_id;
2433 l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2434
2435 if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2436 netdev_err(dev->netdev, "fcoe_enable size too big\n");
2437 return -ENOMEM;
2438 }
2439 fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2440 if (!fcoe_enable)
2441 return -ENOMEM;
2442
2443 memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2444 memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2445 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2446 FCOE_CONNECTION_TYPE, &l5_data);
2447 return ret;
2448}
2449
2450static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2451{
2452 struct fcoe_kwqe_conn_enable_disable *req;
2453 struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2454 union l5cm_specific_data l5_data;
2455 int ret;
2456 u32 cid, l5_cid;
2457 struct cnic_local *cp = dev->cnic_priv;
2458
2459 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2460 cid = req->context_id;
2461 l5_cid = req->conn_id;
Michael Chandc219a22011-08-26 09:45:39 +00002462 if (l5_cid >= dev->max_fcoe_conn)
Michael Chane1928c82010-12-23 07:43:04 +00002463 return -EINVAL;
2464
2465 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2466
2467 if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2468 netdev_err(dev->netdev, "fcoe_disable size too big\n");
2469 return -ENOMEM;
2470 }
2471 fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2472 if (!fcoe_disable)
2473 return -ENOMEM;
2474
2475 memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2476 memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2477 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2478 FCOE_CONNECTION_TYPE, &l5_data);
2479 return ret;
2480}
2481
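/* Tear down an FCoE connection: issue the TERMINATE ramrod, wait
 * briefly for its completion, then leave the final cleanup to the
 * delete_task worker and report a DESTROY_CONN KCQE to the ULP.
 */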
2482static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2483{
2484 struct fcoe_kwqe_conn_destroy *req;
2485 union l5cm_specific_data l5_data;
2486 int ret;
2487 u32 cid, l5_cid;
2488 struct cnic_local *cp = dev->cnic_priv;
2489 struct cnic_context *ctx;
2490 struct fcoe_kcqe kcqe;
2491 struct kcqe *cqes[1];
2492
2493 req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2494 cid = req->context_id;
2495 l5_cid = req->conn_id;
Michael Chandc219a22011-08-26 09:45:39 +00002496 if (l5_cid >= dev->max_fcoe_conn)
Michael Chane1928c82010-12-23 07:43:04 +00002497 return -EINVAL;
2498
2499 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2500
2501 ctx = &cp->ctx_tbl[l5_cid];
2502
2503 init_waitqueue_head(&ctx->waitq);
2504 ctx->wait_cond = 0;
2505
Michael Chandcc7e3a2011-08-26 09:45:40 +00002506 memset(&kcqe, 0, sizeof(kcqe));
2507 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
Michael Chane1928c82010-12-23 07:43:04 +00002508 memset(&l5_data, 0, sizeof(l5_data));
2509 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2510 FCOE_CONNECTION_TYPE, &l5_data);
2511 if (ret == 0) {
Michael Chandcc7e3a2011-08-26 09:45:40 +00002512 wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
2513 if (ctx->wait_cond)
2514 kcqe.completion_status = 0;
Michael Chane1928c82010-12-23 07:43:04 +00002515 }
2516
Michael Chandcc7e3a2011-08-26 09:45:40 +00002517 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2518 queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));
2519
Michael Chane1928c82010-12-23 07:43:04 +00002520 kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2521 kcqe.fcoe_conn_id = req->conn_id;
2522 kcqe.fcoe_conn_context_id = cid;
2523
2524 cqes[0] = (struct kcqe *) &kcqe;
2525 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2526 return ret;
2527}
2528
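/* Wait for every context at or above @start_cid to finish any
 * pending delete, polling the context flags with short sleeps and
 * warning about CIDs that never clear.
 */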
Michael Chan74e49bb2011-07-20 14:55:23 +00002529static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
2530{
2531 struct cnic_local *cp = dev->cnic_priv;
2532 u32 i;
2533
2534 for (i = start_cid; i < cp->max_cid_space; i++) {
2535 struct cnic_context *ctx = &cp->ctx_tbl[i];
2536 int j;
2537
2538 while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
2539 msleep(10);
2540
2541 for (j = 0; j < 5; j++) {
2542 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2543 break;
2544 msleep(20);
2545 }
2546
2547 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2548 netdev_warn(dev->netdev, "CID %x not deleted\n",
2549 ctx->cid);
2550 }
2551}
2552
Michael Chane1928c82010-12-23 07:43:04 +00002553static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2554{
2555 struct fcoe_kwqe_destroy *req;
2556 union l5cm_specific_data l5_data;
2557 struct cnic_local *cp = dev->cnic_priv;
Michael Chan5e65789f2013-09-02 11:42:29 -07002558 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chane1928c82010-12-23 07:43:04 +00002559 int ret;
2560 u32 cid;
2561
Michael Chan74e49bb2011-07-20 14:55:23 +00002562 cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
2563
Michael Chane1928c82010-12-23 07:43:04 +00002564 req = (struct fcoe_kwqe_destroy *) kwqe;
Michael Chan5e65789f2013-09-02 11:42:29 -07002565 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
Michael Chane1928c82010-12-23 07:43:04 +00002566
2567 memset(&l5_data, 0, sizeof(l5_data));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002568 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
Michael Chane1928c82010-12-23 07:43:04 +00002569 FCOE_CONNECTION_TYPE, &l5_data);
2570 return ret;
2571}
2572
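/* A KWQE could not be submitted, typically because a parity error
 * has taken the device down.  Synthesize the matching error KCQE
 * so the ULP can fail the request at once instead of waiting for
 * a completion that will never arrive.
 */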
Michael Chan23021c22012-01-04 12:12:28 +00002573static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
2574{
2575 struct cnic_local *cp = dev->cnic_priv;
2576 struct kcqe kcqe;
2577 struct kcqe *cqes[1];
2578 u32 cid;
2579 u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2580 u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
Michael Chan3238a9b2012-02-05 15:24:40 +00002581 u32 kcqe_op;
Michael Chan23021c22012-01-04 12:12:28 +00002582 int ulp_type;
2583
2584 cid = kwqe->kwqe_info0;
2585 memset(&kcqe, 0, sizeof(kcqe));
2586
Michael Chan3238a9b2012-02-05 15:24:40 +00002587 if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
2588 u32 l5_cid = 0;
2589
2590 ulp_type = CNIC_ULP_FCOE;
2591 if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
2592 struct fcoe_kwqe_conn_enable_disable *req;
2593
2594 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2595 kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
2596 cid = req->context_id;
2597 l5_cid = req->conn_id;
2598 } else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
2599 kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
2600 } else {
2601 return;
2602 }
2603 kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
2604 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
Michael Chan8ec3e702012-03-21 15:38:34 +00002605 kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
Michael Chan3238a9b2012-02-05 15:24:40 +00002606 kcqe.kcqe_info2 = cid;
2607 kcqe.kcqe_info0 = l5_cid;
2608
2609 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
Michael Chan23021c22012-01-04 12:12:28 +00002610 ulp_type = CNIC_ULP_ISCSI;
2611 if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
2612 cid = kwqe->kwqe_info1;
2613
2614 kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
2615 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
Michael Chan8ec3e702012-03-21 15:38:34 +00002616 kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
Michael Chan23021c22012-01-04 12:12:28 +00002617 kcqe.kcqe_info2 = cid;
2618 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
2619
2620 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
2621 struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
Michael Chan23021c22012-01-04 12:12:28 +00002622
2623 ulp_type = CNIC_ULP_L4;
2624 if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
2625 kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
2626 else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
2627 kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
2628 else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
2629 kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
2630 else
2631 return;
2632
2633 kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
2634 KCQE_FLAGS_LAYER_MASK_L4;
Michael Chan8ec3e702012-03-21 15:38:34 +00002635 l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
Michael Chan23021c22012-01-04 12:12:28 +00002636 l4kcqe->cid = cid;
2637 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
2638 } else {
2639 return;
2640 }
2641
Joe Perches64699332012-06-04 12:44:16 +00002642 cqes[0] = &kcqe;
Michael Chan23021c22012-01-04 12:12:28 +00002643 cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
2644}
2645
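/* Dispatch a batch of iSCSI/L4 KWQEs to their handlers.
 * Multi-WQE commands report the number of entries they consumed
 * through *work.  Submission errors are converted into error
 * KCQEs via cnic_bnx2x_kwqe_err() so upper layers recover fast.
 */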
Michael Chane1928c82010-12-23 07:43:04 +00002646static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2647 struct kwqe *wqes[], u32 num_wqes)
Michael Chan71034ba2009-10-10 13:46:59 +00002648{
2649 int i, work, ret;
2650 u32 opcode;
2651 struct kwqe *kwqe;
2652
2653 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2654		return -EAGAIN;		/* bnx2x is down */
2655
2656 for (i = 0; i < num_wqes; ) {
2657 kwqe = wqes[i];
2658 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2659 work = 1;
2660
2661 switch (opcode) {
2662 case ISCSI_KWQE_OPCODE_INIT1:
2663 ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2664 break;
2665 case ISCSI_KWQE_OPCODE_INIT2:
2666 ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2667 break;
2668 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2669 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2670 num_wqes - i, &work);
2671 break;
2672 case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2673 ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2674 break;
2675 case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2676 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2677 break;
2678 case L4_KWQE_OPCODE_VALUE_CONNECT1:
2679 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2680 &work);
2681 break;
2682 case L4_KWQE_OPCODE_VALUE_CLOSE:
2683 ret = cnic_bnx2x_close(dev, kwqe);
2684 break;
2685 case L4_KWQE_OPCODE_VALUE_RESET:
2686 ret = cnic_bnx2x_reset(dev, kwqe);
2687 break;
2688 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2689 ret = cnic_bnx2x_offload_pg(dev, kwqe);
2690 break;
2691 case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2692 ret = cnic_bnx2x_update_pg(dev, kwqe);
2693 break;
2694 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2695 ret = 0;
2696 break;
2697 default:
2698 ret = 0;
Joe Perchesddf79b22010-02-17 15:01:54 +00002699 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2700 opcode);
Michael Chan71034ba2009-10-10 13:46:59 +00002701 break;
2702 }
Michael Chan23021c22012-01-04 12:12:28 +00002703 if (ret < 0) {
Joe Perchesddf79b22010-02-17 15:01:54 +00002704 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2705 opcode);
Michael Chan23021c22012-01-04 12:12:28 +00002706
2707 /* Possibly bnx2x parity error, send completion
2708 * to ulp drivers with error code to speed up
2709 * cleanup and reset recovery.
2710 */
2711 if (ret == -EIO || ret == -EAGAIN)
2712 cnic_bnx2x_kwqe_err(dev, kwqe);
2713 }
Michael Chan71034ba2009-10-10 13:46:59 +00002714 i += work;
2715 }
2716 return 0;
2717}
2718
Michael Chane1928c82010-12-23 07:43:04 +00002719static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2720 struct kwqe *wqes[], u32 num_wqes)
2721{
Michael Chan104a43e2013-09-02 11:42:28 -07002722 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chane1928c82010-12-23 07:43:04 +00002723 int i, work, ret;
2724 u32 opcode;
2725 struct kwqe *kwqe;
2726
2727 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2728		return -EAGAIN;		/* bnx2x is down */
2729
Michael Chan104a43e2013-09-02 11:42:28 -07002730 if (!BNX2X_CHIP_IS_E2_PLUS(bp))
Michael Chane1928c82010-12-23 07:43:04 +00002731 return -EINVAL;
2732
2733 for (i = 0; i < num_wqes; ) {
2734 kwqe = wqes[i];
2735 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2736 work = 1;
2737
2738 switch (opcode) {
2739 case FCOE_KWQE_OPCODE_INIT1:
2740 ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2741 num_wqes - i, &work);
2742 break;
2743 case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2744 ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2745 num_wqes - i, &work);
2746 break;
2747 case FCOE_KWQE_OPCODE_ENABLE_CONN:
2748 ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2749 break;
2750 case FCOE_KWQE_OPCODE_DISABLE_CONN:
2751 ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2752 break;
2753 case FCOE_KWQE_OPCODE_DESTROY_CONN:
2754 ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2755 break;
2756 case FCOE_KWQE_OPCODE_DESTROY:
2757 ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2758 break;
2759 case FCOE_KWQE_OPCODE_STAT:
2760 ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2761 break;
2762 default:
2763 ret = 0;
2764 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2765 opcode);
2766 break;
2767 }
Michael Chan3238a9b2012-02-05 15:24:40 +00002768 if (ret < 0) {
Michael Chane1928c82010-12-23 07:43:04 +00002769 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2770 opcode);
Michael Chan3238a9b2012-02-05 15:24:40 +00002771
2772 /* Possibly bnx2x parity error, send completion
2773 * to ulp drivers with error code to speed up
2774 * cleanup and reset recovery.
2775 */
2776 if (ret == -EIO || ret == -EAGAIN)
2777 cnic_bnx2x_kwqe_err(dev, kwqe);
2778 }
Michael Chane1928c82010-12-23 07:43:04 +00002779 i += work;
2780 }
2781 return 0;
2782}
2783
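/* Top-level KWQE entry point for bnx2x devices: demultiplex the
 * batch by the layer code of its first WQE (iSCSI, L4 and L2 go
 * through the iSCSI path, FCoE through its own).
 */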
2784static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2785 u32 num_wqes)
2786{
2787 int ret = -EINVAL;
2788 u32 layer_code;
2789
2790 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2791 return -EAGAIN; /* bnx2x is down */
2792
2793 if (!num_wqes)
2794 return 0;
2795
2796 layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2797 switch (layer_code) {
2798 case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2799 case KWQE_FLAGS_LAYER_MASK_L4:
2800 case KWQE_FLAGS_LAYER_MASK_L2:
2801 ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2802 break;
2803
2804 case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2805 ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2806 break;
2807 }
2808 return ret;
2809}
2810
2811static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2812{
2813 if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2814 return KCQE_FLAGS_LAYER_MASK_L4;
2815
2816 return opflag & KCQE_FLAGS_LAYER_MASK;
2817}
2818
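/* Deliver completed KCQEs to the ULP drivers.  Consecutive KCQEs
 * of the same layer are batched into one indicate_kcqes() call,
 * and any ramrod completions are credited back to the SPQ.
 */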
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j, comp = 0;

	i = 0;
	j = 1;
	while (num_cqes) {
		struct cnic_ulp_ops *ulp_ops;
		int ulp_type;
		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);

		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
			comp++;

		while (j < num_cqes) {
			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;

			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
				break;

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
				comp++;
			j++;
		}

		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
			ulp_type = CNIC_ULP_RDMA;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
			ulp_type = CNIC_ULP_ISCSI;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
			ulp_type = CNIC_ULP_FCOE;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
			ulp_type = CNIC_ULP_L4;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
			goto end;
		else {
			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
				   kcqe_op_flag);
			goto end;
		}

		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
		if (likely(ulp_ops)) {
			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
						cp->completed_kcq + i, j);
		}
		rcu_read_unlock();
end:
		num_cqes -= j;
		i += j;
		j = 1;
	}
	if (unlikely(comp))
		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
}

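/* Copy completed KCQEs from the hardware queue into
 * cp->completed_kcq[].  A KCQE with KCQE_FLAGS_NEXT set belongs to a
 * group whose last entry has not arrived yet, so only entries up to
 * the last complete group are counted and the software producer index
 * only advances past complete groups.
 */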
static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
{
	struct cnic_local *cp = dev->cnic_priv;
	u16 i, ri, hw_prod, last;
	struct kcqe *kcqe;
	int kcqe_cnt = 0, last_cnt = 0;

	i = ri = last = info->sw_prod_idx;
	ri &= MAX_KCQ_IDX;
	hw_prod = *info->hw_prod_idx_ptr;
	hw_prod = info->hw_idx(hw_prod);

	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
		cp->completed_kcq[kcqe_cnt++] = kcqe;
		i = info->next_idx(i);
		ri = i & MAX_KCQ_IDX;
		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
			last_cnt = kcqe_cnt;
			last = i;
		}
	}

	info->sw_prod_idx = last;
	return last_cnt;
}

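/* Scan the bnx2x L2 receive completion ring for CLIENT_SETUP / HALT
 * ramrod completions.  The count returned lets the caller know that
 * a pending uio L2 ring setup or shutdown has finished.
 */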
static int cnic_l2_completion(struct cnic_local *cp)
{
	u16 hw_cons, sw_cons;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
					(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
	u32 cmd;
	int comp = 0;

	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
		return 0;

	hw_cons = *cp->rx_cons_ptr;
	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
		hw_cons++;

	sw_cons = cp->rx_cons;
	while (sw_cons != hw_cons) {
		u8 cqe_fp_flags;

		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
			    cmd == RAMROD_CMD_ID_ETH_HALT)
				comp++;
		}
		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
	}
	return comp;
}

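/* Notify the userspace uio client when the L2 rings have moved, and
 * clear the L2 wait flag once the expected ramrods have completed.
 */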
static void cnic_chk_pkt_rings(struct cnic_local *cp)
{
	u16 rx_cons, tx_cons;
	int comp = 0;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	rx_cons = *cp->rx_cons_ptr;
	tx_cons = *cp->tx_cons_ptr;
	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			comp = cnic_l2_completion(cp);

		cp->tx_cons = tx_cons;
		cp->rx_cons = rx_cons;

		if (cp->udev)
			uio_event_notify(&cp->udev->cnic_uinfo);
	}
	if (comp)
		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
}

static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
	int kcqe_cnt;

	/* status block index must be read before reading other fields */
	rmb();
	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;

	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		status_idx = (u16) *cp->kcq1.status_idx_ptr;
		/* status block index must be read first */
		rmb();
		cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
	}

	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);

	cnic_chk_pkt_rings(cp);

	return status_idx;
}

static int cnic_service_bnx2(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		struct status_block *sblk = status_blk;

		return sblk->status_idx;
	}

	return cnic_service_bnx2_queues(dev);
}

static void cnic_service_bnx2_msix(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;

	cp->last_status_idx = cnic_service_bnx2_queues(dev);

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

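/* Common interrupt path: prefetch the status block and the next KCQ
 * entry, then defer the real work to the per-device tasklet.
 */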
static void cnic_doirq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;

		prefetch(cp->status_blk.gen);
		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

		tasklet_schedule(&cp->cnic_irq_task);
	}
}

static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
	struct cnic_dev *dev = dev_instance;
	struct cnic_local *cp = dev->cnic_priv;

	if (cp->ack_int)
		cp->ack_int(dev);

	cnic_doirq(dev);

	return IRQ_HANDLED;
}

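/* Status block acknowledgment helpers.  cnic_ack_bnx2x_int() uses the
 * older HC command register interface, while cnic_ack_igu_sb() uses
 * the IGU interface on the newer E2 and later chips.
 */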
static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
				      u16 index, u8 op, u8 update)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
}

static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
			    u16 index, u8 op, u8 update)
{
	struct igu_regular cmd_data;
	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;

	cmd_data.sb_id_and_flags =
		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
		(update << IGU_REGULAR_BUPDATE_SHIFT) |
		(op << IGU_REGULAR_ENABLE_INT_SHIFT);

	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
}

static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
			   IGU_INT_DISABLE, 0);
}

static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
			IGU_INT_DISABLE, 0);
}

static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
			   IGU_INT_ENABLE, 1);
}

static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
			IGU_INT_ENABLE, 1);
}

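/* Drain one bnx2x KCQ, re-reading the status block index after each
 * batch of KCQEs, and return the last index seen so the caller can
 * decide how to re-arm the interrupt.
 */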
static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	u32 last_status = *info->status_idx_ptr;
	int kcqe_cnt;

	/* status block index must be read before reading the KCQ */
	rmb();
	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that sblk fields can change. */
		barrier();

		last_status = *info->status_idx_ptr;
		/* status block index must be read before reading the KCQ */
		rmb();
	}
	return last_status;
}

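/* bnx2x tasklet handler.  kcq1 is always serviced; on FCoE capable
 * devices kcq2 is serviced as well, and the loop only re-arms the
 * IGU once both queues have been drained at the same status index.
 */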
static void cnic_service_bnx2x_bh(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 status_idx, new_status_idx;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return;

	while (1) {
		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);

		CNIC_WR16(dev, cp->kcq1.io_addr,
			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);

		if (!CNIC_SUPPORTS_FCOE(bp)) {
			cp->arm_int(dev, status_idx);
			break;
		}

		new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);

		if (new_status_idx != status_idx)
			continue;

		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
			  MAX_KCQ_IDX);

		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
				status_idx, IGU_INT_ENABLE, 1);

		break;
	}
}

static int cnic_service_bnx2x(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct cnic_local *cp = dev->cnic_priv;

	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		cnic_doirq(dev);

	cnic_chk_pkt_rings(cp);

	return 0;
}

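/* Stop one upper layer driver.  ULP_F_CALL_PENDING is set under
 * cnic_lock around the cnic_stop() callback, presumably so that the
 * unregister paths can wait for an in-progress callback to finish.
 */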
static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
{
	struct cnic_ulp_ops *ulp_ops;

	if (if_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	mutex_lock(&cnic_lock);
	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
					    lockdep_is_held(&cnic_lock));
	if (!ulp_ops) {
		mutex_unlock(&cnic_lock);
		return;
	}
	set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	mutex_unlock(&cnic_lock);

	if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
		ulp_ops->cnic_stop(cp->ulp_handle[if_type]);

	clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
}

static void cnic_ulp_stop(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
		cnic_ulp_stop_one(cp, if_type);
}

static void cnic_ulp_start(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
						    lockdep_is_held(&cnic_lock));
		if (!ulp_ops || !ulp_ops->cnic_start) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_start(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;
	int rc;

	mutex_lock(&cnic_lock);
	ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
					    lockdep_is_held(&cnic_lock));
	if (ulp_ops && ulp_ops->cnic_get_stats)
		rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
	else
		rc = -ENODEV;
	mutex_unlock(&cnic_lock);
	return rc;
}

static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
	struct cnic_dev *dev = data;
	int ulp_type = CNIC_ULP_ISCSI;

	switch (info->cmd) {
	case CNIC_CTL_STOP_CMD:
		cnic_hold(dev);

		cnic_ulp_stop(dev);
		cnic_stop_hw(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_START_CMD:
		cnic_hold(dev);

		if (!cnic_start_hw(dev))
			cnic_ulp_start(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_STOP_ISCSI_CMD: {
		struct cnic_local *cp = dev->cnic_priv;
		set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, 0);
		break;
	}
	case CNIC_CTL_COMPLETION_CMD: {
		struct cnic_ctl_completion *comp = &info->data.comp;
		u32 cid = BNX2X_SW_CID(comp->cid);
		u32 l5_cid;
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
			break;

		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

			if (unlikely(comp->error)) {
				set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
				netdev_err(dev->netdev,
					   "CID %x CFC delete comp error %x\n",
					   cid, comp->error);
			}

			ctx->wait_cond = 1;
			wake_up(&ctx->waitq);
		}
		break;
	}
	case CNIC_CTL_FCOE_STATS_GET_CMD:
		ulp_type = CNIC_ULP_FCOE;
		/* fall through */
	case CNIC_CTL_ISCSI_STATS_GET_CMD:
		cnic_hold(dev);
		cnic_copy_ulp_stats(dev, ulp_type);
		cnic_put(dev);
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static void cnic_ulp_init(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl_prot(i);
		if (!ulp_ops || !ulp_ops->cnic_init) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_init(dev);

		ulp_put(ulp_ops);
	}
}

static void cnic_ulp_exit(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl_prot(i);
		if (!ulp_ops || !ulp_ops->cnic_exit) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_exit(dev);

		ulp_put(ulp_ops);
	}
}

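/* The cnic_cm_*() helpers below build L4 KWQEs for TCP connection
 * management (PG offload/update/upload, connect, close, abort) and
 * hand them to the device's submit_kwqes() method.
 */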
static int cnic_cm_offload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_offload_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
	l4kwqe->l2hdr_nbytes = ETH_HLEN;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->sa0 = dev->mac_addr[0];
	l4kwqe->sa1 = dev->mac_addr[1];
	l4kwqe->sa2 = dev->mac_addr[2];
	l4kwqe->sa3 = dev->mac_addr[3];
	l4kwqe->sa4 = dev->mac_addr[4];
	l4kwqe->sa5 = dev->mac_addr[5];

	l4kwqe->etype = ETH_P_IP;
	l4kwqe->ipid_start = DEF_IPID_START;
	l4kwqe->host_opaque = csk->l5_cid;

	if (csk->vlan_id) {
		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
		l4kwqe->vlan_tag = csk->vlan_id;
		l4kwqe->l2hdr_nbytes += 4;
	}

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_update_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_update_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
	l4kwqe->pg_cid = csk->pg_cid;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->pg_host_opaque = csk->l5_cid;
	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_upload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_upload *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->pg_cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

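/* Build the TCP CONNECT request as two or three KWQEs: req1 carries
 * the cids, the IPv4 addresses and the ports, req2 the remaining
 * IPv6 address words (IPv6 only), and req3 the keepalive parameters,
 * TCP options and the MSS derived from the path MTU.
 */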
static int cnic_cm_conn_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_connect_req1 *l4kwqe1;
	struct l4_kwq_connect_req2 *l4kwqe2;
	struct l4_kwq_connect_req3 *l4kwqe3;
	struct kwqe *wqes[3];
	u8 tcp_flags = 0;
	int num_wqes = 2;

	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
	memset(l4kwqe3, 0, sizeof(*l4kwqe3));

	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
	l4kwqe3->flags =
		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
	l4kwqe3->ka_timeout = csk->ka_timeout;
	l4kwqe3->ka_interval = csk->ka_interval;
	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
	l4kwqe3->tos = csk->tos;
	l4kwqe3->ttl = csk->ttl;
	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
	l4kwqe3->pmtu = csk->mtu;
	l4kwqe3->rcv_buf = csk->rcv_buf;
	l4kwqe3->snd_buf = csk->snd_buf;
	l4kwqe3->seed = csk->seed;

	wqes[0] = (struct kwqe *) l4kwqe1;
	if (test_bit(SK_F_IPV6, &csk->flags)) {
		wqes[1] = (struct kwqe *) l4kwqe2;
		wqes[2] = (struct kwqe *) l4kwqe3;
		num_wqes = 3;

		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
		l4kwqe2->flags =
			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
			       sizeof(struct tcphdr);
	} else {
		wqes[1] = (struct kwqe *) l4kwqe3;
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
			       sizeof(struct tcphdr);
	}

	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
	l4kwqe1->flags =
		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
	l4kwqe1->cid = csk->cid;
	l4kwqe1->pg_cid = csk->pg_cid;
	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
	if (csk->tcp_flags & SK_TCP_NAGLE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
	if (csk->tcp_flags & SK_TCP_SACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;

	l4kwqe1->tcp_flags = tcp_flags;

	return dev->submit_kwqes(dev, wqes, num_wqes);
}

static int cnic_cm_close_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_close_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_abort_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_reset_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
			  u32 l5_cid, struct cnic_sock **csk, void *context)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_sock *csk1;

	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return -EINVAL;

	if (cp->ctx_tbl) {
		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			return -EAGAIN;
	}

	csk1 = &cp->csk_tbl[l5_cid];
	if (atomic_read(&csk1->ref_count))
		return -EAGAIN;

	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
		return -EBUSY;

	csk1->dev = dev;
	csk1->cid = cid;
	csk1->l5_cid = l5_cid;
	csk1->ulp_type = ulp_type;
	csk1->context = context;

	csk1->ka_timeout = DEF_KA_TIMEOUT;
	csk1->ka_interval = DEF_KA_INTERVAL;
	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
	csk1->tos = DEF_TOS;
	csk1->ttl = DEF_TTL;
	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
	csk1->rcv_buf = DEF_RCV_BUF;
	csk1->snd_buf = DEF_SND_BUF;
	csk1->seed = DEF_SEED;
	csk1->tcp_flags = 0;

	*csk = csk1;
	return 0;
}

static void cnic_cm_cleanup(struct cnic_sock *csk)
{
	if (csk->src_port) {
		struct cnic_dev *dev = csk->dev;
		struct cnic_local *cp = dev->cnic_priv;

		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
		csk->src_port = 0;
	}
}

static void cnic_close_conn(struct cnic_sock *csk)
{
	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
		cnic_cm_upload_pg(csk);
		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	}
	cnic_cm_cleanup(csk);
}

static int cnic_cm_destroy(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	csk_hold(csk);
	clear_bit(SK_F_INUSE, &csk->flags);
	smp_mb__after_clear_bit();
	while (atomic_read(&csk->ref_count) != 1)
		msleep(1);
	cnic_cm_cleanup(csk);

	csk->flags = 0;
	csk_put(csk);
	return 0;
}

static inline u16 cnic_get_vlan(struct net_device *dev,
				struct net_device **vlan_dev)
{
	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		*vlan_dev = vlan_dev_real_dev(dev);
		return vlan_dev_vlan_id(dev);
	}
	*vlan_dev = dev;
	return 0;
}

static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_INET)
	struct rtable *rt;

	rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
	if (!IS_ERR(rt)) {
		*dst = &rt->dst;
		return 0;
	}
	return PTR_ERR(rt);
#else
	return -ENETUNREACH;
#endif
}

static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = dst_addr->sin6_addr;
	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
		fl6.flowi6_oif = dst_addr->sin6_scope_id;

	*dst = ip6_route_output(&init_net, NULL, &fl6);
	if ((*dst)->error) {
		dst_release(*dst);
		*dst = NULL;
		return -ENETUNREACH;
	} else
		return 0;
#endif

	return -ENETUNREACH;
}

static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
					   int ulp_type)
{
	struct cnic_dev *dev = NULL;
	struct dst_entry *dst;
	struct net_device *netdev = NULL;
	int err = -ENETUNREACH;

	if (dst_addr->sin_family == AF_INET)
		err = cnic_get_v4_route(dst_addr, &dst);
	else if (dst_addr->sin_family == AF_INET6) {
		struct sockaddr_in6 *dst_addr6 =
			(struct sockaddr_in6 *) dst_addr;

		err = cnic_get_v6_route(dst_addr6, &dst);
	} else
		return NULL;

	if (err)
		return NULL;

	if (!dst->dev)
		goto done;

	cnic_get_vlan(dst->dev, &netdev);

	dev = cnic_from_netdev(netdev);

done:
	dst_release(dst);
	if (dev)
		cnic_put(dev);
	return dev;
}

static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
}

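/* Resolve the route for a connection and reserve the local port.  A
 * caller-supplied port inside [CNIC_LOCAL_PORT_MIN, CNIC_LOCAL_PORT_MAX)
 * is reserved in csk_port_tbl; otherwise a free port is allocated.
 * The vlan id and MTU are taken from the routed device when it
 * resolves back to our own netdev.
 */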
static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	int is_v6, rc = 0;
	struct dst_entry *dst = NULL;
	struct net_device *realdev;
	__be16 local_port;
	u32 port_id;

	if (saddr->local.v6.sin6_family == AF_INET6 &&
	    saddr->remote.v6.sin6_family == AF_INET6)
		is_v6 = 1;
	else if (saddr->local.v4.sin_family == AF_INET &&
		 saddr->remote.v4.sin_family == AF_INET)
		is_v6 = 0;
	else
		return -EINVAL;

	clear_bit(SK_F_IPV6, &csk->flags);

	if (is_v6) {
		set_bit(SK_F_IPV6, &csk->flags);
		cnic_get_v6_route(&saddr->remote.v6, &dst);

		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
		       sizeof(struct in6_addr));
		csk->dst_port = saddr->remote.v6.sin6_port;
		local_port = saddr->local.v6.sin6_port;

	} else {
		cnic_get_v4_route(&saddr->remote.v4, &dst);

		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
		csk->dst_port = saddr->remote.v4.sin_port;
		local_port = saddr->local.v4.sin_port;
	}

	csk->vlan_id = 0;
	csk->mtu = dev->netdev->mtu;
	if (dst && dst->dev) {
		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
		if (realdev == dev->netdev) {
			csk->vlan_id = vlan;
			csk->mtu = dst_mtu(dst);
		}
	}

	port_id = be16_to_cpu(local_port);
	if (port_id >= CNIC_LOCAL_PORT_MIN &&
	    port_id < CNIC_LOCAL_PORT_MAX) {
		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
			port_id = 0;
	} else
		port_id = 0;

	if (!port_id) {
		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
		if (port_id == -1) {
			rc = -ENOMEM;
			goto err_out;
		}
		local_port = cpu_to_be16(port_id);
	}
	csk->src_port = local_port;

err_out:
	dst_release(dst);
	return rc;
}

static void cnic_init_csk_state(struct cnic_sock *csk)
{
	csk->state = 0;
	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
	clear_bit(SK_F_CLOSING, &csk->flags);
}

static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	int err = 0;

	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		return -EOPNOTSUPP;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
		return -EINVAL;

	cnic_init_csk_state(csk);

	err = cnic_get_route(csk, saddr);
	if (err)
		goto err_out;

	err = cnic_resolve_addr(csk, saddr);
	if (!err)
		return 0;

err_out:
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	return err;
}

static int cnic_cm_abort(struct cnic_sock *csk)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_abort_prep(csk))
		return cnic_cm_abort_req(csk);

	/* Getting here means that we haven't started connect, or
	 * connect was not successful, or it has been reset by the target.
	 */

	cp->close_conn(csk, opcode);
	if (csk->state != opcode) {
		/* Wait for remote reset sequence to complete */
		while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			msleep(1);

		return -EALREADY;
	}

	return 0;
}

static int cnic_cm_close(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_close_prep(csk)) {
		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		return cnic_cm_close_req(csk);
	} else {
		/* Wait for remote reset sequence to complete */
		while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			msleep(1);

		return -EALREADY;
	}
	return 0;
}

static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
			   u8 opcode)
{
	struct cnic_ulp_ops *ulp_ops;
	int ulp_type = csk->ulp_type;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (ulp_ops) {
		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
			ulp_ops->cm_connect_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
			ulp_ops->cm_close_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
			ulp_ops->cm_remote_abort(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
			ulp_ops->cm_abort_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
			ulp_ops->cm_remote_close(csk);
	}
	rcu_read_unlock();
}

static int cnic_cm_set_pg(struct cnic_sock *csk)
{
	if (cnic_offld_prep(csk)) {
		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			cnic_cm_update_pg(csk);
		else
			cnic_cm_offload_pg(csk);
	}
	return 0;
}

static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 l5_cid = kcqe->pg_host_opaque;
	u8 opcode = kcqe->op_code;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];

	csk_hold(csk);
	if (!cnic_in_use(csk))
		goto done;

	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		goto done;
	}
	/* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
	if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk,
			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		goto done;
	}

	csk->pg_cid = kcqe->pg_cid;
	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	cnic_cm_conn_req(csk);

done:
	csk_put(csk);
}

static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
	u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	ctx->timestamp = jiffies;
	ctx->wait_cond = 1;
	wake_up(&ctx->waitq);
}

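/* Demultiplex one L4 (or FCoE terminate) KCQE to its cnic_sock and
 * drive the connection state machine, calling back up to the ULP
 * driver through cnic_cm_upcall() where appropriate.
 */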
static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
	u8 opcode = l4kcqe->op_code;
	u32 l5_cid;
	struct cnic_sock *csk;

	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
		cnic_process_fcoe_term_conn(dev, kcqe);
		return;
	}
	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		cnic_cm_process_offld_pg(dev, l4kcqe);
		return;
	}

	l5_cid = l4kcqe->conn_id;
	if (opcode & 0x80)
		l5_cid = l4kcqe->cid;
	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return;

	csk = &cp->csk_tbl[l5_cid];
	csk_hold(csk);

	if (!cnic_in_use(csk)) {
		csk_put(csk);
		return;
	}

	switch (opcode) {
	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
		if (l4kcqe->status != 0) {
			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
			cnic_cm_upcall(cp, csk,
				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		}
		break;
	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
		if (l4kcqe->status == 0)
			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
		else if (l4kcqe->status ==
			 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
			set_bit(SK_F_HW_ERR, &csk->flags);

		smp_mb__before_clear_bit();
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk, opcode);
		break;

	case L5CM_RAMROD_CMD_ID_CLOSE: {
		struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;

		if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) {
			netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
				    l4kcqe->status, l5kcqe->completion_status);
			opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
			/* Fall through */
		} else {
			break;
		}
	}
	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
			set_bit(SK_F_HW_ERR, &csk->flags);

		cp->close_conn(csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
		/* after we already sent CLOSE_REQ */
		if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
		    !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
		    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
			cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
		else
			cnic_cm_upcall(cp, csk, opcode);
		break;
	}
	csk_put(csk);
}

static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
{
	struct cnic_dev *dev = data;
	int i;

	for (i = 0; i < num; i++)
		cnic_cm_process_kcqe(dev, kcqe[i]);
}

static struct cnic_ulp_ops cm_ulp_ops = {
	.indicate_kcqes = cnic_cm_indicate_kcqe,
};

static void cnic_cm_free_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	kfree(cp->csk_tbl);
	cp->csk_tbl = NULL;
	cnic_free_id_tbl(&cp->csk_port_tbl);
}

static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 port_id;

	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
			      GFP_KERNEL);
	if (!cp->csk_tbl)
		return -ENOMEM;

	port_id = prandom_u32();
	port_id %= CNIC_LOCAL_PORT_RANGE;
	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
			     CNIC_LOCAL_PORT_MIN, port_id)) {
		cnic_cm_free_mem(dev);
		return -ENOMEM;
	}
	return 0;
}

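/* Decide whether a close/reset event should move the socket into the
 * closing state.  An unsolicited reset (offload-complete flag still
 * set) is first normalized to RESET_RECEIVED; the numbered cases
 * below then determine whether the event matches what we expect.
 */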
static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
{
	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		/* Unsolicited RESET_COMP or RESET_RECEIVED */
		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
		csk->state = opcode;
	}

	/* 1. If event opcode matches the expected event in csk->state
	 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
	 *    event
	 * 3. If the expected event is 0, meaning the connection was never
	 *    established, we accept the opcode from cm_abort.
	 */
	if (opcode == csk->state || csk->state == 0 ||
	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
	    csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
			if (csk->state == 0)
				csk->state = opcode;
			return 1;
		}
	}
	return 0;
}

static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
		cnic_cm_upcall(cp, csk, opcode);
		return;
	}

	clear_bit(SK_F_CONNECT_START, &csk->flags);
	cnic_close_conn(csk);
	csk->state = opcode;
	cnic_cm_upcall(cp, csk, opcode);
}

static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
{
}

static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
	u32 seed;

	seed = prandom_u32();
	cnic_ctx_wr(dev, 45, 0, seed);
	return 0;
}

static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
	union l5cm_specific_data l5_data;
	u32 cmd = 0;
	int close_complete = 0;

	switch (opcode) {
	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
		if (cnic_ready_to_close(csk, opcode)) {
			if (test_bit(SK_F_HW_ERR, &csk->flags))
				close_complete = 1;
			else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
			else
				close_complete = 1;
		}
		break;
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
		break;
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		close_complete = 1;
		break;
	}
	if (cmd) {
		memset(&l5_data, 0, sizeof(l5_data));

		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
				    &l5_data);
	} else if (close_complete) {
		ctx->timestamp = jiffies;
		cnic_close_conn(csk);
		cnic_cm_upcall(cp, csk, csk->state);
	}
}

static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (!cp->ctx_tbl)
		return;

	if (!netif_running(dev->netdev))
		return;

	cnic_bnx2x_delete_wait(dev, 0);

	cancel_delayed_work(&cp->delete_task);
	flush_workqueue(cnic_wq);

	if (atomic_read(&cp->iscsi_conn) != 0)
		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
			    atomic_read(&cp->iscsi_conn));
}

static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = bp->pfid;
	u32 port = BP_PORT(bp);

	cnic_init_bnx2x_mac(dev);
	cnic_bnx2x_set_tcp_options(dev, 0, 1);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);

	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
		DEF_MAX_DA_COUNT);

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
		DEF_MAX_CWND);
	return 0;
}

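/* Delayed work that completes deferred connection teardown: each
 * marked context gets a ~2 second quiet period before the destroy
 * ramrod is issued and its resources are freed; the work reschedules
 * itself while contexts are still waiting.
 */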
Michael Chanfdf24082010-10-13 14:06:47 +00004256static void cnic_delete_task(struct work_struct *work)
4257{
4258 struct cnic_local *cp;
4259 struct cnic_dev *dev;
4260 u32 i;
4261 int need_resched = 0;
4262
4263 cp = container_of(work, struct cnic_local, delete_task.work);
4264 dev = cp->dev;
4265
Dmitry Kravkovfab0dc82011-03-31 17:04:22 -07004266 if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
4267 struct drv_ctl_info info;
4268
Dmitry Kravkovfab0dc82011-03-31 17:04:22 -07004269 cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
Dmitry Kravkovfab0dc82011-03-31 17:04:22 -07004270
4271 info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
4272 cp->ethdev->drv_ctl(dev->netdev, &info);
4273 }
4274
Michael Chanfdf24082010-10-13 14:06:47 +00004275 for (i = 0; i < cp->max_cid_space; i++) {
4276 struct cnic_context *ctx = &cp->ctx_tbl[i];
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004277 int err;
Michael Chanfdf24082010-10-13 14:06:47 +00004278
4279 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
4280 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4281 continue;
4282
4283 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4284 need_resched = 1;
4285 continue;
4286 }
4287
4288 if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4289 continue;
4290
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004291 err = cnic_bnx2x_destroy_ramrod(dev, i);
Michael Chanfdf24082010-10-13 14:06:47 +00004292
4293 cnic_free_bnx2x_conn_resc(dev, i);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004294 if (!err) {
4295 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4296 atomic_dec(&cp->iscsi_conn);
Michael Chanfdf24082010-10-13 14:06:47 +00004297
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004298 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4299 }
Michael Chanfdf24082010-10-13 14:06:47 +00004300 }
4301
4302 if (need_resched)
4303 queue_delayed_work(cnic_wq, &cp->delete_task,
4304 msecs_to_jiffies(10));
4305
4306}
4307
Michael Chana4636962009-06-08 18:14:43 -07004308static int cnic_cm_open(struct cnic_dev *dev)
4309{
4310 struct cnic_local *cp = dev->cnic_priv;
4311 int err;
4312
4313 err = cnic_cm_alloc_mem(dev);
4314 if (err)
4315 return err;
4316
4317 err = cp->start_cm(dev);
4318
4319 if (err)
4320 goto err_out;
4321
Michael Chanfdf24082010-10-13 14:06:47 +00004322 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4323
Michael Chana4636962009-06-08 18:14:43 -07004324 dev->cm_create = cnic_cm_create;
4325 dev->cm_destroy = cnic_cm_destroy;
4326 dev->cm_connect = cnic_cm_connect;
4327 dev->cm_abort = cnic_cm_abort;
4328 dev->cm_close = cnic_cm_close;
4329 dev->cm_select_dev = cnic_cm_select_dev;
4330
4331 cp->ulp_handle[CNIC_ULP_L4] = dev;
4332 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4333 return 0;
4334
4335err_out:
4336 cnic_cm_free_mem(dev);
4337 return err;
4338}

static int cnic_cm_shutdown(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	if (!cp->csk_tbl)
		return 0;

	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
		struct cnic_sock *csk = &cp->csk_tbl[i];

		clear_bit(SK_F_INUSE, &csk->flags);
		cnic_cm_cleanup(csk);
	}
	cnic_cm_free_mem(dev);

	return 0;
}

static void cnic_init_context(struct cnic_dev *dev, u32 cid)
{
	u32 cid_addr;
	int i;

	cid_addr = GET_CID_ADDR(cid);

	for (i = 0; i < CTX_SIZE; i += 4)
		cnic_ctx_wr(dev, cid_addr, i, 0);
}

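/* Program (or invalidate) the 5709 host page table that backs context
 * memory.  Each page address is written through the HOST_PAGE_TBL_CTRL
 * register and polled for up to 50 us (10 x 5 us) until the chip clears
 * the WRITE_REQ bit.
 */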
static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret = 0, i;
	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;

	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
		return 0;

	for (i = 0; i < cp->ctx_blks; i++) {
		int j;
		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
		u32 val;

		memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);

		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) cp->ctx_arr[i].mapping >> 32);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static void cnic_free_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		cp->disable_int_sync(dev);
		tasklet_kill(&cp->cnic_irq_task);
		free_irq(ethdev->irq_arr[0].vector, dev);
	}
}

static int cnic_request_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
	if (err)
		tasklet_disable(&cp->cnic_irq_task);

	return err;
}
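
/* Note: a failed request_irq() above leaves the tasklet disabled, so a
 * stray tasklet_schedule() cannot run the service routine without a
 * registered IRQ.
 */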

static int cnic_init_bnx2_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		int err, i = 0;
		int sblk_num = cp->status_blk_num;
		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);

		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
		tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
			     (unsigned long) dev);
		err = cnic_request_irq(dev);
		if (err)
			return err;

		while (cp->status_blk.bnx2->status_completion_producer_index &&
		       i < 10) {
			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
				1 << (11 + sblk_num));
			udelay(10);
			i++;
			barrier();
		}
		if (cp->status_blk.bnx2->status_completion_producer_index) {
			cnic_free_irq(dev);
			goto failed;
		}

	} else {
		struct status_block *sblk = cp->status_blk.gen;
		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
		int i = 0;

		while (sblk->status_completion_producer_index && i < 10) {
			CNIC_WR(dev, BNX2_HC_COMMAND,
				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
			udelay(10);
			i++;
			barrier();
		}
		if (sblk->status_completion_producer_index)
			goto failed;

	}
	return 0;

failed:
	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
	return -EBUSY;
}

static void cnic_enable_bnx2_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
	synchronize_irq(ethdev->irq_arr[0].vector);
}

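/* Set up the bnx2 L2 TX ring used by the UIO interface.  Every BD is
 * pre-pointed at the same host buffer (udev->l2_buf_map), so the ring
 * consumer only needs to fill in lengths and ring the doorbell.
 */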
static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct cnic_uio_dev *udev = cp->udev;
	u32 cid_addr, tx_cid, sb_id;
	u32 val, offset0, offset1, offset2, offset3;
	int i;
	struct bnx2_tx_bd *txbd;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct status_block *s_blk = cp->status_blk.gen;

	sb_id = cp->status_blk_num;
	tx_cid = 20;
	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		tx_cid = TX_TSS_CID + sb_id - 1;
		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
			(TX_TSS_CID << 7));
		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
	}
	cp->tx_cons = *cp->tx_cons_ptr;

	cid_addr = GET_CID_ADDR(tx_cid);
	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;

		for (i = 0; i < PHY_CTX_SIZE; i += 4)
			cnic_ctx_wr(dev, cid_addr2, i, 0);

		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		cnic_init_context(dev, tx_cid);
		cnic_init_context(dev, tx_cid + 1);

		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	cnic_ctx_wr(dev, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	cnic_ctx_wr(dev, cid_addr, offset1, val);

	txbd = udev->l2_ring;

	buf_map = udev->l2_buf_map;
	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) {
		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) ring_map >> 32;
	cnic_ctx_wr(dev, cid_addr, offset2, val);
	txbd->tx_bd_haddr_hi = val;

	val = (u64) ring_map & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, offset3, val);
	txbd->tx_bd_haddr_lo = val;
}

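/* Set up the bnx2 L2 RX ring (CID 2) one page into udev->l2_ring.  Each
 * BD maps a slot of the shared udev->l2_buf buffer, and the chain BD at
 * the end of the page points back to the start of the same ring.
 */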
static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct cnic_uio_dev *udev = cp->udev;
	u32 cid_addr, sb_id, val, coal_reg, coal_val;
	int i;
	struct bnx2_rx_bd *rxbd;
	struct status_block *s_blk = cp->status_blk.gen;
	dma_addr_t ring_map = udev->l2_ring_map;

	sb_id = cp->status_blk_num;
	cnic_init_context(dev, 2);
	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
	coal_reg = BNX2_HC_COMMAND;
	coal_val = CNIC_RD(dev, coal_reg);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
		coal_reg = BNX2_HC_COALESCE_NOW;
		coal_val = 1 << (11 + sb_id);
	}
	i = 0;
	while (!(*cp->rx_cons_ptr != 0) && i < 10) {
		CNIC_WR(dev, coal_reg, coal_val);
		udelay(10);
		i++;
		barrier();
	}
	cp->rx_cons = *cp->rx_cons_ptr;

	cid_addr = GET_CID_ADDR(2);
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	if (sb_id == 0)
		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
	else
		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);

	rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
	for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->rx_bd_len = cp->l2_single_buf_size;
		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
	rxbd->rx_bd_haddr_hi = val;

	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
	rxbd->rx_bd_haddr_lo = val;

	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
}

static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct kwqe *wqes[1], l2kwqe;

	memset(&l2kwqe, 0, sizeof(l2kwqe));
	wqes[0] = &l2kwqe;
	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
			       KWQE_OPCODE_SHIFT) | 2;
	dev->submit_kwqes(dev, wqes, 1);
}

static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 val;

	val = cp->func << 2;

	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);

	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;

	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}

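/* Bring up the bnx2 (1g) kernel queues: program the MQ page size and
 * host coalescing, set up the KWQ/KCQ contexts and their page tables,
 * then release the CP and COM processors and start the L2 rings.
 */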
static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct status_block *sblk = cp->status_blk.gen;
	u32 val, kcq_cid_addr, kwq_cid_addr;
	int err;

	cnic_set_bnx2_mac(dev);

	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	if (CNIC_PAGE_BITS > 12)
		val |= (12 - 8)  << 4;
	else
		val |= (CNIC_PAGE_BITS - 8)  << 4;

	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq1.sw_prod_idx = 0;
	cp->kcq1.hw_prod_idx_ptr =
		&sblk->status_completion_producer_index;

	cp->kcq1.status_idx_ptr = &sblk->status_idx;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq1.dma.pgtbl_map;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->int_num = 0;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *msblk = cp->status_blk.bnx2;
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

		cp->kcq1.hw_prod_idx_ptr =
			&msblk->status_completion_producer_index;
		cp->kcq1.status_idx_ptr = &msblk->status_idx;
		cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver posts an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non-zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		netdev_err(dev->netdev, "cnic_init_irq failed\n");
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;

	return 0;
}

static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_offset = ethdev->ctx_tbl_offset;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		struct cnic_ctx *ctx = &cp->ctx_arr[i];
		dma_addr_t map = ctx->mapping;

		if (cp->ctx_align) {
			unsigned long mask = cp->ctx_align - 1;

			map = (map + mask) & ~mask;
		}

		cnic_ctx_tbl_wr(dev, start_offset + i, map);
	}
}

static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
		     (unsigned long) dev);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
		err = cnic_request_irq(dev);

	return err;
}

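/* Flip the HC_ENABLED flag of one status-block index in CSTORM internal
 * memory.  Used below to enable or disable host coalescing on the iSCSI
 * EQ consumer index.
 */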
static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
						u16 sb_id, u8 sb_index,
						u8 disable)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);

	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = CNIC_RD16(dev, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
		  HC_INDEX_DATA_HC_ENABLED);
	CNIC_WR16(dev, addr, flags);
}

static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u8 sb_id = cp->status_blk_num;

	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
		 offsetof(struct hc_status_block_data_e1x, index_data) +
		 sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
		 offsetof(struct hc_index_data, timeout), 64 / 4);
	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}

static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}

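/* Fill in the client_init ramrod data and the TX BD page for the bnx2x
 * (10g) iSCSI L2 client.  BDs are grouped in threes: a start BD, a parse
 * BD (E1x or E2 layout depending on the chip) and a regular BD.
 */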
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_uio_dev *udev = cp->udev;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	u32 val;

	memset(txbd, 0, CNIC_PAGE_SIZE);

	buf_map = udev->l2_buf_map;
	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
		struct eth_tx_parse_bd_e1x *pbd_e1x =
			&((txbd + 1)->parse_bd_e1x);
		struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
		reg_bd->addr_hi = start_bd->addr_hi;
		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
		start_bd->nbytes = cpu_to_le16(0x10);
		start_bd->nbd = cpu_to_le16(3);
		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
		start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

		if (BNX2X_CHIP_IS_E2_PLUS(bp))
			pbd_e2->parsing_data = (UNICAST_ADDRESS <<
				ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
		else
			pbd_e1x->global_data = (UNICAST_ADDRESS <<
				ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
	}

	val = (u64) ring_map >> 32;
	txbd->next_bd.addr_hi = cpu_to_le32(val);

	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);

	val = (u64) ring_map & 0xffffffff;
	txbd->next_bd.addr_lo = cpu_to_le32(val);

	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;

	/* reset xstorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		data->general.statistics_zero_flg = 1;
		data->general.statistics_en_flg = 1;
		data->general.statistics_counter_id = cli;
	}

	cp->tx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
}

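/* Fill in the RX side of the client_init ramrod data: the BD ring lives
 * at page 1 of udev->l2_ring, the completion queue (RCQ) at page 2, plus
 * VLAN-removal and statistics parameters.
 */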
static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_uio_dev *udev = cp->udev;
	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
				CNIC_PAGE_SIZE);
	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
				(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
	u32 val;
	dma_addr_t ring_map = udev->l2_ring_map;

	/* General data */
	data->general.client_id = cli;
	data->general.activate_flg = 1;
	data->general.sp_client_id = cli;
	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
	data->general.func_id = bp->pfid;

	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
	}

	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
	rxbd->addr_hi = cpu_to_le32(val);
	data->rx.bd_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
	rxbd->addr_lo = cpu_to_le32(val);
	data->rx.bd_page_base.lo = cpu_to_le32(val);

	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
	rxcqe->addr_hi = cpu_to_le32(val);
	data->rx.cqe_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
	rxcqe->addr_lo = cpu_to_le32(val);
	data->rx.cqe_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->rx.client_qzone_id = cl_qzone_id;
	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
	data->rx.status_block_id = BNX2X_DEF_SB_ID;

	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;

	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
	data->rx.outer_vlan_removal_enable_flg = 1;
	data->rx.silent_vlan_removal_flg = 1;
	data->rx.silent_vlan_value = 0;
	data->rx.silent_vlan_mask = 0xffff;

	cp->rx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
	cp->rx_cons = *cp->rx_cons_ptr;
}

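/* Point kcq1 (the iSCSI event queue) and, on E2 and later chips, kcq2
 * (the FCoE event queue) at their producer registers and status-block
 * indices.
 */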
static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = bp->pfid;

	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
	cp->kcq1.sw_prod_idx = 0;

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	} else {
		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
				   USTORM_FCOE_EQ_PROD_OFFSET(pfid);
		cp->kcq2.sw_prod_idx = 0;
		cp->kcq2.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
		cp->kcq2.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}
}

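/* Bring up the bnx2x (10g) side: allocate the iSCSI (and, on E2+, FCoE)
 * CID tables, program the single iSCSI event queue (page addresses,
 * status block, consumer index) in CSTORM, and hook up the IRQ.
 */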
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int func, ret;
	u32 pfid;

	dev->stats_addr = ethdev->addr_drv_info_to_mcp;
	cp->func = bp->pf_num;

	func = CNIC_FUNC(cp);
	pfid = bp->pfid;

	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
			       cp->iscsi_start_cid, 0);

	if (ret)
		return -ENOMEM;

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
				       cp->fcoe_start_cid, 0);

		if (ret)
			return -ENOMEM;
	}

	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;

	cnic_init_bnx2x_kcq(dev);

	/* Only 1 EQ */
	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
		 HC_INDEX_ISCSI_EQ_CONS);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);

	cnic_setup_bnx2x_context(dev);

	ret = cnic_init_bnx2x_irq(dev);
	if (ret)
		return ret;

	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
	return 0;
}

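/* Bring up the L2 rings used by the UIO interface.  On bnx2 this is
 * plain context programming; on bnx2x it builds the client_init ramrod
 * data in the UIO buffer, issues CLIENT_SETUP, and waits up to ~10 ms
 * for the completion before enabling the ring.
 */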
static void cnic_init_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_uio_dev *udev = cp->udev;

	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		u32 cl_qzone_id;
		struct client_init_ramrod_data *data;
		union l5cm_specific_data l5_data;
		struct ustorm_eth_rx_producers rx_prods = {0};
		u32 off, i, *cid_ptr;

		rx_prods.bd_prod = 0;
		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
		barrier();

		cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);

		off = BAR_USTRORM_INTMEM +
			(BNX2X_CHIP_IS_E2_PLUS(bp) ?
			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
			 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli));

		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		data = udev->l2_buf;
		cid_ptr = udev->l2_buf + 12;

		memset(data, 0, sizeof(*data));

		cnic_init_bnx2x_tx_ring(dev, data);
		cnic_init_bnx2x_rx_ring(dev, data);

		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;

		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);

		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
			cid, ETH_CONNECTION_TYPE, &l5_data);

		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				"iSCSI CLIENT_SETUP did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
		cnic_ring_ctl(dev, cid, cli, 1);
		*cid_ptr = cid >> 4;
		*(cid_ptr + 1) = cid * bp->db_size;
		*(cid_ptr + 2) = UIO_USE_TX_DOORBELL;
	}
}

static void cnic_shutdown_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	void *rx_ring;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_shutdown_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		union l5cm_specific_data l5_data;
		int i;

		cnic_ring_ctl(dev, cid, cli, 0);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
			cid, ETH_CONNECTION_TYPE, &l5_data);
		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				"iSCSI CLIENT_HALT did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);

		memset(&l5_data, 0, sizeof(l5_data));
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
			cid, NONE_CONNECTION_TYPE, &l5_data);
		msleep(10);
	}
	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
	memset(rx_ring, 0, CNIC_PAGE_SIZE);
}

static int cnic_register_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (!ethdev)
		return -ENODEV;

	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
		return 0;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err)
		netdev_err(dev->netdev, "register_cnic failed\n");

	/* Read iSCSI config again.  On some bnx2x devices, iSCSI config
	 * can change after firmware is downloaded.
	 */
	dev->max_iscsi_conn = ethdev->max_iscsi_conn;
	if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		dev->max_iscsi_conn = 0;

	return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!ethdev)
		return;

	ethdev->drv_unregister_cnic(dev->netdev);
}

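/* Common bring-up path: allocate resources, run the chip-specific
 * start_hw op, open the connection manager, then enable interrupts.
 * Returns -EALREADY if the device is already up.
 */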
static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		netdev_err(dev->netdev, "allocate resource failure\n");
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}

static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	cnic_free_resc(dev);
}

static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 hc_index = HC_INDEX_ISCSI_EQ_CONS;
	u32 sb_id = cp->status_blk_num;
	u32 idx_off, syn_off;

	cnic_free_irq(dev);

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		idx_off = offsetof(struct hc_status_block_e2, index_values) +
			  (hc_index * sizeof(u16));

		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id);
	} else {
		idx_off = offsetof(struct hc_status_block_e1x, index_values) +
			  (hc_index * sizeof(u16));

		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id);
	}
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
		  idx_off, 0);

	*cp->kcq1.hw_prod_idx_ptr = 0;
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0);
	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
	cnic_free_resc(dev);
}

static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		int i = 0;

		/* Need to wait for the ring shutdown event to complete
		 * before clearing the CNIC_UP flag.
		 */
		while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		cnic_shutdown_rings(dev);
		cp->stop_cm(dev);
		cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ;
		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}

static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}

static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL)
		return NULL;

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}

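/* Probe helper for bnx2 (1g) devices: asks the bnx2 driver for its
 * cnic_eth_dev via bp->cnic_probe, rejects 5709/5709S chips with a PCI
 * revision below 0x10, and wires up the bnx2-specific ops.
 */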
static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *ethdev = NULL;

	if (bp->cnic_probe)
		ethdev = (bp->cnic_probe)(dev);

	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	     pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
	    (pdev->revision < 0x10)) {
		pci_dev_put(pdev);
		goto cnic_err;
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->max_iscsi_conn = ethdev->max_iscsi_conn;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}
5527
static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *ethdev = NULL;

	if (bp->cnic_probe)
		ethdev = bp->cnic_probe(dev);

	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->stats_addr = ethdev->addr_drv_info_to_mcp;

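	/* Copy the iSCSI/FCoE connection limits advertised by bnx2x. */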
	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
	if (CNIC_SUPPORTS_FCOE(bp)) {
		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
		cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
	}

	if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
		cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;

	memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
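	/* E2 and newer chips use dedicated MSI-X ack/arm helpers. */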
	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		cp->ack_int = cnic_ack_bnx2x_e2_msix;
		cp->arm_int = cnic_arm_bnx2x_e2_msix;
	} else {
		cp->ack_int = cnic_ack_bnx2x_msix;
		cp->arm_int = cnic_arm_bnx2x_msix;
	}
	cp->close_conn = cnic_close_bnx2x_conn;
	return cdev;
}

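/* Recognize CNIC-capable devices by their ethtool driver name and create
 * a cnic_dev for them, adding it to the global device list.
 */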
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

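/* Pass a netdev event to every registered ULP that implements
 * indicate_netevent.
 */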
static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
			      u16 vlan_id)
{
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;
		void *ctx;

		mutex_lock(&cnic_lock);
		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
						lockdep_is_held(&cnic_lock));
		if (!ulp_ops || !ulp_ops->indicate_netevent) {
			mutex_unlock(&cnic_lock);
			continue;
		}

		ctx = cp->ulp_handle[if_type];

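		/* Flag the upcall in progress so that
		 * cnic_unregister_device() can wait for it to complete.
		 */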
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		ulp_ops->indicate_netevent(ctx, event, vlan_id);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

/* netdev event handler */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct cnic_dev *dev;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && event == NETDEV_REGISTER) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		cnic_rcv_netevent(cp, event, 0);

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	} else {
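		/* The event may be on a VLAN device; forward it to the CNIC
		 * device of the real device with the VLAN ID attached.
		 */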
		struct net_device *realdev;
		u16 vid;

		vid = cnic_get_vlan(netdev, &realdev);
		if (realdev) {
			dev = cnic_from_netdev(realdev);
			if (dev) {
				vid |= VLAN_TAG_PRESENT;
				cnic_rcv_netevent(dev->cnic_priv, event, vid);
				cnic_put(dev);
			}
		}
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

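/* Free all UIO devices left on the global list; used on module unload
 * and on init failure.
 */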
static void cnic_release(void)
{
	struct cnic_uio_dev *udev;

	while (!list_empty(&cnic_udev_list)) {
		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
				  list);
		cnic_free_uio(udev);
	}
}

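/* Register the netdevice notifier that discovers bnx2/bnx2x devices and
 * create the single-threaded workqueue used for deferred CNIC work.
 */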
static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	cnic_wq = create_singlethread_workqueue("cnic_wq");
	if (!cnic_wq) {
		cnic_release();
		unregister_netdevice_notifier(&cnic_netdev_notifier);
		return -ENOMEM;
	}

	return 0;
}

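/* Undo cnic_init(): unregister the notifier, free the UIO devices, and
 * destroy the workqueue.
 */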
static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);