/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#define BCM_CNIC 1
#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
#include "cnic.h"
#include "cnic_defs.h"

#define CNIC_MODULE_NAME	"cnic"

static char version[] =
	"Broadcom NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

61MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
62 "Chen (zongxi@broadcom.com");
63MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
64MODULE_LICENSE("GPL");
65MODULE_VERSION(CNIC_MODULE_VERSION);
66
Michael Chan8adc92402010-12-23 07:42:57 +000067/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
Michael Chana4636962009-06-08 18:14:43 -070068static LIST_HEAD(cnic_dev_list);
Michael Chana3ceeeb2010-10-13 14:06:50 +000069static LIST_HEAD(cnic_udev_list);
Michael Chana4636962009-06-08 18:14:43 -070070static DEFINE_RWLOCK(cnic_dev_lock);
71static DEFINE_MUTEX(cnic_lock);
72
Eric Dumazet13707f92011-01-26 19:28:23 +000073static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
74
75/* helper function, assuming cnic_lock is held */
76static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
77{
78 return rcu_dereference_protected(cnic_ulp_tbl[type],
79 lockdep_is_held(&cnic_lock));
80}
Michael Chana4636962009-06-08 18:14:43 -070081
82static int cnic_service_bnx2(void *, void *);
Michael Chan71034ba2009-10-10 13:46:59 +000083static int cnic_service_bnx2x(void *, void *);
Michael Chana4636962009-06-08 18:14:43 -070084static int cnic_ctl(void *, struct cnic_ctl_info *);
85
86static struct cnic_ops cnic_bnx2_ops = {
87 .cnic_owner = THIS_MODULE,
88 .cnic_handler = cnic_service_bnx2,
89 .cnic_ctl = cnic_ctl,
90};
91
Michael Chan71034ba2009-10-10 13:46:59 +000092static struct cnic_ops cnic_bnx2x_ops = {
93 .cnic_owner = THIS_MODULE,
94 .cnic_handler = cnic_service_bnx2x,
95 .cnic_ctl = cnic_ctl,
96};
97
Michael Chanfdf24082010-10-13 14:06:47 +000098static struct workqueue_struct *cnic_wq;
99
Michael Chan86b53602009-10-10 13:46:57 +0000100static void cnic_shutdown_rings(struct cnic_dev *);
101static void cnic_init_rings(struct cnic_dev *);
Michael Chana4636962009-06-08 18:14:43 -0700102static int cnic_cm_set_pg(struct cnic_sock *);
103
104static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
105{
Michael Chancd801532010-10-13 14:06:49 +0000106 struct cnic_uio_dev *udev = uinfo->priv;
107 struct cnic_dev *dev;
Michael Chana4636962009-06-08 18:14:43 -0700108
109 if (!capable(CAP_NET_ADMIN))
110 return -EPERM;
111
Michael Chancd801532010-10-13 14:06:49 +0000112 if (udev->uio_dev != -1)
Michael Chana4636962009-06-08 18:14:43 -0700113 return -EBUSY;
114
Michael Chan86b53602009-10-10 13:46:57 +0000115 rtnl_lock();
Michael Chancd801532010-10-13 14:06:49 +0000116 dev = udev->dev;
117
Michael Chana3ceeeb2010-10-13 14:06:50 +0000118 if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
Michael Chan86b53602009-10-10 13:46:57 +0000119 rtnl_unlock();
120 return -ENODEV;
121 }
122
Michael Chancd801532010-10-13 14:06:49 +0000123 udev->uio_dev = iminor(inode);
Michael Chana4636962009-06-08 18:14:43 -0700124
Michael Chana3ceeeb2010-10-13 14:06:50 +0000125 cnic_shutdown_rings(dev);
Michael Chan86b53602009-10-10 13:46:57 +0000126 cnic_init_rings(dev);
127 rtnl_unlock();
Michael Chana4636962009-06-08 18:14:43 -0700128
129 return 0;
130}
131
132static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
133{
Michael Chancd801532010-10-13 14:06:49 +0000134 struct cnic_uio_dev *udev = uinfo->priv;
Michael Chan6ef57a02009-09-21 15:39:37 +0000135
Michael Chancd801532010-10-13 14:06:49 +0000136 udev->uio_dev = -1;
Michael Chana4636962009-06-08 18:14:43 -0700137 return 0;
138}
139
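/*
 * Illustrative sketch (not part of the driver): the userspace consumer,
 * e.g. the iscsiuio daemon, reaches the open/close callbacks above
 * through the generic UIO character device.  Assuming uio0 is the cnic
 * instance, the flow is roughly:
 *
 *	int fd = open("/dev/uio0", O_RDWR);	// -> cnic_uio_open()
 *	// ... mmap() the register BAR, status block and L2 rings ...
 *	close(fd);				// -> cnic_uio_close()
 *
 * The caller needs CAP_NET_ADMIN, and a second open() while uio_dev is
 * already set fails with -EBUSY, so only one consumer owns the rings
 * at a time.
 */
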
static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

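/*
 * Usage sketch (illustrative): cnic_from_netdev() returns the matching
 * device with its reference count already bumped, so a successful
 * lookup must be paired with cnic_put() once the caller is done:
 *
 *	struct cnic_dev *cdev = cnic_from_netdev(netdev);
 *
 *	if (cdev) {
 *		// ... use cdev outside cnic_dev_lock ...
 *		cnic_put(cdev);
 *	}
 */
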
static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct fcoe_capabilities *fcoe_cap =
		&info.data.register_data.fcoe_features;

	if (reg) {
		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
	} else {
		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
	}

	info.data.ulp_type = ulp_type;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	if (!cp->ctx_tbl)
		return -EINVAL;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return rc;
}

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			csk->vlan_id = path_resp->vlan_id;

			memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

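/*
 * Note on the three helpers above: SK_F_OFFLD_SCHED doubles as a
 * hand-rolled bit lock between the offload path and the close/abort
 * paths.  The test_and_set_bit()/msleep(1) loops spin until the
 * offload side drops the bit, so cnic_close_prep() and
 * cnic_abort_prep() can sleep and must not be called in atomic
 * context.
 */
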
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}

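/*
 * Usage sketch (illustrative, modeled on how a ULP such as bnx2i
 * attaches; only callbacks referenced elsewhere in this file are
 * shown, and the bodies are placeholders):
 *
 *	static struct cnic_ulp_ops my_ulp_ops = {
 *		.cnic_init	= my_init,	// called per cnic_dev
 *		.cnic_start	= my_start,	// called when the dev is up
 *		.indicate_kcqes	= my_kcqe_handler,
 *	};
 *
 *	err = cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
 *
 * Registration publishes the ops with rcu_assign_pointer() and then
 * invokes ->cnic_init() once for each device already on cnic_dev_list.
 */
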
int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		pr_warn("%s: Failed waiting for ref count to go to zero\n",
			__func__);
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	cnic_ulp_ctl(dev, ulp_type, true);

	return 0;

}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	else if (ulp_type == CNIC_ULP_FCOE)
		dev->fcoe_cap = NULL;

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	cnic_ulp_ctl(dev, ulp_type, false);

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
			    u32 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

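/*
 * Worked example (illustrative): the ID table is a simple wrap-around
 * bitmap allocator.  After cnic_init_id_tbl(&tbl, 256, 0x80, 0),
 * successive cnic_alloc_new_id(&tbl) calls hand out 0x80, 0x81, ...
 * and cnic_free_id(&tbl, id) makes an ID reusable.  Note that the
 * advance step "(id + 1) & (id_tbl->max - 1)" only steps sequentially
 * when the table size is a power of 2 (as with the 256-entry tables in
 * this driver); the find_*_zero_bit() range checks keep other sizes
 * safe, just with a poorer search hint.
 */
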
static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

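/*
 * Worked example of the two layouts above: for a DMA address of
 * 0x0000001234567890, cnic_setup_page_tbl() (the bnx2 "big endian"
 * layout) stores the high dword first:
 *
 *	entry[0] = cpu_to_le32(0x00000012);	// high 32 bits
 *	entry[1] = cpu_to_le32(0x34567890);	// low 32 bits
 *
 * while cnic_setup_page_tbl_le() (bnx2x) stores the low dword first.
 * Both use cpu_to_le32() for the PCI byte order; only the dword
 * ordering differs between the two chip families.
 */
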
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    CNIC_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
			  ~(CNIC_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

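/*
 * Usage sketch (illustrative): callers size a queue in CNIC_PAGE_SIZE
 * units and choose whether the chip walks the pages through the page
 * table built by cp->setup_pgtbl, e.g. for the bnx2 KWQ:
 *
 *	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
 *	if (ret)
 *		goto error;
 *
 * On failure the error path calls cnic_free_dma(), which frees every
 * page already obtained, so a failed call leaves the cnic_dma struct
 * fully cleaned up.
 */
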
static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
{
	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}

}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	__cnic_free_uio_rings(udev);

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
		if (udev->uio_dev == -1)
			__cnic_free_uio_rings(udev);
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = CNIC_PAGE_SIZE;
		cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   CNIC_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}

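/*
 * Worked example (illustrative): on bnx2x the last KCQE slot of each
 * queue page holds the bnx2x_bd_chain_next pointer to the next page
 * (see cnic_alloc_kcq() below), so the software index must skip it.
 * Assuming 4 KiB pages, MAX_KCQE_CNT is 127 and:
 *
 *	cnic_bnx2x_next_idx(125) == 126
 *	cnic_bnx2x_next_idx(126) == 128	// 127 is the chain-next slot
 *
 * cnic_bnx2x_hw_idx() applies the same adjustment to indices reported
 * by the hardware.  The bnx2 variants are pass-through because that
 * chip keeps no in-band next-page pointers.
 */
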
static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
			  bool use_pg_tbl)
{
	int err, i, use_page_tbl = 0;
	struct kcqe **kcq;

	if (use_pg_tbl)
		use_page_tbl = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	info->next_idx = cnic_bnx2_next_idx;
	info->hw_idx = cnic_bnx2_hw_idx;
	if (use_pg_tbl)
		return 0;

	info->next_idx = cnic_bnx2x_next_idx;
	info->hw_idx = cnic_bnx2x_hw_idx;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}

static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
{
	struct cnic_local *cp = udev->dev->cnic_priv;

	if (udev->l2_ring)
		return 0;

	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		return -ENOMEM;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf) {
		__cnic_free_uio_rings(udev);
		return -ENOMEM;
	}

	return 0;

}

static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			if (__cnic_alloc_uio_rings(udev, pages)) {
				udev->dev = NULL;
				read_unlock(&cnic_dev_lock);
				return -ENOMEM;
			}
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;

	if (__cnic_alloc_uio_rings(udev, pages))
		goto err_udev;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;

 err_udev:
	kfree(udev);
	return -ENOMEM;
}

static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
						     TX_MAX_TSS_RINGS + 1);
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					CNIC_PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);

		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			CNIC_PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}

static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!CHIP_IS_E1(bp))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		cp->max_cid_space += dev->max_fcoe_conn;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		CNIC_PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
	if (ret)
		goto error;

	if (CNIC_SUPPORTS_FCOE(bp)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
		if (ret)
			goto error;
	}

	pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		return 0;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}

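/*
 * Worked example of the math above (illustrative): the KWQ producer
 * and consumer indices are free-running counters masked by
 * cp->max_kwq_idx.  With max_kwq_idx == 255, kwq_prod_idx == 260 and
 * kwq_con_idx == 250:
 *
 *	used  = (260 - 250) & 255 == 10
 *	avail = 255 - 10          == 245
 *
 * The masked subtraction stays correct across wrap-around, and one
 * slot is implicitly held in reserve since avail never reaches the
 * full ring size of max_kwq_idx + 1.
 */
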
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(bp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return ret;
}

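/*
 * Illustrative breakdown of the 16-bit SPE header built above: the
 * ramrod connection type lands in SPE_HDR_CONN_TYPE and the PCI
 * function ID is folded into SPE_HDR_FUNCTION_ID, e.g. for a
 * hypothetical type on pfid 1:
 *
 *	type_16  = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
 *	type_16 |= (1 << SPE_HDR_FUNCTION_ID_SHIFT) & SPE_HDR_FUNCTION_ID;
 *
 * Note that only the DMA address of the caller's kwqe_data buffer
 * travels in the slow-path element (via l5_data->phy_address), so that
 * buffer must remain valid until the ramrod completes.
 */
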
static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}

static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
				       int en_tcp_dack)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (time_stamps) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}
	if (en_tcp_dack)
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
}

static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = bp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
	hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	cnic_bnx2x_set_tcp_options(dev,
1534 req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
1535 req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);
1536
Michael Chan71034ba2009-10-10 13:46:59 +00001537 return 0;
1538}
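
/*
 * Illustrative sketch, not part of the driver: the HQ sizing
 * arithmetic from cnic_bnx2x_iscsi_init1() in isolation.  The page
 * and BD sizes are assumptions for the example (the driver takes them
 * from CNIC_PAGE_SIZE and BNX2X_ISCSI_HQ_BD_SIZE); the point is the
 * round-up-to-whole-pages, then count-BDs-per-page shape.
 */
#include <stdint.h>

#define EX_PAGE_SIZE	4096u	/* assumed page size */
#define EX_HQ_BD_SIZE	64u	/* assumed HQ BD size */

static uint32_t example_hq_bds(uint32_t num_ccells)
{
	uint32_t hq_size = num_ccells * EX_HQ_BD_SIZE;
	/* round the byte count up to whole pages... */
	uint32_t pages = (hq_size + EX_PAGE_SIZE - 1) / EX_PAGE_SIZE;

	/* ...then count how many whole BDs those pages hold */
	return pages * (EX_PAGE_SIZE / EX_HQ_BD_SIZE);
}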
1539
1540static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
1541{
1542 struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
Michael Chan68c64d22012-12-06 10:33:11 +00001543 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chana5b3c4a2013-09-02 11:42:31 -07001544 u32 pfid = bp->pfid;
Michael Chan71034ba2009-10-10 13:46:59 +00001545 struct iscsi_kcqe kcqe;
1546 struct kcqe *cqes[1];
1547
1548 memset(&kcqe, 0, sizeof(kcqe));
1549 if (!dev->max_iscsi_conn) {
1550 kcqe.completion_status =
1551 ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
1552 goto done;
1553 }
1554
1555 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001556 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
Michael Chan71034ba2009-10-10 13:46:59 +00001557 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001558 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
Michael Chan71034ba2009-10-10 13:46:59 +00001559 req2->error_bit_map[1]);
1560
1561 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001562 USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
Michael Chan71034ba2009-10-10 13:46:59 +00001563 CNIC_WR(dev, BAR_USTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001564 USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
Michael Chan71034ba2009-10-10 13:46:59 +00001565 CNIC_WR(dev, BAR_USTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001566 USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
Michael Chan71034ba2009-10-10 13:46:59 +00001567 req2->error_bit_map[1]);
1568
1569 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00001570 CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
Michael Chan71034ba2009-10-10 13:46:59 +00001571
1572 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1573
1574done:
1575 kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
1576 cqes[0] = (struct kcqe *) &kcqe;
1577 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1578
1579 return 0;
1580}
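
/*
 * Illustrative sketch, not part of the driver: the 64-bit error
 * bitmap above is programmed with two 32-bit writes, low word at the
 * base offset and high word four bytes later.  The same pattern over
 * a caller-supplied register-write callback:
 */
#include <stdint.h>

static void example_write_bitmap64(void (*wr32)(uint32_t off, uint32_t val),
				   uint32_t off, const uint32_t bm[2])
{
	wr32(off, bm[0]);	/* low 32 bits */
	wr32(off + 4, bm[1]);	/* high 32 bits */
}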
1581
1582static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1583{
1584 struct cnic_local *cp = dev->cnic_priv;
1585 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1586
1587 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
1588 struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1589
1590 cnic_free_dma(dev, &iscsi->hq_info);
1591 cnic_free_dma(dev, &iscsi->r2tq_info);
1592 cnic_free_dma(dev, &iscsi->task_array_info);
Michael Chane1928c82010-12-23 07:43:04 +00001593 cnic_free_id(&cp->cid_tbl, ctx->cid);
1594 } else {
1595 cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
Michael Chan71034ba2009-10-10 13:46:59 +00001596 }
Michael Chane1928c82010-12-23 07:43:04 +00001597
Michael Chan71034ba2009-10-10 13:46:59 +00001598 ctx->cid = 0;
1599}
1600
1601static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1602{
1603 u32 cid;
1604 int ret, pages;
1605 struct cnic_local *cp = dev->cnic_priv;
1606 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1607 struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1608
Michael Chane1928c82010-12-23 07:43:04 +00001609 if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
1610 cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
1611 if (cid == -1) {
1612 ret = -ENOMEM;
1613 goto error;
1614 }
1615 ctx->cid = cid;
1616 return 0;
1617 }
1618
Michael Chan71034ba2009-10-10 13:46:59 +00001619 cid = cnic_alloc_new_id(&cp->cid_tbl);
1620 if (cid == -1) {
1621 ret = -ENOMEM;
1622 goto error;
1623 }
1624
1625 ctx->cid = cid;
Michael Chanbe1fefc2014-03-17 19:19:07 -08001626 pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;
Michael Chan71034ba2009-10-10 13:46:59 +00001627
1628 ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
1629 if (ret)
1630 goto error;
1631
Michael Chanbe1fefc2014-03-17 19:19:07 -08001632 pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
Michael Chan71034ba2009-10-10 13:46:59 +00001633 ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
1634 if (ret)
1635 goto error;
1636
Michael Chanbe1fefc2014-03-17 19:19:07 -08001637 pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
Michael Chan71034ba2009-10-10 13:46:59 +00001638 ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
1639 if (ret)
1640 goto error;
1641
1642 return 0;
1643
1644error:
1645 cnic_free_bnx2x_conn_resc(dev, l5_cid);
1646 return ret;
1647}
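
/*
 * Illustrative sketch, not part of the driver: the allocate-with-
 * rollback shape of cnic_alloc_bnx2x_conn_resc(), reduced to plain C
 * with malloc() standing in for cnic_alloc_dma().  On any failure one
 * error label releases whatever was acquired, so callers never see a
 * half-built connection.
 */
#include <stdlib.h>

struct example_conn {
	void *task_array;
	void *r2tq;
	void *hq;
};

static int example_alloc_conn(struct example_conn *c)
{
	c->task_array = c->r2tq = c->hq = NULL;

	c->task_array = malloc(1024);
	if (!c->task_array)
		goto error;
	c->r2tq = malloc(1024);
	if (!c->r2tq)
		goto error;
	c->hq = malloc(1024);
	if (!c->hq)
		goto error;
	return 0;

error:
	/* free() tolerates NULL, so one label unwinds any prefix */
	free(c->hq);
	free(c->r2tq);
	free(c->task_array);
	return -1;
}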
1648
1649static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
1650 struct regpair *ctx_addr)
1651{
1652 struct cnic_local *cp = dev->cnic_priv;
1653 struct cnic_eth_dev *ethdev = cp->ethdev;
1654 int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
1655 int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
1656 unsigned long align_off = 0;
1657 dma_addr_t ctx_map;
1658 void *ctx;
1659
1660 if (cp->ctx_align) {
1661 unsigned long mask = cp->ctx_align - 1;
1662
1663 if (cp->ctx_arr[blk].mapping & mask)
1664 align_off = cp->ctx_align -
1665 (cp->ctx_arr[blk].mapping & mask);
1666 }
1667 ctx_map = cp->ctx_arr[blk].mapping + align_off +
1668 (off * BNX2X_CONTEXT_MEM_SIZE);
1669 ctx = cp->ctx_arr[blk].ctx + align_off +
1670 (off * BNX2X_CONTEXT_MEM_SIZE);
1671 if (init)
1672 memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
1673
1674 ctx_addr->lo = ctx_map & 0xffffffff;
1675 ctx_addr->hi = (u64) ctx_map >> 32;
1676 return ctx;
1677}
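
/*
 * Illustrative sketch, not part of the driver: the two address tricks
 * used by cnic_get_bnx2x_ctx() in isolation.  example_align_off()
 * computes the pad needed to push a mapping up to the next boundary
 * ('align' must be a power of two), and example_split64() splits a
 * 64-bit DMA address into regpair-style lo/hi halves.
 */
#include <stdint.h>

static uint64_t example_align_off(uint64_t mapping, uint64_t align)
{
	uint64_t mask = align - 1;

	/* distance from 'mapping' up to the next aligned boundary */
	return (mapping & mask) ? align - (mapping & mask) : 0;
}

static void example_split64(uint64_t addr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(addr & 0xffffffff);
	*hi = (uint32_t)(addr >> 32);
}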
1678
1679static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1680 u32 num)
1681{
1682 struct cnic_local *cp = dev->cnic_priv;
Michael Chan104a43e2013-09-02 11:42:28 -07001683 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chan71034ba2009-10-10 13:46:59 +00001684 struct iscsi_kwqe_conn_offload1 *req1 =
1685 (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1686 struct iscsi_kwqe_conn_offload2 *req2 =
1687 (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1688 struct iscsi_kwqe_conn_offload3 *req3;
1689 struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
1690 struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1691 u32 cid = ctx->cid;
Michael Chan5e65789f2013-09-02 11:42:29 -07001692 u32 hw_cid = BNX2X_HW_CID(bp, cid);
Michael Chan71034ba2009-10-10 13:46:59 +00001693 struct iscsi_context *ictx;
1694 struct regpair context_addr;
1695 int i, j, n = 2, n_max;
Michael Chan5bf945a2013-09-02 11:42:30 -07001696 u8 port = BP_PORT(bp);
Michael Chan71034ba2009-10-10 13:46:59 +00001697
1698 ctx->ctx_flags = 0;
1699 if (!req2->num_additional_wqes)
1700 return -EINVAL;
1701
1702 n_max = req2->num_additional_wqes + 2;
1703
1704 ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
1705 if (ictx == NULL)
1706 return -ENOMEM;
1707
1708 req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1709
1710 ictx->xstorm_ag_context.hq_prod = 1;
1711
1712 ictx->xstorm_st_context.iscsi.first_burst_length =
1713 ISCSI_DEF_FIRST_BURST_LEN;
1714 ictx->xstorm_st_context.iscsi.max_send_pdu_length =
1715 ISCSI_DEF_MAX_RECV_SEG_LEN;
1716 ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
1717 req1->sq_page_table_addr_lo;
1718 ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
1719 req1->sq_page_table_addr_hi;
1720 ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
1721 ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
1722 ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
1723 iscsi->hq_info.pgtbl_map & 0xffffffff;
1724 ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
1725 (u64) iscsi->hq_info.pgtbl_map >> 32;
1726 ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
1727 iscsi->hq_info.pgtbl[0];
1728 ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
1729 iscsi->hq_info.pgtbl[1];
1730 ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
1731 iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1732 ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
1733 (u64) iscsi->r2tq_info.pgtbl_map >> 32;
1734 ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
1735 iscsi->r2tq_info.pgtbl[0];
1736 ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
1737 iscsi->r2tq_info.pgtbl[1];
1738 ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
1739 iscsi->task_array_info.pgtbl_map & 0xffffffff;
1740 ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
1741 (u64) iscsi->task_array_info.pgtbl_map >> 32;
1742 ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
1743 BNX2X_ISCSI_PBL_NOT_CACHED;
1744 ictx->xstorm_st_context.iscsi.flags.flags |=
1745 XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
1746 ictx->xstorm_st_context.iscsi.flags.flags |=
1747 XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001748 ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
1749 ETH_P_8021Q;
Michael Chan104a43e2013-09-02 11:42:28 -07001750 if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
Michael Chan5bf945a2013-09-02 11:42:30 -07001751 bp->common.chip_port_mode == CHIP_2_PORT_MODE) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001752
1753 port = 0;
1754 }
1755 ictx->xstorm_st_context.common.flags =
1756 1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
 1757	ictx->xstorm_st_context.common.flags |=
 1758		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
Michael Chan71034ba2009-10-10 13:46:59 +00001759
1760 ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
 1761	/* TSTORM requires the base address of the RQ doorbell, not the PTE */
1762 ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
Michael Chanbe1fefc2014-03-17 19:19:07 -08001763 req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
Michael Chan71034ba2009-10-10 13:46:59 +00001764 ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
1765 req2->rq_page_table_addr_hi;
1766 ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
1767 ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
1768 ictx->tstorm_st_context.tcp.flags2 |=
1769 TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001770 ictx->tstorm_st_context.tcp.ooo_support_mode =
1771 TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
Michael Chan71034ba2009-10-10 13:46:59 +00001772
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001773 ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
Michael Chan71034ba2009-10-10 13:46:59 +00001774
1775 ictx->ustorm_st_context.ring.rq.pbl_base.lo =
Michael Chan15971c32009-12-02 15:15:38 +00001776 req2->rq_page_table_addr_lo;
Michael Chan71034ba2009-10-10 13:46:59 +00001777 ictx->ustorm_st_context.ring.rq.pbl_base.hi =
Michael Chan15971c32009-12-02 15:15:38 +00001778 req2->rq_page_table_addr_hi;
Michael Chan71034ba2009-10-10 13:46:59 +00001779 ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
1780 ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
1781 ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
1782 iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1783 ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
1784 (u64) iscsi->r2tq_info.pgtbl_map >> 32;
1785 ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
1786 iscsi->r2tq_info.pgtbl[0];
1787 ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
1788 iscsi->r2tq_info.pgtbl[1];
1789 ictx->ustorm_st_context.ring.cq_pbl_base.lo =
1790 req1->cq_page_table_addr_lo;
1791 ictx->ustorm_st_context.ring.cq_pbl_base.hi =
1792 req1->cq_page_table_addr_hi;
1793 ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
1794 ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
1795 ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
1796 ictx->ustorm_st_context.task_pbe_cache_index =
1797 BNX2X_ISCSI_PBL_NOT_CACHED;
1798 ictx->ustorm_st_context.task_pdu_cache_index =
1799 BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
1800
1801 for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
1802 if (j == 3) {
1803 if (n >= n_max)
1804 break;
1805 req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1806 j = 0;
1807 }
1808 ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
1809 ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
1810 req3->qp_first_pte[j].hi;
1811 ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
1812 req3->qp_first_pte[j].lo;
1813 }
1814
1815 ictx->ustorm_st_context.task_pbl_base.lo =
1816 iscsi->task_array_info.pgtbl_map & 0xffffffff;
1817 ictx->ustorm_st_context.task_pbl_base.hi =
1818 (u64) iscsi->task_array_info.pgtbl_map >> 32;
1819 ictx->ustorm_st_context.tce_phy_addr.lo =
1820 iscsi->task_array_info.pgtbl[0];
1821 ictx->ustorm_st_context.tce_phy_addr.hi =
1822 iscsi->task_array_info.pgtbl[1];
1823 ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1824 ictx->ustorm_st_context.num_cqs = cp->num_cqs;
1825 ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
1826 ictx->ustorm_st_context.negotiated_rx_and_flags |=
1827 ISCSI_DEF_MAX_BURST_LEN;
1828 ictx->ustorm_st_context.negotiated_rx |=
1829 ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
1830 USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
1831
1832 ictx->cstorm_st_context.hq_pbl_base.lo =
1833 iscsi->hq_info.pgtbl_map & 0xffffffff;
1834 ictx->cstorm_st_context.hq_pbl_base.hi =
1835 (u64) iscsi->hq_info.pgtbl_map >> 32;
1836 ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
1837 ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
1838 ictx->cstorm_st_context.task_pbl_base.lo =
1839 iscsi->task_array_info.pgtbl_map & 0xffffffff;
1840 ictx->cstorm_st_context.task_pbl_base.hi =
1841 (u64) iscsi->task_array_info.pgtbl_map >> 32;
 1842	/* CSTORM and USTORM initialization differ: CSTORM requires the
 1843	 * CQ doorbell base address, not the PTE address. */
1844 ictx->cstorm_st_context.cq_db_base.lo =
Michael Chanbe1fefc2014-03-17 19:19:07 -08001845 req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
Michael Chan71034ba2009-10-10 13:46:59 +00001846 ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
1847 ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1848 ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
1849 for (i = 0; i < cp->num_cqs; i++) {
1850 ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
1851 ISCSI_INITIAL_SN;
1852 ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
1853 ISCSI_INITIAL_SN;
1854 }
1855
1856 ictx->xstorm_ag_context.cdu_reserved =
1857 CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
1858 ISCSI_CONNECTION_TYPE);
1859 ictx->ustorm_ag_context.cdu_usage =
1860 CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
1861 ISCSI_CONNECTION_TYPE);
1862 return 0;
1863
1864}
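
/*
 * Illustrative sketch, not part of the driver: the CQ loop near the
 * end of cnic_setup_bnx2x_ctx() consumes page-table entries that
 * arrive packed three per "offload3" WQE.  A stand-alone model of the
 * cursor movement, assuming entry 0 was already taken from the
 * offload2 WQE as the driver does:
 */
static int example_walk_ptes(const unsigned long long wqes[][3], int n_max,
			     unsigned long long *out, int num_out)
{
	int i, j = 1, n = 0;

	for (i = 1; i < num_out; i++, j++) {
		if (j == 3) {		/* current WQE exhausted */
			if (++n >= n_max)
				return -1;	/* ran out of WQEs */
			j = 0;
		}
		out[i] = wqes[n][j];
	}
	return 0;
}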
1865
1866static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1867 u32 num, int *work)
1868{
1869 struct iscsi_kwqe_conn_offload1 *req1;
1870 struct iscsi_kwqe_conn_offload2 *req2;
1871 struct cnic_local *cp = dev->cnic_priv;
Michael Chan5e65789f2013-09-02 11:42:29 -07001872 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chanfdf24082010-10-13 14:06:47 +00001873 struct cnic_context *ctx;
Michael Chan71034ba2009-10-10 13:46:59 +00001874 struct iscsi_kcqe kcqe;
1875 struct kcqe *cqes[1];
1876 u32 l5_cid;
Michael Chanfdf24082010-10-13 14:06:47 +00001877 int ret = 0;
Michael Chan71034ba2009-10-10 13:46:59 +00001878
1879 if (num < 2) {
1880 *work = num;
1881 return -EINVAL;
1882 }
1883
1884 req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1885 req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1886 if ((num - 2) < req2->num_additional_wqes) {
1887 *work = num;
1888 return -EINVAL;
1889 }
Joe Perches779bb412010-11-14 17:04:37 +00001890 *work = 2 + req2->num_additional_wqes;
Michael Chan71034ba2009-10-10 13:46:59 +00001891
1892 l5_cid = req1->iscsi_conn_id;
1893 if (l5_cid >= MAX_ISCSI_TBL_SZ)
1894 return -EINVAL;
1895
1896 memset(&kcqe, 0, sizeof(kcqe));
1897 kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
1898 kcqe.iscsi_conn_id = l5_cid;
1899 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
1900
Michael Chanfdf24082010-10-13 14:06:47 +00001901 ctx = &cp->ctx_tbl[l5_cid];
1902 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
1903 kcqe.completion_status =
1904 ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
1905 goto done;
1906 }
1907
Michael Chan71034ba2009-10-10 13:46:59 +00001908 if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
1909 atomic_dec(&cp->iscsi_conn);
Michael Chan71034ba2009-10-10 13:46:59 +00001910 goto done;
1911 }
1912 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
1913 if (ret) {
1914 atomic_dec(&cp->iscsi_conn);
1915 ret = 0;
1916 goto done;
1917 }
1918 ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
1919 if (ret < 0) {
1920 cnic_free_bnx2x_conn_resc(dev, l5_cid);
1921 atomic_dec(&cp->iscsi_conn);
1922 goto done;
1923 }
1924
1925 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
Michael Chan5e65789f2013-09-02 11:42:29 -07001926 kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);
Michael Chan71034ba2009-10-10 13:46:59 +00001927
1928done:
1929 cqes[0] = (struct kcqe *) &kcqe;
1930 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
Michael Chan23021c22012-01-04 12:12:28 +00001931 return 0;
Michael Chan71034ba2009-10-10 13:46:59 +00001932}
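
/*
 * Illustrative sketch, not part of the driver: the connection-count
 * gate in cnic_bnx2x_iscsi_ofld1() above, using C11 atomics in place
 * of the kernel's atomic_t.  The increment claims a slot first; if
 * the claim overshoots the limit the count is backed out, so the gate
 * never leaks slots under concurrent offload requests.
 */
#include <stdatomic.h>

static atomic_int example_conn_cnt;

static int example_claim_conn(int max_conn)
{
	/* atomic_fetch_add() returns the old value, so +1 yields the
	 * post-increment count that atomic_inc_return() gives */
	if (atomic_fetch_add(&example_conn_cnt, 1) + 1 > max_conn) {
		atomic_fetch_sub(&example_conn_cnt, 1);
		return -1;	/* over the limit: roll back and refuse */
	}
	return 0;
}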
1933
1934
1935static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
1936{
1937 struct cnic_local *cp = dev->cnic_priv;
1938 struct iscsi_kwqe_conn_update *req =
1939 (struct iscsi_kwqe_conn_update *) kwqe;
1940 void *data;
1941 union l5cm_specific_data l5_data;
1942 u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
1943 int ret;
1944
1945 if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
1946 return -EINVAL;
1947
1948 data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
1949 if (!data)
1950 return -ENOMEM;
1951
1952 memcpy(data, kwqe, sizeof(struct kwqe));
1953
1954 ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
1955 req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
1956 return ret;
1957}
1958
Michael Chana2c9e762010-10-13 14:06:46 +00001959static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
Michael Chan71034ba2009-10-10 13:46:59 +00001960{
1961 struct cnic_local *cp = dev->cnic_priv;
Michael Chan5e65789f2013-09-02 11:42:29 -07001962 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chan71034ba2009-10-10 13:46:59 +00001963 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
Michael Chana2c9e762010-10-13 14:06:46 +00001964 union l5cm_specific_data l5_data;
1965 int ret;
Michael Chan68d7c1a2011-01-05 15:14:13 +00001966 u32 hw_cid;
Michael Chan71034ba2009-10-10 13:46:59 +00001967
Michael Chan71034ba2009-10-10 13:46:59 +00001968 init_waitqueue_head(&ctx->waitq);
1969 ctx->wait_cond = 0;
1970 memset(&l5_data, 0, sizeof(l5_data));
Michael Chan5e65789f2013-09-02 11:42:29 -07001971 hw_cid = BNX2X_HW_CID(bp, ctx->cid);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001972
1973 ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
Michael Chan68d7c1a2011-01-05 15:14:13 +00001974 hw_cid, NONE_CONNECTION_TYPE, &l5_data);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001975
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001976 if (ret == 0) {
Michael Chandcc7e3a2011-08-26 09:45:40 +00001977 wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001978 if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
1979 return -EBUSY;
1980 }
Michael Chan71034ba2009-10-10 13:46:59 +00001981
Michael Chandcc7e3a2011-08-26 09:45:40 +00001982 return 0;
Michael Chana2c9e762010-10-13 14:06:46 +00001983}
1984
1985static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
1986{
1987 struct cnic_local *cp = dev->cnic_priv;
1988 struct iscsi_kwqe_conn_destroy *req =
1989 (struct iscsi_kwqe_conn_destroy *) kwqe;
1990 u32 l5_cid = req->reserved0;
1991 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1992 int ret = 0;
1993 struct iscsi_kcqe kcqe;
1994 struct kcqe *cqes[1];
1995
1996 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
1997 goto skip_cfc_delete;
1998
Michael Chanfdf24082010-10-13 14:06:47 +00001999 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
2000 unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;
2001
2002 if (delta > (2 * HZ))
2003 delta = 0;
2004
2005 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2006 queue_delayed_work(cnic_wq, &cp->delete_task, delta);
2007 goto destroy_reply;
2008 }
Michael Chana2c9e762010-10-13 14:06:46 +00002009
2010 ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
2011
Michael Chan71034ba2009-10-10 13:46:59 +00002012skip_cfc_delete:
2013 cnic_free_bnx2x_conn_resc(dev, l5_cid);
2014
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002015 if (!ret) {
2016 atomic_dec(&cp->iscsi_conn);
2017 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2018 }
Michael Chan71034ba2009-10-10 13:46:59 +00002019
Michael Chanfdf24082010-10-13 14:06:47 +00002020destroy_reply:
Michael Chan71034ba2009-10-10 13:46:59 +00002021 memset(&kcqe, 0, sizeof(kcqe));
2022 kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
2023 kcqe.iscsi_conn_id = l5_cid;
2024 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
2025 kcqe.iscsi_conn_context_id = req->context_id;
2026
2027 cqes[0] = (struct kcqe *) &kcqe;
2028 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
2029
Michael Chan23021c22012-01-04 12:12:28 +00002030 return 0;
Michael Chan71034ba2009-10-10 13:46:59 +00002031}
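
/*
 * Illustrative sketch, not part of the driver: the deferred-destroy
 * delay computed above when a destroy request arrives within two
 * seconds of the offload.  'now' and 'stamp' play the roles of
 * jiffies and ctx->timestamp, 'hz' the tick rate; the clamp catches
 * unsigned wraparound the same way the driver's "delta > (2 * HZ)"
 * test does.
 */
static unsigned long example_destroy_delay(unsigned long now,
					   unsigned long stamp,
					   unsigned long hz)
{
	unsigned long delta = stamp + 2 * hz - now;

	/* underflow shows up as a huge unsigned value; treat it as 0 */
	if (delta > 2 * hz)
		delta = 0;
	return delta;
}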
2032
2033static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
2034 struct l4_kwq_connect_req1 *kwqe1,
2035 struct l4_kwq_connect_req3 *kwqe3,
2036 struct l5cm_active_conn_buffer *conn_buf)
2037{
2038 struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
2039 struct l5cm_xstorm_conn_buffer *xstorm_buf =
2040 &conn_buf->xstorm_conn_buffer;
2041 struct l5cm_tstorm_conn_buffer *tstorm_buf =
2042 &conn_buf->tstorm_conn_buffer;
2043 struct regpair context_addr;
2044 u32 cid = BNX2X_SW_CID(kwqe1->cid);
2045 struct in6_addr src_ip, dst_ip;
2046 int i;
2047 u32 *addrp;
2048
2049 addrp = (u32 *) &conn_addr->local_ip_addr;
2050 for (i = 0; i < 4; i++, addrp++)
2051 src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
2052
2053 addrp = (u32 *) &conn_addr->remote_ip_addr;
2054 for (i = 0; i < 4; i++, addrp++)
2055 dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
2056
2057 cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
2058
2059 xstorm_buf->context_addr.hi = context_addr.hi;
2060 xstorm_buf->context_addr.lo = context_addr.lo;
2061 xstorm_buf->mss = 0xffff;
2062 xstorm_buf->rcv_buf = kwqe3->rcv_buf;
2063 if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
2064 xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
2065 xstorm_buf->pseudo_header_checksum =
2066 swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
2067
Michael Chan71034ba2009-10-10 13:46:59 +00002068 if (kwqe3->ka_timeout) {
2069 tstorm_buf->ka_enable = 1;
2070 tstorm_buf->ka_timeout = kwqe3->ka_timeout;
2071 tstorm_buf->ka_interval = kwqe3->ka_interval;
2072 tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
2073 }
Michael Chan71034ba2009-10-10 13:46:59 +00002074 tstorm_buf->max_rt_time = 0xffffffff;
2075}
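
/*
 * Illustrative sketch, not part of the driver: the address conversion
 * at the top of cnic_init_storm_conn_bufs().  The connect WQEs carry
 * IPv6 addresses as four host-order 32-bit words, while the checksum
 * helper wants network byte order, hence the word-by-word swap.  Here
 * htonl() stands in for cpu_to_be32().
 */
#include <stdint.h>
#include <arpa/inet.h>

static void example_words_to_be32(const uint32_t words[4], uint32_t be_out[4])
{
	int i;

	for (i = 0; i < 4; i++)
		be_out[i] = htonl(words[i]);	/* host -> big endian */
}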
2076
2077static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
2078{
Michael Chan68c64d22012-12-06 10:33:11 +00002079 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chana5b3c4a2013-09-02 11:42:31 -07002080 u32 pfid = bp->pfid;
Michael Chan71034ba2009-10-10 13:46:59 +00002081 u8 *mac = dev->mac_addr;
2082
2083 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002084 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
Michael Chan71034ba2009-10-10 13:46:59 +00002085 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002086 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
Michael Chan71034ba2009-10-10 13:46:59 +00002087 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002088 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
Michael Chan71034ba2009-10-10 13:46:59 +00002089 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002090 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
Michael Chan71034ba2009-10-10 13:46:59 +00002091 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002092 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
Michael Chan71034ba2009-10-10 13:46:59 +00002093 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002094 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);
Michael Chan71034ba2009-10-10 13:46:59 +00002095
2096 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002097 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
Michael Chan71034ba2009-10-10 13:46:59 +00002098 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00002099 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
Michael Chan71034ba2009-10-10 13:46:59 +00002100 mac[4]);
2101 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002102 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
Michael Chan71034ba2009-10-10 13:46:59 +00002103 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002104 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
Michael Chan71034ba2009-10-10 13:46:59 +00002105 mac[2]);
2106 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002107 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
Michael Chan71034ba2009-10-10 13:46:59 +00002108 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002109 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
Michael Chan71034ba2009-10-10 13:46:59 +00002110 mac[0]);
2111}
2112
Michael Chan71034ba2009-10-10 13:46:59 +00002113static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
2114 u32 num, int *work)
2115{
2116 struct cnic_local *cp = dev->cnic_priv;
Michael Chan68c64d22012-12-06 10:33:11 +00002117 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chan71034ba2009-10-10 13:46:59 +00002118 struct l4_kwq_connect_req1 *kwqe1 =
2119 (struct l4_kwq_connect_req1 *) wqes[0];
2120 struct l4_kwq_connect_req3 *kwqe3;
2121 struct l5cm_active_conn_buffer *conn_buf;
2122 struct l5cm_conn_addr_params *conn_addr;
2123 union l5cm_specific_data l5_data;
2124 u32 l5_cid = kwqe1->pg_cid;
2125 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
2126 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
2127 int ret;
2128
2129 if (num < 2) {
2130 *work = num;
2131 return -EINVAL;
2132 }
2133
2134 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
2135 *work = 3;
2136 else
2137 *work = 2;
2138
2139 if (num < *work) {
2140 *work = num;
2141 return -EINVAL;
2142 }
2143
2144 if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
Joe Perchesddf79b22010-02-17 15:01:54 +00002145 netdev_err(dev->netdev, "conn_buf size too big\n");
Michael Chan71034ba2009-10-10 13:46:59 +00002146 return -ENOMEM;
2147 }
2148 conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2149 if (!conn_buf)
2150 return -ENOMEM;
2151
2152 memset(conn_buf, 0, sizeof(*conn_buf));
2153
2154 conn_addr = &conn_buf->conn_addr_buf;
2155 conn_addr->remote_addr_0 = csk->ha[0];
2156 conn_addr->remote_addr_1 = csk->ha[1];
2157 conn_addr->remote_addr_2 = csk->ha[2];
2158 conn_addr->remote_addr_3 = csk->ha[3];
2159 conn_addr->remote_addr_4 = csk->ha[4];
2160 conn_addr->remote_addr_5 = csk->ha[5];
2161
2162 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
2163 struct l4_kwq_connect_req2 *kwqe2 =
2164 (struct l4_kwq_connect_req2 *) wqes[1];
2165
2166 conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
2167 conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
2168 conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;
2169
2170 conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
2171 conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
2172 conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
2173 conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
2174 }
2175 kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
2176
2177 conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
2178 conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
2179 conn_addr->local_tcp_port = kwqe1->src_port;
2180 conn_addr->remote_tcp_port = kwqe1->dst_port;
2181
2182 conn_addr->pmtu = kwqe3->pmtu;
2183 cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
2184
2185 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
Michael Chana5b3c4a2013-09-02 11:42:31 -07002186 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);
Michael Chan71034ba2009-10-10 13:46:59 +00002187
Michael Chan71034ba2009-10-10 13:46:59 +00002188 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
2189 kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2190 if (!ret)
Michael Chan6e0dda02010-10-13 14:06:45 +00002191 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
Michael Chan71034ba2009-10-10 13:46:59 +00002192
2193 return ret;
2194}
2195
2196static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
2197{
2198 struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
2199 union l5cm_specific_data l5_data;
2200 int ret;
2201
2202 memset(&l5_data, 0, sizeof(l5_data));
2203 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
2204 req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2205 return ret;
2206}
2207
2208static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
2209{
2210 struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
2211 union l5cm_specific_data l5_data;
2212 int ret;
2213
2214 memset(&l5_data, 0, sizeof(l5_data));
2215 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
2216 req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2217 return ret;
2218}

2219static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2220{
2221 struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
2222 struct l4_kcq kcqe;
2223 struct kcqe *cqes[1];
2224
2225 memset(&kcqe, 0, sizeof(kcqe));
2226 kcqe.pg_host_opaque = req->host_opaque;
2227 kcqe.pg_cid = req->host_opaque;
2228 kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
2229 cqes[0] = (struct kcqe *) &kcqe;
2230 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2231 return 0;
2232}
2233
2234static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2235{
2236 struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
2237 struct l4_kcq kcqe;
2238 struct kcqe *cqes[1];
2239
2240 memset(&kcqe, 0, sizeof(kcqe));
2241 kcqe.pg_host_opaque = req->pg_host_opaque;
2242 kcqe.pg_cid = req->pg_cid;
2243 kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
2244 cqes[0] = (struct kcqe *) &kcqe;
2245 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2246 return 0;
2247}
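
/*
 * Illustrative sketch, not part of the driver: the two PG handlers
 * above never touch the hardware; they synthesize a completion and
 * push it through the same reply path that real completions use.
 * The shape of that pattern, with placeholder types:
 */
struct example_req { unsigned int opaque; };
struct example_cqe { unsigned int opaque; unsigned int op; };

typedef void (*example_reply_fn)(const struct example_cqe *cqes, int num);

static void example_fake_completion(const struct example_req *req,
				    unsigned int reply_op,
				    example_reply_fn reply)
{
	struct example_cqe cqe = { 0 };

	cqe.opaque = req->opaque;	/* echo the caller's cookie back */
	cqe.op = reply_op;
	reply(&cqe, 1);			/* same path as real completions */
}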
2248
Michael Chane1928c82010-12-23 07:43:04 +00002249static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
2250{
2251 struct fcoe_kwqe_stat *req;
2252 struct fcoe_stat_ramrod_params *fcoe_stat;
2253 union l5cm_specific_data l5_data;
2254 struct cnic_local *cp = dev->cnic_priv;
Michael Chan5e65789f2013-09-02 11:42:29 -07002255 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chane1928c82010-12-23 07:43:04 +00002256 int ret;
2257 u32 cid;
2258
2259 req = (struct fcoe_kwqe_stat *) kwqe;
Michael Chan5e65789f2013-09-02 11:42:29 -07002260 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
Michael Chane1928c82010-12-23 07:43:04 +00002261
2262 fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2263 if (!fcoe_stat)
2264 return -ENOMEM;
2265
2266 memset(fcoe_stat, 0, sizeof(*fcoe_stat));
2267 memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
2268
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002269 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
Michael Chane1928c82010-12-23 07:43:04 +00002270 FCOE_CONNECTION_TYPE, &l5_data);
2271 return ret;
2272}
2273
2274static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
2275 u32 num, int *work)
2276{
2277 int ret;
2278 struct cnic_local *cp = dev->cnic_priv;
Michael Chan5e65789f2013-09-02 11:42:29 -07002279 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chane1928c82010-12-23 07:43:04 +00002280 u32 cid;
2281 struct fcoe_init_ramrod_params *fcoe_init;
2282 struct fcoe_kwqe_init1 *req1;
2283 struct fcoe_kwqe_init2 *req2;
2284 struct fcoe_kwqe_init3 *req3;
2285 union l5cm_specific_data l5_data;
2286
2287 if (num < 3) {
2288 *work = num;
2289 return -EINVAL;
2290 }
2291 req1 = (struct fcoe_kwqe_init1 *) wqes[0];
2292 req2 = (struct fcoe_kwqe_init2 *) wqes[1];
2293 req3 = (struct fcoe_kwqe_init3 *) wqes[2];
2294 if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
2295 *work = 1;
2296 return -EINVAL;
2297 }
2298 if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
2299 *work = 2;
2300 return -EINVAL;
2301 }
2302
2303 if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
2304 netdev_err(dev->netdev, "fcoe_init size too big\n");
2305 return -ENOMEM;
2306 }
2307 fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2308 if (!fcoe_init)
2309 return -ENOMEM;
2310
2311 memset(fcoe_init, 0, sizeof(*fcoe_init));
2312 memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
2313 memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
2314 memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002315 fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
2316 fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
2317 fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
Michael Chane1928c82010-12-23 07:43:04 +00002318
2319 fcoe_init->sb_num = cp->status_blk_num;
2320 fcoe_init->eq_prod = MAX_KCQ_IDX;
2321 fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
2322 cp->kcq2.sw_prod_idx = 0;
2323
Michael Chan5e65789f2013-09-02 11:42:29 -07002324 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002325 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
Michael Chane1928c82010-12-23 07:43:04 +00002326 FCOE_CONNECTION_TYPE, &l5_data);
2327 *work = 3;
2328 return ret;
2329}
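
/*
 * Illustrative sketch, not part of the driver: the FCOE init handler
 * above validates a three-WQE request by opcode and, on mismatch,
 * reports through '*work' how many leading WQEs looked plausible so
 * the caller can skip only those.  The same checking pattern with
 * placeholder opcodes (the first WQE is assumed already matched by
 * the dispatcher):
 */
#define EX_OP_INIT2	0x71	/* placeholder for FCOE_KWQE_OPCODE_INIT2 */
#define EX_OP_INIT3	0x72	/* placeholder for FCOE_KWQE_OPCODE_INIT3 */

static int example_check_triple(const unsigned int ops[3], int *work)
{
	if (ops[1] != EX_OP_INIT2) {
		*work = 1;	/* only the first WQE was usable */
		return -1;
	}
	if (ops[2] != EX_OP_INIT3) {
		*work = 2;	/* first two WQEs were usable */
		return -1;
	}
	*work = 3;		/* whole triple consumed */
	return 0;
}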
2330
2331static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2332 u32 num, int *work)
2333{
2334 int ret = 0;
2335 u32 cid = -1, l5_cid;
2336 struct cnic_local *cp = dev->cnic_priv;
Michael Chan5e65789f2013-09-02 11:42:29 -07002337 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chane1928c82010-12-23 07:43:04 +00002338 struct fcoe_kwqe_conn_offload1 *req1;
2339 struct fcoe_kwqe_conn_offload2 *req2;
2340 struct fcoe_kwqe_conn_offload3 *req3;
2341 struct fcoe_kwqe_conn_offload4 *req4;
2342 struct fcoe_conn_offload_ramrod_params *fcoe_offload;
2343 struct cnic_context *ctx;
2344 struct fcoe_context *fctx;
2345 struct regpair ctx_addr;
2346 union l5cm_specific_data l5_data;
2347 struct fcoe_kcqe kcqe;
2348 struct kcqe *cqes[1];
2349
2350 if (num < 4) {
2351 *work = num;
2352 return -EINVAL;
2353 }
2354 req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
2355 req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
2356 req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
2357 req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
2358
2359 *work = 4;
2360
2361 l5_cid = req1->fcoe_conn_id;
Michael Chandc219a22011-08-26 09:45:39 +00002362 if (l5_cid >= dev->max_fcoe_conn)
Michael Chane1928c82010-12-23 07:43:04 +00002363 goto err_reply;
2364
2365 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2366
2367 ctx = &cp->ctx_tbl[l5_cid];
2368 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2369 goto err_reply;
2370
2371 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
2372 if (ret) {
2373 ret = 0;
2374 goto err_reply;
2375 }
2376 cid = ctx->cid;
2377
2378 fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2379 if (fctx) {
Michael Chan5e65789f2013-09-02 11:42:29 -07002380 u32 hw_cid = BNX2X_HW_CID(bp, cid);
Michael Chane1928c82010-12-23 07:43:04 +00002381 u32 val;
2382
2383 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
2384 FCOE_CONNECTION_TYPE);
2385 fctx->xstorm_ag_context.cdu_reserved = val;
2386 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
2387 FCOE_CONNECTION_TYPE);
2388 fctx->ustorm_ag_context.cdu_usage = val;
2389 }
2390 if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
2391 netdev_err(dev->netdev, "fcoe_offload size too big\n");
2392 goto err_reply;
2393 }
2394 fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2395 if (!fcoe_offload)
2396 goto err_reply;
2397
2398 memset(fcoe_offload, 0, sizeof(*fcoe_offload));
2399 memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
2400 memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
2401 memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
2402 memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
2403
Michael Chan5e65789f2013-09-02 11:42:29 -07002404 cid = BNX2X_HW_CID(bp, cid);
Michael Chane1928c82010-12-23 07:43:04 +00002405 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2406 FCOE_CONNECTION_TYPE, &l5_data);
2407 if (!ret)
2408 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2409
2410 return ret;
2411
2412err_reply:
2413 if (cid != -1)
2414 cnic_free_bnx2x_conn_resc(dev, l5_cid);
2415
2416 memset(&kcqe, 0, sizeof(kcqe));
2417 kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
2418 kcqe.fcoe_conn_id = req1->fcoe_conn_id;
2419 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
2420
2421 cqes[0] = (struct kcqe *) &kcqe;
2422 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2423 return ret;
2424}
2425
2426static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2427{
2428 struct fcoe_kwqe_conn_enable_disable *req;
2429 struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2430 union l5cm_specific_data l5_data;
2431 int ret;
2432 u32 cid, l5_cid;
2433 struct cnic_local *cp = dev->cnic_priv;
2434
2435 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2436 cid = req->context_id;
2437 l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2438
2439 if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2440 netdev_err(dev->netdev, "fcoe_enable size too big\n");
2441 return -ENOMEM;
2442 }
2443 fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2444 if (!fcoe_enable)
2445 return -ENOMEM;
2446
2447 memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2448 memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2449 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2450 FCOE_CONNECTION_TYPE, &l5_data);
2451 return ret;
2452}
2453
2454static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2455{
2456 struct fcoe_kwqe_conn_enable_disable *req;
2457 struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2458 union l5cm_specific_data l5_data;
2459 int ret;
2460 u32 cid, l5_cid;
2461 struct cnic_local *cp = dev->cnic_priv;
2462
2463 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2464 cid = req->context_id;
2465 l5_cid = req->conn_id;
Michael Chandc219a22011-08-26 09:45:39 +00002466 if (l5_cid >= dev->max_fcoe_conn)
Michael Chane1928c82010-12-23 07:43:04 +00002467 return -EINVAL;
2468
2469 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2470
2471 if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2472 netdev_err(dev->netdev, "fcoe_disable size too big\n");
2473 return -ENOMEM;
2474 }
2475 fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2476 if (!fcoe_disable)
2477 return -ENOMEM;
2478
2479 memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2480 memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2481 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2482 FCOE_CONNECTION_TYPE, &l5_data);
2483 return ret;
2484}
2485
2486static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2487{
2488 struct fcoe_kwqe_conn_destroy *req;
2489 union l5cm_specific_data l5_data;
2490 int ret;
2491 u32 cid, l5_cid;
2492 struct cnic_local *cp = dev->cnic_priv;
2493 struct cnic_context *ctx;
2494 struct fcoe_kcqe kcqe;
2495 struct kcqe *cqes[1];
2496
2497 req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2498 cid = req->context_id;
2499 l5_cid = req->conn_id;
Michael Chandc219a22011-08-26 09:45:39 +00002500 if (l5_cid >= dev->max_fcoe_conn)
Michael Chane1928c82010-12-23 07:43:04 +00002501 return -EINVAL;
2502
2503 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2504
2505 ctx = &cp->ctx_tbl[l5_cid];
2506
2507 init_waitqueue_head(&ctx->waitq);
2508 ctx->wait_cond = 0;
2509
Michael Chandcc7e3a2011-08-26 09:45:40 +00002510 memset(&kcqe, 0, sizeof(kcqe));
2511 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
Michael Chane1928c82010-12-23 07:43:04 +00002512 memset(&l5_data, 0, sizeof(l5_data));
2513 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2514 FCOE_CONNECTION_TYPE, &l5_data);
2515 if (ret == 0) {
Michael Chandcc7e3a2011-08-26 09:45:40 +00002516 wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
2517 if (ctx->wait_cond)
2518 kcqe.completion_status = 0;
Michael Chane1928c82010-12-23 07:43:04 +00002519 }
2520
Michael Chandcc7e3a2011-08-26 09:45:40 +00002521 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2522 queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));
2523
Michael Chane1928c82010-12-23 07:43:04 +00002524 kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2525 kcqe.fcoe_conn_id = req->conn_id;
2526 kcqe.fcoe_conn_context_id = cid;
2527
2528 cqes[0] = (struct kcqe *) &kcqe;
2529 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2530 return ret;
2531}
2532
Michael Chan74e49bb2011-07-20 14:55:23 +00002533static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
2534{
2535 struct cnic_local *cp = dev->cnic_priv;
2536 u32 i;
2537
2538 for (i = start_cid; i < cp->max_cid_space; i++) {
2539 struct cnic_context *ctx = &cp->ctx_tbl[i];
2540 int j;
2541
2542 while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
2543 msleep(10);
2544
2545 for (j = 0; j < 5; j++) {
2546 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2547 break;
2548 msleep(20);
2549 }
2550
2551 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2552 netdev_warn(dev->netdev, "CID %x not deleted\n",
2553 ctx->cid);
2554 }
2555}
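
/*
 * Illustrative sketch, not part of the driver: the bounded-poll idiom
 * from cnic_bnx2x_delete_wait() above, with usleep() standing in for
 * msleep().  Polling a fixed number of short intervals and then
 * giving up with a warning keeps one stuck context from hanging the
 * caller forever.
 */
#include <stdio.h>
#include <unistd.h>

static void example_poll_flag(const volatile int *busy, unsigned int id)
{
	int j;

	for (j = 0; j < 5; j++) {
		if (!*busy)
			break;
		usleep(20000);		/* roughly msleep(20) */
	}
	if (*busy)
		fprintf(stderr, "context %x not deleted\n", id);
}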
2556
Michael Chane1928c82010-12-23 07:43:04 +00002557static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2558{
2559 struct fcoe_kwqe_destroy *req;
2560 union l5cm_specific_data l5_data;
2561 struct cnic_local *cp = dev->cnic_priv;
Michael Chan5e65789f2013-09-02 11:42:29 -07002562 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chane1928c82010-12-23 07:43:04 +00002563 int ret;
2564 u32 cid;
2565
Michael Chan74e49bb2011-07-20 14:55:23 +00002566 cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
2567
Michael Chane1928c82010-12-23 07:43:04 +00002568 req = (struct fcoe_kwqe_destroy *) kwqe;
Michael Chan5e65789f2013-09-02 11:42:29 -07002569 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
Michael Chane1928c82010-12-23 07:43:04 +00002570
2571 memset(&l5_data, 0, sizeof(l5_data));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002572 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
Michael Chane1928c82010-12-23 07:43:04 +00002573 FCOE_CONNECTION_TYPE, &l5_data);
2574 return ret;
2575}
2576
Michael Chan23021c22012-01-04 12:12:28 +00002577static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
2578{
2579 struct cnic_local *cp = dev->cnic_priv;
2580 struct kcqe kcqe;
2581 struct kcqe *cqes[1];
2582 u32 cid;
2583 u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2584 u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
Michael Chan3238a9b2012-02-05 15:24:40 +00002585 u32 kcqe_op;
Michael Chan23021c22012-01-04 12:12:28 +00002586 int ulp_type;
2587
2588 cid = kwqe->kwqe_info0;
2589 memset(&kcqe, 0, sizeof(kcqe));
2590
Michael Chan3238a9b2012-02-05 15:24:40 +00002591 if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
2592 u32 l5_cid = 0;
2593
2594 ulp_type = CNIC_ULP_FCOE;
2595 if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
2596 struct fcoe_kwqe_conn_enable_disable *req;
2597
2598 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2599 kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
2600 cid = req->context_id;
2601 l5_cid = req->conn_id;
2602 } else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
2603 kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
2604 } else {
2605 return;
2606 }
2607 kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
2608 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
Michael Chan8ec3e702012-03-21 15:38:34 +00002609 kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
Michael Chan3238a9b2012-02-05 15:24:40 +00002610 kcqe.kcqe_info2 = cid;
2611 kcqe.kcqe_info0 = l5_cid;
2612
2613 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
Michael Chan23021c22012-01-04 12:12:28 +00002614 ulp_type = CNIC_ULP_ISCSI;
2615 if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
2616 cid = kwqe->kwqe_info1;
2617
2618 kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
2619 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
Michael Chan8ec3e702012-03-21 15:38:34 +00002620 kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
Michael Chan23021c22012-01-04 12:12:28 +00002621 kcqe.kcqe_info2 = cid;
2622 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
2623
2624 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
2625 struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
Michael Chan23021c22012-01-04 12:12:28 +00002626
2627 ulp_type = CNIC_ULP_L4;
2628 if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
2629 kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
2630 else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
2631 kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
2632 else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
2633 kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
2634 else
2635 return;
2636
2637 kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
2638 KCQE_FLAGS_LAYER_MASK_L4;
Michael Chan8ec3e702012-03-21 15:38:34 +00002639 l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
Michael Chan23021c22012-01-04 12:12:28 +00002640 l4kcqe->cid = cid;
2641 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
2642 } else {
2643 return;
2644 }
2645
Joe Perches64699332012-06-04 12:44:16 +00002646 cqes[0] = &kcqe;
Michael Chan23021c22012-01-04 12:12:28 +00002647 cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
2648}
2649
Michael Chane1928c82010-12-23 07:43:04 +00002650static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2651 struct kwqe *wqes[], u32 num_wqes)
Michael Chan71034ba2009-10-10 13:46:59 +00002652{
2653 int i, work, ret;
2654 u32 opcode;
2655 struct kwqe *kwqe;
2656
2657 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
 2658 return -EAGAIN; /* bnx2x is down */
2659
2660 for (i = 0; i < num_wqes; ) {
2661 kwqe = wqes[i];
2662 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2663 work = 1;
2664
2665 switch (opcode) {
2666 case ISCSI_KWQE_OPCODE_INIT1:
2667 ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2668 break;
2669 case ISCSI_KWQE_OPCODE_INIT2:
2670 ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2671 break;
2672 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2673 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2674 num_wqes - i, &work);
2675 break;
2676 case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2677 ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2678 break;
2679 case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2680 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2681 break;
2682 case L4_KWQE_OPCODE_VALUE_CONNECT1:
2683 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2684 &work);
2685 break;
2686 case L4_KWQE_OPCODE_VALUE_CLOSE:
2687 ret = cnic_bnx2x_close(dev, kwqe);
2688 break;
2689 case L4_KWQE_OPCODE_VALUE_RESET:
2690 ret = cnic_bnx2x_reset(dev, kwqe);
2691 break;
2692 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2693 ret = cnic_bnx2x_offload_pg(dev, kwqe);
2694 break;
2695 case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2696 ret = cnic_bnx2x_update_pg(dev, kwqe);
2697 break;
2698 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2699 ret = 0;
2700 break;
2701 default:
2702 ret = 0;
Joe Perchesddf79b22010-02-17 15:01:54 +00002703 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2704 opcode);
Michael Chan71034ba2009-10-10 13:46:59 +00002705 break;
2706 }
Michael Chan23021c22012-01-04 12:12:28 +00002707 if (ret < 0) {
Joe Perchesddf79b22010-02-17 15:01:54 +00002708 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2709 opcode);
Michael Chan23021c22012-01-04 12:12:28 +00002710
 2711 /* Possibly a bnx2x parity error; send a completion to the ULP
 2712 * drivers with an error code to speed up cleanup and reset
 2713 * recovery.
 2714 */
2715 if (ret == -EIO || ret == -EAGAIN)
2716 cnic_bnx2x_kwqe_err(dev, kwqe);
2717 }
Michael Chan71034ba2009-10-10 13:46:59 +00002718 i += work;
2719 }
2720 return 0;
2721}
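
/*
 * Illustrative sketch, not part of the driver: the dispatch loop
 * above advances by a handler-reported stride because some opcodes
 * (e.g. OFFLOAD_CONN1) consume several consecutive WQEs.  A reduced
 * model of that control flow:
 */
#include <stdio.h>

static void example_dispatch(const int ops[], int num,
			     int (*handle)(const int *ops, int avail,
					   int *consumed))
{
	int i = 0;

	while (i < num) {
		int consumed = 1;

		/* the handler may look ahead into ops[i..i+k-1] and
		 * report k through 'consumed' */
		if (handle(&ops[i], num - i, &consumed) < 0)
			fprintf(stderr, "op at %d failed\n", i);
		i += consumed;		/* skip everything it consumed */
	}
}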
2722
Michael Chane1928c82010-12-23 07:43:04 +00002723static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2724 struct kwqe *wqes[], u32 num_wqes)
2725{
Michael Chan104a43e2013-09-02 11:42:28 -07002726 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chane1928c82010-12-23 07:43:04 +00002727 int i, work, ret;
2728 u32 opcode;
2729 struct kwqe *kwqe;
2730
2731 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
 2732 return -EAGAIN; /* bnx2x is down */
2733
Michael Chan104a43e2013-09-02 11:42:28 -07002734 if (!BNX2X_CHIP_IS_E2_PLUS(bp))
Michael Chane1928c82010-12-23 07:43:04 +00002735 return -EINVAL;
2736
2737 for (i = 0; i < num_wqes; ) {
2738 kwqe = wqes[i];
2739 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2740 work = 1;
2741
2742 switch (opcode) {
2743 case FCOE_KWQE_OPCODE_INIT1:
2744 ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2745 num_wqes - i, &work);
2746 break;
2747 case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2748 ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2749 num_wqes - i, &work);
2750 break;
2751 case FCOE_KWQE_OPCODE_ENABLE_CONN:
2752 ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2753 break;
2754 case FCOE_KWQE_OPCODE_DISABLE_CONN:
2755 ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2756 break;
2757 case FCOE_KWQE_OPCODE_DESTROY_CONN:
2758 ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2759 break;
2760 case FCOE_KWQE_OPCODE_DESTROY:
2761 ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2762 break;
2763 case FCOE_KWQE_OPCODE_STAT:
2764 ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2765 break;
2766 default:
2767 ret = 0;
2768 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2769 opcode);
2770 break;
2771 }
Michael Chan3238a9b2012-02-05 15:24:40 +00002772 if (ret < 0) {
Michael Chane1928c82010-12-23 07:43:04 +00002773 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2774 opcode);
Michael Chan3238a9b2012-02-05 15:24:40 +00002775
 2776 /* Possibly a bnx2x parity error; send a completion to the ULP
 2777 * drivers with an error code to speed up cleanup and reset
 2778 * recovery.
 2779 */
2780 if (ret == -EIO || ret == -EAGAIN)
2781 cnic_bnx2x_kwqe_err(dev, kwqe);
2782 }
Michael Chane1928c82010-12-23 07:43:04 +00002783 i += work;
2784 }
2785 return 0;
2786}
2787
2788static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2789 u32 num_wqes)
2790{
2791 int ret = -EINVAL;
2792 u32 layer_code;
2793
2794 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2795 return -EAGAIN; /* bnx2x is down */
2796
2797 if (!num_wqes)
2798 return 0;
2799
2800 layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2801 switch (layer_code) {
2802 case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2803 case KWQE_FLAGS_LAYER_MASK_L4:
2804 case KWQE_FLAGS_LAYER_MASK_L2:
2805 ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2806 break;
2807
2808 case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2809 ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2810 break;
2811 }
2812 return ret;
2813}
2814
2815static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2816{
2817 if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2818 return KCQE_FLAGS_LAYER_MASK_L4;
2819
2820 return opflag & KCQE_FLAGS_LAYER_MASK;
2821}
2822
Michael Chana4636962009-06-08 18:14:43 -07002823static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2824{
2825 struct cnic_local *cp = dev->cnic_priv;
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00002826 int i, j, comp = 0;
Michael Chana4636962009-06-08 18:14:43 -07002827
2828 i = 0;
2829 j = 1;
2830 while (num_cqes) {
2831 struct cnic_ulp_ops *ulp_ops;
2832 int ulp_type;
2833 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
Michael Chane1928c82010-12-23 07:43:04 +00002834 u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
Michael Chana4636962009-06-08 18:14:43 -07002835
2836 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00002837 comp++;
Michael Chana4636962009-06-08 18:14:43 -07002838
2839 while (j < num_cqes) {
2840 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2841
Michael Chane1928c82010-12-23 07:43:04 +00002842 if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
Michael Chana4636962009-06-08 18:14:43 -07002843 break;
2844
2845 if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00002846 comp++;
Michael Chana4636962009-06-08 18:14:43 -07002847 j++;
2848 }
2849
2850 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2851 ulp_type = CNIC_ULP_RDMA;
2852 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2853 ulp_type = CNIC_ULP_ISCSI;
Michael Chane1928c82010-12-23 07:43:04 +00002854 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2855 ulp_type = CNIC_ULP_FCOE;
Michael Chana4636962009-06-08 18:14:43 -07002856 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2857 ulp_type = CNIC_ULP_L4;
2858 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2859 goto end;
2860 else {
Joe Perchesddf79b22010-02-17 15:01:54 +00002861 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2862 kcqe_op_flag);
Michael Chana4636962009-06-08 18:14:43 -07002863 goto end;
2864 }
2865
2866 rcu_read_lock();
2867 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2868 if (likely(ulp_ops)) {
2869 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2870 cp->completed_kcq + i, j);
2871 }
2872 rcu_read_unlock();
2873end:
2874 num_cqes -= j;
2875 i += j;
2876 j = 1;
2877 }
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00002878 if (unlikely(comp))
2879 cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
Michael Chana4636962009-06-08 18:14:43 -07002880}
2881
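/* Copy new KCQE pointers from the hardware ring into
 * cp->completed_kcq[], up to the hardware producer index.  Entries
 * with KCQE_FLAGS_NEXT set are held back until the final entry of the
 * sequence arrives, so only complete sequences are returned.
 */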
Michael Chan644b9d42010-06-24 14:58:40 +00002882static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
Michael Chana4636962009-06-08 18:14:43 -07002883{
2884 struct cnic_local *cp = dev->cnic_priv;
Michael Chan644b9d42010-06-24 14:58:40 +00002885 u16 i, ri, hw_prod, last;
Michael Chana4636962009-06-08 18:14:43 -07002886 struct kcqe *kcqe;
2887 int kcqe_cnt = 0, last_cnt = 0;
2888
Michael Chan644b9d42010-06-24 14:58:40 +00002889 i = ri = last = info->sw_prod_idx;
Michael Chana4636962009-06-08 18:14:43 -07002890 ri &= MAX_KCQ_IDX;
Michael Chan644b9d42010-06-24 14:58:40 +00002891 hw_prod = *info->hw_prod_idx_ptr;
Michael Chan59e51372011-06-14 01:32:38 +00002892 hw_prod = info->hw_idx(hw_prod);
Michael Chana4636962009-06-08 18:14:43 -07002893
2894 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
Michael Chan644b9d42010-06-24 14:58:40 +00002895 kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
Michael Chana4636962009-06-08 18:14:43 -07002896 cp->completed_kcq[kcqe_cnt++] = kcqe;
Michael Chan59e51372011-06-14 01:32:38 +00002897 i = info->next_idx(i);
Michael Chana4636962009-06-08 18:14:43 -07002898 ri = i & MAX_KCQ_IDX;
2899 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2900 last_cnt = kcqe_cnt;
2901 last = i;
2902 }
2903 }
2904
Michael Chan644b9d42010-06-24 14:58:40 +00002905 info->sw_prod_idx = last;
Michael Chana4636962009-06-08 18:14:43 -07002906 return last_cnt;
2907}
2908
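/* Scan the bnx2x L2 receive completion queue for ramrod completions
 * (client setup or halt) and return how many were found.  This is a
 * no-op on bnx2 devices.
 */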
Michael Chan48f753d2010-05-18 11:32:53 +00002909static int cnic_l2_completion(struct cnic_local *cp)
2910{
2911 u16 hw_cons, sw_cons;
Michael Chancd801532010-10-13 14:06:49 +00002912 struct cnic_uio_dev *udev = cp->udev;
Michael Chan48f753d2010-05-18 11:32:53 +00002913 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
Michael Chanbe1fefc2014-03-17 19:19:07 -08002914 (udev->l2_ring + (2 * CNIC_PAGE_SIZE));
Michael Chan48f753d2010-05-18 11:32:53 +00002915 u32 cmd;
2916 int comp = 0;
2917
2918 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2919 return 0;
2920
2921 hw_cons = *cp->rx_cons_ptr;
2922 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2923 hw_cons++;
2924
2925 sw_cons = cp->rx_cons;
2926 while (sw_cons != hw_cons) {
2927 u8 cqe_fp_flags;
2928
2929 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2930 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2931 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2932 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2933 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2934 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2935 cmd == RAMROD_CMD_ID_ETH_HALT)
2936 comp++;
2937 }
2938 sw_cons = BNX2X_NEXT_RCQE(sw_cons);
2939 }
2940 return comp;
2941}
2942
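/* Watch the L2 tx/rx rings on behalf of the userspace UIO driver:
 * notify it when the consumer indices move, and clear the L2 wait
 * flag once any pending ring ramrods have completed.
 */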
Michael Chan86b53602009-10-10 13:46:57 +00002943static void cnic_chk_pkt_rings(struct cnic_local *cp)
Michael Chana4636962009-06-08 18:14:43 -07002944{
Michael Chan541a7812010-10-06 03:17:22 +00002945 u16 rx_cons, tx_cons;
Michael Chan48f753d2010-05-18 11:32:53 +00002946 int comp = 0;
Michael Chana4636962009-06-08 18:14:43 -07002947
Michael Chan541a7812010-10-06 03:17:22 +00002948 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
Michael Chan66fee9e2010-06-24 14:58:38 +00002949 return;
2950
Michael Chan541a7812010-10-06 03:17:22 +00002951 rx_cons = *cp->rx_cons_ptr;
2952 tx_cons = *cp->tx_cons_ptr;
Michael Chana4636962009-06-08 18:14:43 -07002953 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
Michael Chan48f753d2010-05-18 11:32:53 +00002954 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2955 comp = cnic_l2_completion(cp);
2956
Michael Chana4636962009-06-08 18:14:43 -07002957 cp->tx_cons = tx_cons;
2958 cp->rx_cons = rx_cons;
Michael Chan71034ba2009-10-10 13:46:59 +00002959
Michael Chancd801532010-10-13 14:06:49 +00002960 if (cp->udev)
2961 uio_event_notify(&cp->udev->cnic_uinfo);
Michael Chana4636962009-06-08 18:14:43 -07002962 }
Michael Chan48f753d2010-05-18 11:32:53 +00002963 if (comp)
2964 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
Michael Chana4636962009-06-08 18:14:43 -07002965}
2966
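/* Service the bnx2 KCQ until no new entries remain, re-reading the
 * status block index after each pass, then write the new software
 * producer index back to the hardware.  Returns the latest status
 * block index so the caller can acknowledge the interrupt.
 */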
Michael Chanb177a5d52010-06-24 14:58:41 +00002967static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
Michael Chana4636962009-06-08 18:14:43 -07002968{
Michael Chana4636962009-06-08 18:14:43 -07002969 struct cnic_local *cp = dev->cnic_priv;
Michael Chanb177a5d52010-06-24 14:58:41 +00002970 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
Michael Chana4636962009-06-08 18:14:43 -07002971 int kcqe_cnt;
2972
Michael Chan107c3f42011-03-02 13:00:49 +00002973 /* status block index must be read before reading other fields */
2974 rmb();
Michael Chana4636962009-06-08 18:14:43 -07002975 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2976
Michael Chan644b9d42010-06-24 14:58:40 +00002977 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
Michael Chana4636962009-06-08 18:14:43 -07002978
2979 service_kcqes(dev, kcqe_cnt);
2980
2981 /* Tell compiler that status_blk fields can change. */
2982 barrier();
Michael Chan93736652011-06-08 19:29:32 +00002983 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2984 /* status block index must be read first */
2985 rmb();
2986 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
Michael Chana4636962009-06-08 18:14:43 -07002987 }
2988
Michael Chan644b9d42010-06-24 14:58:40 +00002989 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
Michael Chana4636962009-06-08 18:14:43 -07002990
Michael Chan86b53602009-10-10 13:46:57 +00002991 cnic_chk_pkt_rings(cp);
Michael Chanb177a5d52010-06-24 14:58:41 +00002992
Michael Chana4636962009-06-08 18:14:43 -07002993 return status_idx;
2994}
2995
Michael Chanb177a5d52010-06-24 14:58:41 +00002996static int cnic_service_bnx2(void *data, void *status_blk)
2997{
2998 struct cnic_dev *dev = data;
Michael Chanb177a5d52010-06-24 14:58:41 +00002999
Michael Chaneaaa6e92010-12-23 08:38:30 +00003000 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
3001 struct status_block *sblk = status_blk;
3002
3003 return sblk->status_idx;
3004 }
Michael Chanb177a5d52010-06-24 14:58:41 +00003005
3006 return cnic_service_bnx2_queues(dev);
3007}
3008
Michael Chana4636962009-06-08 18:14:43 -07003009static void cnic_service_bnx2_msix(unsigned long data)
3010{
3011 struct cnic_dev *dev = (struct cnic_dev *) data;
3012 struct cnic_local *cp = dev->cnic_priv;
Michael Chana4636962009-06-08 18:14:43 -07003013
Michael Chanb177a5d52010-06-24 14:58:41 +00003014 cp->last_status_idx = cnic_service_bnx2_queues(dev);
Michael Chana4636962009-06-08 18:14:43 -07003015
Michael Chana4636962009-06-08 18:14:43 -07003016 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
3017 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
3018}
3019
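/* Common interrupt path: prefetch the status block and the next KCQ
 * entry, then defer the actual queue processing to the tasklet.
 */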
Michael Chan66fee9e2010-06-24 14:58:38 +00003020static void cnic_doirq(struct cnic_dev *dev)
3021{
3022 struct cnic_local *cp = dev->cnic_priv;
Michael Chan66fee9e2010-06-24 14:58:38 +00003023
3024 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
Michael Chaneaaa6e92010-12-23 08:38:30 +00003025 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
3026
Michael Chan66fee9e2010-06-24 14:58:38 +00003027 prefetch(cp->status_blk.gen);
Michael Chane6c28892010-06-24 14:58:39 +00003028 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
Michael Chan66fee9e2010-06-24 14:58:38 +00003029
3030 tasklet_schedule(&cp->cnic_irq_task);
3031 }
3032}
3033
Michael Chana4636962009-06-08 18:14:43 -07003034static irqreturn_t cnic_irq(int irq, void *dev_instance)
3035{
3036 struct cnic_dev *dev = dev_instance;
3037 struct cnic_local *cp = dev->cnic_priv;
Michael Chana4636962009-06-08 18:14:43 -07003038
3039 if (cp->ack_int)
3040 cp->ack_int(dev);
3041
Michael Chan66fee9e2010-06-24 14:58:38 +00003042 cnic_doirq(dev);
Michael Chana4636962009-06-08 18:14:43 -07003043
3044 return IRQ_HANDLED;
3045}
3046
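/* Acknowledge a bnx2x status block index by composing an IGU ack
 * register value and writing it through the port's HC command
 * register.
 */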
Michael Chan71034ba2009-10-10 13:46:59 +00003047static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
3048 u16 index, u8 op, u8 update)
3049{
Michael Chan5bf945a2013-09-02 11:42:30 -07003050 struct bnx2x *bp = netdev_priv(dev->netdev);
3051 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 +
Michael Chan71034ba2009-10-10 13:46:59 +00003052 COMMAND_REG_INT_ACK);
3053 struct igu_ack_register igu_ack;
3054
3055 igu_ack.status_block_index = index;
3056 igu_ack.sb_id_and_flags =
3057 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
3058 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
3059 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
3060 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
3061
3062 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
3063}
3064
Michael Chanee87a822010-10-13 14:06:51 +00003065static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
3066 u16 index, u8 op, u8 update)
3067{
3068 struct igu_regular cmd_data;
3069 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
3070
3071 cmd_data.sb_id_and_flags =
3072 (index << IGU_REGULAR_SB_INDEX_SHIFT) |
3073 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
3074 (update << IGU_REGULAR_BUPDATE_SHIFT) |
3075 (op << IGU_REGULAR_ENABLE_INT_SHIFT);
3076
3077
3078 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
3079}
3080
Michael Chan71034ba2009-10-10 13:46:59 +00003081static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
3082{
3083 struct cnic_local *cp = dev->cnic_priv;
3084
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003085 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
Michael Chan71034ba2009-10-10 13:46:59 +00003086 IGU_INT_DISABLE, 0);
3087}
3088
Michael Chanee87a822010-10-13 14:06:51 +00003089static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
3090{
3091 struct cnic_local *cp = dev->cnic_priv;
3092
3093 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
3094 IGU_INT_DISABLE, 0);
3095}
3096
Michael Chan8cc0e022012-09-08 06:01:03 +00003097static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
3098{
3099 struct cnic_local *cp = dev->cnic_priv;
3100
3101 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
3102 IGU_INT_ENABLE, 1);
3103}
3104
3105static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
3106{
3107 struct cnic_local *cp = dev->cnic_priv;
3108
3109 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
3110 IGU_INT_ENABLE, 1);
3111}
3112
Michael Chanb177a5d52010-06-24 14:58:41 +00003113static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
Michael Chan71034ba2009-10-10 13:46:59 +00003114{
Michael Chanb177a5d52010-06-24 14:58:41 +00003115 u32 last_status = *info->status_idx_ptr;
Michael Chan71034ba2009-10-10 13:46:59 +00003116 int kcqe_cnt;
3117
Michael Chan107c3f42011-03-02 13:00:49 +00003118 /* status block index must be read before reading the KCQ */
3119 rmb();
Michael Chanb177a5d52010-06-24 14:58:41 +00003120 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
Michael Chan71034ba2009-10-10 13:46:59 +00003121
3122 service_kcqes(dev, kcqe_cnt);
3123
3124 /* Tell compiler that sblk fields can change. */
3125 barrier();
Michael Chan71034ba2009-10-10 13:46:59 +00003126
Michael Chanb177a5d52010-06-24 14:58:41 +00003127 last_status = *info->status_idx_ptr;
Michael Chan107c3f42011-03-02 13:00:49 +00003128 /* status block index must be read before reading the KCQ */
3129 rmb();
Michael Chan71034ba2009-10-10 13:46:59 +00003130 }
Michael Chanb177a5d52010-06-24 14:58:41 +00003131 return last_status;
3132}
3133
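/* Bottom-half tasklet for bnx2x devices: drain KCQ1, and on FCoE
 * capable devices also KCQ2, repeating until both queues are quiet,
 * then re-arm the IGU interrupt with the final status block index.
 */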
3134static void cnic_service_bnx2x_bh(unsigned long data)
3135{
3136 struct cnic_dev *dev = (struct cnic_dev *) data;
3137 struct cnic_local *cp = dev->cnic_priv;
Michael Chan48a30562013-09-18 01:50:39 -07003138 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chan0197b082011-03-02 13:00:50 +00003139 u32 status_idx, new_status_idx;
Michael Chanb177a5d52010-06-24 14:58:41 +00003140
3141 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
3142 return;
3143
Michael Chan0197b082011-03-02 13:00:50 +00003144 while (1) {
3145 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
Michael Chan71034ba2009-10-10 13:46:59 +00003146
Michael Chan0197b082011-03-02 13:00:50 +00003147 CNIC_WR16(dev, cp->kcq1.io_addr,
3148 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
Michael Chane21ba412010-12-23 07:43:03 +00003149
Michael Chan48a30562013-09-18 01:50:39 -07003150 if (!CNIC_SUPPORTS_FCOE(bp)) {
Michael Chan8cc0e022012-09-08 06:01:03 +00003151 cp->arm_int(dev, status_idx);
Michael Chan0197b082011-03-02 13:00:50 +00003152 break;
3153 }
3154
3155 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
3156
3157 if (new_status_idx != status_idx)
3158 continue;
Michael Chane21ba412010-12-23 07:43:03 +00003159
3160 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
3161 MAX_KCQ_IDX);
3162
Michael Chanee87a822010-10-13 14:06:51 +00003163 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
3164 status_idx, IGU_INT_ENABLE, 1);
Michael Chan0197b082011-03-02 13:00:50 +00003165
3166 break;
Michael Chane21ba412010-12-23 07:43:03 +00003167 }
Michael Chan71034ba2009-10-10 13:46:59 +00003168}
3169
3170static int cnic_service_bnx2x(void *data, void *status_blk)
3171{
3172 struct cnic_dev *dev = data;
3173 struct cnic_local *cp = dev->cnic_priv;
Michael Chan71034ba2009-10-10 13:46:59 +00003174
Michael Chan66fee9e2010-06-24 14:58:38 +00003175 if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3176 cnic_doirq(dev);
Michael Chan71034ba2009-10-10 13:46:59 +00003177
Michael Chan66fee9e2010-06-24 14:58:38 +00003178 cnic_chk_pkt_rings(cp);
Michael Chan71034ba2009-10-10 13:46:59 +00003179
3180 return 0;
3181}
3182
Dmitry Kravkovfab0dc82011-03-31 17:04:22 -07003183static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
3184{
3185 struct cnic_ulp_ops *ulp_ops;
3186
3187 if (if_type == CNIC_ULP_ISCSI)
3188 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
3189
3190 mutex_lock(&cnic_lock);
3191 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3192 lockdep_is_held(&cnic_lock));
3193 if (!ulp_ops) {
3194 mutex_unlock(&cnic_lock);
3195 return;
3196 }
3197 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3198 mutex_unlock(&cnic_lock);
3199
3200 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3201 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
3202
3203 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3204}
3205
Michael Chana4636962009-06-08 18:14:43 -07003206static void cnic_ulp_stop(struct cnic_dev *dev)
3207{
3208 struct cnic_local *cp = dev->cnic_priv;
3209 int if_type;
3210
Dmitry Kravkovfab0dc82011-03-31 17:04:22 -07003211 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
3212 cnic_ulp_stop_one(cp, if_type);
Michael Chana4636962009-06-08 18:14:43 -07003213}
3214
3215static void cnic_ulp_start(struct cnic_dev *dev)
3216{
3217 struct cnic_local *cp = dev->cnic_priv;
3218 int if_type;
3219
Michael Chana4636962009-06-08 18:14:43 -07003220 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
3221 struct cnic_ulp_ops *ulp_ops;
3222
Michael Chan681dbd72009-08-14 15:49:46 +00003223 mutex_lock(&cnic_lock);
Eric Dumazet13707f92011-01-26 19:28:23 +00003224 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3225 lockdep_is_held(&cnic_lock));
Michael Chan681dbd72009-08-14 15:49:46 +00003226 if (!ulp_ops || !ulp_ops->cnic_start) {
3227 mutex_unlock(&cnic_lock);
Michael Chana4636962009-06-08 18:14:43 -07003228 continue;
Michael Chan681dbd72009-08-14 15:49:46 +00003229 }
3230 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3231 mutex_unlock(&cnic_lock);
Michael Chana4636962009-06-08 18:14:43 -07003232
3233 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3234 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
Michael Chan681dbd72009-08-14 15:49:46 +00003235
3236 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
Michael Chana4636962009-06-08 18:14:43 -07003237 }
Michael Chana4636962009-06-08 18:14:43 -07003238}
3239
Barak Witkowski1d187b32011-12-05 22:41:50 +00003240static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
3241{
3242 struct cnic_local *cp = dev->cnic_priv;
3243 struct cnic_ulp_ops *ulp_ops;
3244 int rc;
3245
3246 mutex_lock(&cnic_lock);
Michael Chanf7bd12d2014-03-17 19:19:06 -08003247 ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
3248 lockdep_is_held(&cnic_lock));
Barak Witkowski1d187b32011-12-05 22:41:50 +00003249 if (ulp_ops && ulp_ops->cnic_get_stats)
3250 rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
3251 else
3252 rc = -ENODEV;
3253 mutex_unlock(&cnic_lock);
3254 return rc;
3255}
3256
Michael Chana4636962009-06-08 18:14:43 -07003257static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3258{
3259 struct cnic_dev *dev = data;
Barak Witkowski1d187b32011-12-05 22:41:50 +00003260 int ulp_type = CNIC_ULP_ISCSI;
Michael Chana4636962009-06-08 18:14:43 -07003261
3262 switch (info->cmd) {
3263 case CNIC_CTL_STOP_CMD:
3264 cnic_hold(dev);
Michael Chana4636962009-06-08 18:14:43 -07003265
3266 cnic_ulp_stop(dev);
3267 cnic_stop_hw(dev);
3268
Michael Chana4636962009-06-08 18:14:43 -07003269 cnic_put(dev);
3270 break;
3271 case CNIC_CTL_START_CMD:
3272 cnic_hold(dev);
Michael Chana4636962009-06-08 18:14:43 -07003273
3274 if (!cnic_start_hw(dev))
3275 cnic_ulp_start(dev);
3276
Michael Chana4636962009-06-08 18:14:43 -07003277 cnic_put(dev);
3278 break;
Dmitry Kravkovfab0dc82011-03-31 17:04:22 -07003279 case CNIC_CTL_STOP_ISCSI_CMD: {
3280 struct cnic_local *cp = dev->cnic_priv;
3281 set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3282 queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3283 break;
3284 }
Michael Chan71034ba2009-10-10 13:46:59 +00003285 case CNIC_CTL_COMPLETION_CMD: {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003286 struct cnic_ctl_completion *comp = &info->data.comp;
3287 u32 cid = BNX2X_SW_CID(comp->cid);
Michael Chan71034ba2009-10-10 13:46:59 +00003288 u32 l5_cid;
3289 struct cnic_local *cp = dev->cnic_priv;
3290
Michael Chana2028b232012-06-27 15:08:19 +00003291 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
3292 break;
3293
Michael Chan71034ba2009-10-10 13:46:59 +00003294 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3295 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3296
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003297 if (unlikely(comp->error)) {
3298 set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
3299 netdev_err(dev->netdev,
3300 "CID %x CFC delete comp error %x\n",
3301 cid, comp->error);
3302 }
3303
Michael Chan71034ba2009-10-10 13:46:59 +00003304 ctx->wait_cond = 1;
3305 wake_up(&ctx->waitq);
3306 }
3307 break;
3308 }
Barak Witkowski1d187b32011-12-05 22:41:50 +00003309 case CNIC_CTL_FCOE_STATS_GET_CMD:
3310 ulp_type = CNIC_ULP_FCOE;
3311 /* fall through */
3312 case CNIC_CTL_ISCSI_STATS_GET_CMD:
3313 cnic_hold(dev);
3314 cnic_copy_ulp_stats(dev, ulp_type);
3315 cnic_put(dev);
3316 break;
3317
Michael Chana4636962009-06-08 18:14:43 -07003318 default:
3319 return -EINVAL;
3320 }
3321 return 0;
3322}
3323
3324static void cnic_ulp_init(struct cnic_dev *dev)
3325{
3326 int i;
3327 struct cnic_local *cp = dev->cnic_priv;
3328
Michael Chana4636962009-06-08 18:14:43 -07003329 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3330 struct cnic_ulp_ops *ulp_ops;
3331
Michael Chan7fc1ece2009-08-14 15:49:47 +00003332 mutex_lock(&cnic_lock);
Eric Dumazet13707f92011-01-26 19:28:23 +00003333 ulp_ops = cnic_ulp_tbl_prot(i);
Michael Chan7fc1ece2009-08-14 15:49:47 +00003334 if (!ulp_ops || !ulp_ops->cnic_init) {
3335 mutex_unlock(&cnic_lock);
Michael Chana4636962009-06-08 18:14:43 -07003336 continue;
Michael Chan7fc1ece2009-08-14 15:49:47 +00003337 }
3338 ulp_get(ulp_ops);
3339 mutex_unlock(&cnic_lock);
Michael Chana4636962009-06-08 18:14:43 -07003340
3341 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3342 ulp_ops->cnic_init(dev);
3343
Michael Chan7fc1ece2009-08-14 15:49:47 +00003344 ulp_put(ulp_ops);
Michael Chana4636962009-06-08 18:14:43 -07003345 }
Michael Chana4636962009-06-08 18:14:43 -07003346}
3347
3348static void cnic_ulp_exit(struct cnic_dev *dev)
3349{
3350 int i;
3351 struct cnic_local *cp = dev->cnic_priv;
3352
Michael Chana4636962009-06-08 18:14:43 -07003353 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3354 struct cnic_ulp_ops *ulp_ops;
3355
Michael Chan7fc1ece2009-08-14 15:49:47 +00003356 mutex_lock(&cnic_lock);
Eric Dumazet13707f92011-01-26 19:28:23 +00003357 ulp_ops = cnic_ulp_tbl_prot(i);
Michael Chan7fc1ece2009-08-14 15:49:47 +00003358 if (!ulp_ops || !ulp_ops->cnic_exit) {
3359 mutex_unlock(&cnic_lock);
Michael Chana4636962009-06-08 18:14:43 -07003360 continue;
Michael Chan7fc1ece2009-08-14 15:49:47 +00003361 }
3362 ulp_get(ulp_ops);
3363 mutex_unlock(&cnic_lock);
Michael Chana4636962009-06-08 18:14:43 -07003364
3365 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3366 ulp_ops->cnic_exit(dev);
3367
Michael Chan7fc1ece2009-08-14 15:49:47 +00003368 ulp_put(ulp_ops);
Michael Chana4636962009-06-08 18:14:43 -07003369 }
Michael Chana4636962009-06-08 18:14:43 -07003370}
3371
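/* Build and submit an OFFLOAD_PG KWQE describing the L2 path for a
 * connection: destination and source MAC addresses, EtherType, and an
 * optional VLAN tag.
 */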
3372static int cnic_cm_offload_pg(struct cnic_sock *csk)
3373{
3374 struct cnic_dev *dev = csk->dev;
3375 struct l4_kwq_offload_pg *l4kwqe;
3376 struct kwqe *wqes[1];
3377
3378 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
3379 memset(l4kwqe, 0, sizeof(*l4kwqe));
3380 wqes[0] = (struct kwqe *) l4kwqe;
3381
3382 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
3383 l4kwqe->flags =
3384 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
3385 l4kwqe->l2hdr_nbytes = ETH_HLEN;
3386
3387 l4kwqe->da0 = csk->ha[0];
3388 l4kwqe->da1 = csk->ha[1];
3389 l4kwqe->da2 = csk->ha[2];
3390 l4kwqe->da3 = csk->ha[3];
3391 l4kwqe->da4 = csk->ha[4];
3392 l4kwqe->da5 = csk->ha[5];
3393
3394 l4kwqe->sa0 = dev->mac_addr[0];
3395 l4kwqe->sa1 = dev->mac_addr[1];
3396 l4kwqe->sa2 = dev->mac_addr[2];
3397 l4kwqe->sa3 = dev->mac_addr[3];
3398 l4kwqe->sa4 = dev->mac_addr[4];
3399 l4kwqe->sa5 = dev->mac_addr[5];
3400
3401 l4kwqe->etype = ETH_P_IP;
Eddie Waia9736c02010-02-24 14:42:04 +00003402 l4kwqe->ipid_start = DEF_IPID_START;
Michael Chana4636962009-06-08 18:14:43 -07003403 l4kwqe->host_opaque = csk->l5_cid;
3404
3405 if (csk->vlan_id) {
3406 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
3407 l4kwqe->vlan_tag = csk->vlan_id;
3408 l4kwqe->l2hdr_nbytes += 4;
3409 }
3410
3411 return dev->submit_kwqes(dev, wqes, 1);
3412}
3413
3414static int cnic_cm_update_pg(struct cnic_sock *csk)
3415{
3416 struct cnic_dev *dev = csk->dev;
3417 struct l4_kwq_update_pg *l4kwqe;
3418 struct kwqe *wqes[1];
3419
3420 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
3421 memset(l4kwqe, 0, sizeof(*l4kwqe));
3422 wqes[0] = (struct kwqe *) l4kwqe;
3423
3424 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
3425 l4kwqe->flags =
3426 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
3427 l4kwqe->pg_cid = csk->pg_cid;
3428
3429 l4kwqe->da0 = csk->ha[0];
3430 l4kwqe->da1 = csk->ha[1];
3431 l4kwqe->da2 = csk->ha[2];
3432 l4kwqe->da3 = csk->ha[3];
3433 l4kwqe->da4 = csk->ha[4];
3434 l4kwqe->da5 = csk->ha[5];
3435
3436 l4kwqe->pg_host_opaque = csk->l5_cid;
3437 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
3438
3439 return dev->submit_kwqes(dev, wqes, 1);
3440}
3441
3442static int cnic_cm_upload_pg(struct cnic_sock *csk)
3443{
3444 struct cnic_dev *dev = csk->dev;
3445 struct l4_kwq_upload *l4kwqe;
3446 struct kwqe *wqes[1];
3447
3448 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
3449 memset(l4kwqe, 0, sizeof(*l4kwqe));
3450 wqes[0] = (struct kwqe *) l4kwqe;
3451
3452 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
3453 l4kwqe->flags =
3454 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
3455 l4kwqe->cid = csk->pg_cid;
3456
3457 return dev->submit_kwqes(dev, wqes, 1);
3458}
3459
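/* Build the TCP connect request as a chain of KWQEs: CONNECT1 always,
 * CONNECT2 only for the additional IPv6 address words, and CONNECT3
 * with the keepalive, window and MSS parameters.
 */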
3460static int cnic_cm_conn_req(struct cnic_sock *csk)
3461{
3462 struct cnic_dev *dev = csk->dev;
3463 struct l4_kwq_connect_req1 *l4kwqe1;
3464 struct l4_kwq_connect_req2 *l4kwqe2;
3465 struct l4_kwq_connect_req3 *l4kwqe3;
3466 struct kwqe *wqes[3];
3467 u8 tcp_flags = 0;
3468 int num_wqes = 2;
3469
3470 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
3471 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
3472 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
3473 memset(l4kwqe1, 0, sizeof(*l4kwqe1));
3474 memset(l4kwqe2, 0, sizeof(*l4kwqe2));
3475 memset(l4kwqe3, 0, sizeof(*l4kwqe3));
3476
3477 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
3478 l4kwqe3->flags =
3479 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
3480 l4kwqe3->ka_timeout = csk->ka_timeout;
3481 l4kwqe3->ka_interval = csk->ka_interval;
3482 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
3483 l4kwqe3->tos = csk->tos;
3484 l4kwqe3->ttl = csk->ttl;
3485 l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
3486 l4kwqe3->pmtu = csk->mtu;
3487 l4kwqe3->rcv_buf = csk->rcv_buf;
3488 l4kwqe3->snd_buf = csk->snd_buf;
3489 l4kwqe3->seed = csk->seed;
3490
3491 wqes[0] = (struct kwqe *) l4kwqe1;
3492 if (test_bit(SK_F_IPV6, &csk->flags)) {
3493 wqes[1] = (struct kwqe *) l4kwqe2;
3494 wqes[2] = (struct kwqe *) l4kwqe3;
3495 num_wqes = 3;
3496
3497 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
3498 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
3499 l4kwqe2->flags =
3500 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
3501 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
3502 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
3503 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
3504 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
3505 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
3506 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
3507 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
3508 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
3509 sizeof(struct tcphdr);
3510 } else {
3511 wqes[1] = (struct kwqe *) l4kwqe3;
3512 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
3513 sizeof(struct tcphdr);
3514 }
3515
3516 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
3517 l4kwqe1->flags =
3518 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
3519 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
3520 l4kwqe1->cid = csk->cid;
3521 l4kwqe1->pg_cid = csk->pg_cid;
3522 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
3523 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
3524 l4kwqe1->src_port = be16_to_cpu(csk->src_port);
3525 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
3526 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
3527 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
3528 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
3529 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
3530 if (csk->tcp_flags & SK_TCP_NAGLE)
3531 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
3532 if (csk->tcp_flags & SK_TCP_TIMESTAMP)
3533 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
3534 if (csk->tcp_flags & SK_TCP_SACK)
3535 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
3536 if (csk->tcp_flags & SK_TCP_SEG_SCALING)
3537 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
3538
3539 l4kwqe1->tcp_flags = tcp_flags;
3540
3541 return dev->submit_kwqes(dev, wqes, num_wqes);
3542}
3543
3544static int cnic_cm_close_req(struct cnic_sock *csk)
3545{
3546 struct cnic_dev *dev = csk->dev;
3547 struct l4_kwq_close_req *l4kwqe;
3548 struct kwqe *wqes[1];
3549
3550 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
3551 memset(l4kwqe, 0, sizeof(*l4kwqe));
3552 wqes[0] = (struct kwqe *) l4kwqe;
3553
3554 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
3555 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
3556 l4kwqe->cid = csk->cid;
3557
3558 return dev->submit_kwqes(dev, wqes, 1);
3559}
3560
3561static int cnic_cm_abort_req(struct cnic_sock *csk)
3562{
3563 struct cnic_dev *dev = csk->dev;
3564 struct l4_kwq_reset_req *l4kwqe;
3565 struct kwqe *wqes[1];
3566
3567 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
3568 memset(l4kwqe, 0, sizeof(*l4kwqe));
3569 wqes[0] = (struct kwqe *) l4kwqe;
3570
3571 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
3572 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
3573 l4kwqe->cid = csk->cid;
3574
3575 return dev->submit_kwqes(dev, wqes, 1);
3576}
3577
3578static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3579 u32 l5_cid, struct cnic_sock **csk, void *context)
3580{
3581 struct cnic_local *cp = dev->cnic_priv;
3582 struct cnic_sock *csk1;
3583
3584 if (l5_cid >= MAX_CM_SK_TBL_SZ)
3585 return -EINVAL;
3586
Michael Chanfdf24082010-10-13 14:06:47 +00003587 if (cp->ctx_tbl) {
3588 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3589
3590 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3591 return -EAGAIN;
3592 }
3593
Michael Chana4636962009-06-08 18:14:43 -07003594 csk1 = &cp->csk_tbl[l5_cid];
3595 if (atomic_read(&csk1->ref_count))
3596 return -EAGAIN;
3597
3598 if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
3599 return -EBUSY;
3600
3601 csk1->dev = dev;
3602 csk1->cid = cid;
3603 csk1->l5_cid = l5_cid;
3604 csk1->ulp_type = ulp_type;
3605 csk1->context = context;
3606
3607 csk1->ka_timeout = DEF_KA_TIMEOUT;
3608 csk1->ka_interval = DEF_KA_INTERVAL;
3609 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
3610 csk1->tos = DEF_TOS;
3611 csk1->ttl = DEF_TTL;
3612 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
3613 csk1->rcv_buf = DEF_RCV_BUF;
3614 csk1->snd_buf = DEF_SND_BUF;
3615 csk1->seed = DEF_SEED;
Eddie Wai6cdcdbb2013-07-28 19:03:57 -07003616 csk1->tcp_flags = 0;
Michael Chana4636962009-06-08 18:14:43 -07003617
3618 *csk = csk1;
3619 return 0;
3620}
3621
3622static void cnic_cm_cleanup(struct cnic_sock *csk)
3623{
3624 if (csk->src_port) {
3625 struct cnic_dev *dev = csk->dev;
3626 struct cnic_local *cp = dev->cnic_priv;
3627
Michael Chan9b093362010-12-23 07:42:56 +00003628 cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
Michael Chana4636962009-06-08 18:14:43 -07003629 csk->src_port = 0;
3630 }
3631}
3632
3633static void cnic_close_conn(struct cnic_sock *csk)
3634{
3635 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
3636 cnic_cm_upload_pg(csk);
3637 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3638 }
3639 cnic_cm_cleanup(csk);
3640}
3641
3642static int cnic_cm_destroy(struct cnic_sock *csk)
3643{
3644 if (!cnic_in_use(csk))
3645 return -EINVAL;
3646
3647 csk_hold(csk);
3648 clear_bit(SK_F_INUSE, &csk->flags);
3649 smp_mb__after_clear_bit();
3650 while (atomic_read(&csk->ref_count) != 1)
3651 msleep(1);
3652 cnic_cm_cleanup(csk);
3653
3654 csk->flags = 0;
3655 csk_put(csk);
3656 return 0;
3657}
3658
3659static inline u16 cnic_get_vlan(struct net_device *dev,
3660 struct net_device **vlan_dev)
3661{
3662 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3663 *vlan_dev = vlan_dev_real_dev(dev);
3664 return vlan_dev_vlan_id(dev);
3665 }
3666 *vlan_dev = dev;
3667 return 0;
3668}
3669
3670static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3671 struct dst_entry **dst)
3672{
Randy Dunlapfaea56c2009-06-12 11:43:48 -07003673#if defined(CONFIG_INET)
Michael Chana4636962009-06-08 18:14:43 -07003674 struct rtable *rt;
3675
David S. Miller78fbfd82011-03-12 00:00:52 -05003676 rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
3677 if (!IS_ERR(rt)) {
Changli Gaod8d1f302010-06-10 23:31:35 -07003678 *dst = &rt->dst;
David S. Miller78fbfd82011-03-12 00:00:52 -05003679 return 0;
3680 }
3681 return PTR_ERR(rt);
Randy Dunlapfaea56c2009-06-12 11:43:48 -07003682#else
3683 return -ENETUNREACH;
3684#endif
Michael Chana4636962009-06-08 18:14:43 -07003685}
3686
3687static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3688 struct dst_entry **dst)
3689{
Randy Dunlapfaea56c2009-06-12 11:43:48 -07003690#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
David S. Miller4c9483b2011-03-12 16:22:43 -05003691 struct flowi6 fl6;
Michael Chana4636962009-06-08 18:14:43 -07003692
David S. Miller4c9483b2011-03-12 16:22:43 -05003693 memset(&fl6, 0, sizeof(fl6));
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +00003694 fl6.daddr = dst_addr->sin6_addr;
David S. Miller4c9483b2011-03-12 16:22:43 -05003695 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
3696 fl6.flowi6_oif = dst_addr->sin6_scope_id;
Michael Chana4636962009-06-08 18:14:43 -07003697
David S. Miller4c9483b2011-03-12 16:22:43 -05003698 *dst = ip6_route_output(&init_net, NULL, &fl6);
RongQing.Li05417432012-02-21 22:10:50 +00003699 if ((*dst)->error) {
3700 dst_release(*dst);
3701 *dst = NULL;
3702 return -ENETUNREACH;
3703 } else
Michael Chana4636962009-06-08 18:14:43 -07003704 return 0;
3705#endif
3706
3707 return -ENETUNREACH;
3708}
3709
3710static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
3711 int ulp_type)
3712{
3713 struct cnic_dev *dev = NULL;
3714 struct dst_entry *dst;
3715 struct net_device *netdev = NULL;
3716 int err = -ENETUNREACH;
3717
3718 if (dst_addr->sin_family == AF_INET)
3719 err = cnic_get_v4_route(dst_addr, &dst);
3720 else if (dst_addr->sin_family == AF_INET6) {
3721 struct sockaddr_in6 *dst_addr6 =
3722 (struct sockaddr_in6 *) dst_addr;
3723
3724 err = cnic_get_v6_route(dst_addr6, &dst);
3725 } else
3726 return NULL;
3727
3728 if (err)
3729 return NULL;
3730
3731 if (!dst->dev)
3732 goto done;
3733
3734 cnic_get_vlan(dst->dev, &netdev);
3735
3736 dev = cnic_from_netdev(netdev);
3737
3738done:
3739 dst_release(dst);
3740 if (dev)
3741 cnic_put(dev);
3742 return dev;
3743}
3744
3745static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3746{
3747 struct cnic_dev *dev = csk->dev;
3748 struct cnic_local *cp = dev->cnic_priv;
3749
3750 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
3751}
3752
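/* Fill in the connection's routing state: destination IP and port,
 * VLAN id and MTU taken from the route when it resolves back to our
 * netdev, and a local source port allocated from the port table when
 * the caller did not supply a usable one.
 */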
3753static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3754{
3755 struct cnic_dev *dev = csk->dev;
3756 struct cnic_local *cp = dev->cnic_priv;
Michael Chanc76284a2010-02-24 14:42:07 +00003757 int is_v6, rc = 0;
3758 struct dst_entry *dst = NULL;
Michael Chana4636962009-06-08 18:14:43 -07003759 struct net_device *realdev;
Michael Chan9b093362010-12-23 07:42:56 +00003760 __be16 local_port;
3761 u32 port_id;
Michael Chana4636962009-06-08 18:14:43 -07003762
3763 if (saddr->local.v6.sin6_family == AF_INET6 &&
3764 saddr->remote.v6.sin6_family == AF_INET6)
3765 is_v6 = 1;
3766 else if (saddr->local.v4.sin_family == AF_INET &&
3767 saddr->remote.v4.sin_family == AF_INET)
3768 is_v6 = 0;
3769 else
3770 return -EINVAL;
3771
3772 clear_bit(SK_F_IPV6, &csk->flags);
3773
3774 if (is_v6) {
Michael Chana4636962009-06-08 18:14:43 -07003775 set_bit(SK_F_IPV6, &csk->flags);
Michael Chanc76284a2010-02-24 14:42:07 +00003776 cnic_get_v6_route(&saddr->remote.v6, &dst);
Michael Chana4636962009-06-08 18:14:43 -07003777
3778 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3779 sizeof(struct in6_addr));
3780 csk->dst_port = saddr->remote.v6.sin6_port;
3781 local_port = saddr->local.v6.sin6_port;
Michael Chana4636962009-06-08 18:14:43 -07003782
3783 } else {
Michael Chanc76284a2010-02-24 14:42:07 +00003784 cnic_get_v4_route(&saddr->remote.v4, &dst);
Michael Chana4636962009-06-08 18:14:43 -07003785
3786 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3787 csk->dst_port = saddr->remote.v4.sin_port;
3788 local_port = saddr->local.v4.sin_port;
3789 }
3790
Michael Chanc76284a2010-02-24 14:42:07 +00003791 csk->vlan_id = 0;
3792 csk->mtu = dev->netdev->mtu;
3793 if (dst && dst->dev) {
3794 u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3795 if (realdev == dev->netdev) {
3796 csk->vlan_id = vlan;
3797 csk->mtu = dst_mtu(dst);
3798 }
3799 }
Michael Chana4636962009-06-08 18:14:43 -07003800
Michael Chan9b093362010-12-23 07:42:56 +00003801 port_id = be16_to_cpu(local_port);
3802 if (port_id >= CNIC_LOCAL_PORT_MIN &&
3803 port_id < CNIC_LOCAL_PORT_MAX) {
3804 if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3805 port_id = 0;
Michael Chana4636962009-06-08 18:14:43 -07003806 } else
Michael Chan9b093362010-12-23 07:42:56 +00003807 port_id = 0;
Michael Chana4636962009-06-08 18:14:43 -07003808
Michael Chan9b093362010-12-23 07:42:56 +00003809 if (!port_id) {
3810 port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3811 if (port_id == -1) {
Michael Chana4636962009-06-08 18:14:43 -07003812 rc = -ENOMEM;
3813 goto err_out;
3814 }
Michael Chan9b093362010-12-23 07:42:56 +00003815 local_port = cpu_to_be16(port_id);
Michael Chana4636962009-06-08 18:14:43 -07003816 }
3817 csk->src_port = local_port;
3818
Michael Chana4636962009-06-08 18:14:43 -07003819err_out:
3820 dst_release(dst);
3821 return rc;
3822}
3823
3824static void cnic_init_csk_state(struct cnic_sock *csk)
3825{
3826 csk->state = 0;
3827 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3828 clear_bit(SK_F_CLOSING, &csk->flags);
3829}
3830
3831static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3832{
Dmitry Kravkovfab0dc82011-03-31 17:04:22 -07003833 struct cnic_local *cp = csk->dev->cnic_priv;
Michael Chana4636962009-06-08 18:14:43 -07003834 int err = 0;
3835
Dmitry Kravkovfab0dc82011-03-31 17:04:22 -07003836 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3837 return -EOPNOTSUPP;
3838
Michael Chana4636962009-06-08 18:14:43 -07003839 if (!cnic_in_use(csk))
3840 return -EINVAL;
3841
3842 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3843 return -EINVAL;
3844
3845 cnic_init_csk_state(csk);
3846
3847 err = cnic_get_route(csk, saddr);
3848 if (err)
3849 goto err_out;
3850
3851 err = cnic_resolve_addr(csk, saddr);
3852 if (!err)
3853 return 0;
3854
3855err_out:
3856 clear_bit(SK_F_CONNECT_START, &csk->flags);
3857 return err;
3858}
3859
3860static int cnic_cm_abort(struct cnic_sock *csk)
3861{
3862 struct cnic_local *cp = csk->dev->cnic_priv;
Michael Chan7b34a462010-06-15 08:57:03 +00003863 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
Michael Chana4636962009-06-08 18:14:43 -07003864
3865 if (!cnic_in_use(csk))
3866 return -EINVAL;
3867
3868 if (cnic_abort_prep(csk))
3869 return cnic_cm_abort_req(csk);
3870
3871 /* Getting here means that we haven't started connect, or
Eddie Wai0d650ec2012-12-05 10:10:15 +00003872 * connect was not successful, or it has been reset by the target.
Michael Chana4636962009-06-08 18:14:43 -07003873 */
3874
Michael Chana4636962009-06-08 18:14:43 -07003875 cp->close_conn(csk, opcode);
Eddie Wai0d650ec2012-12-05 10:10:15 +00003876 if (csk->state != opcode) {
3877 /* Wait for remote reset sequence to complete */
3878 while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3879 msleep(1);
3880
Michael Chan7b34a462010-06-15 08:57:03 +00003881 return -EALREADY;
Eddie Wai0d650ec2012-12-05 10:10:15 +00003882 }
Michael Chana4636962009-06-08 18:14:43 -07003883
3884 return 0;
3885}
3886
3887static int cnic_cm_close(struct cnic_sock *csk)
3888{
3889 if (!cnic_in_use(csk))
3890 return -EINVAL;
3891
3892 if (cnic_close_prep(csk)) {
3893 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3894 return cnic_cm_close_req(csk);
Michael Chaned99daa52010-06-15 08:57:00 +00003895 } else {
Eddie Wai0d650ec2012-12-05 10:10:15 +00003896 /* Wait for remote reset sequence to complete */
3897 while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3898 msleep(1);
3899
Michael Chaned99daa52010-06-15 08:57:00 +00003900 return -EALREADY;
Michael Chana4636962009-06-08 18:14:43 -07003901 }
3902 return 0;
3903}
3904
3905static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3906 u8 opcode)
3907{
3908 struct cnic_ulp_ops *ulp_ops;
3909 int ulp_type = csk->ulp_type;
3910
3911 rcu_read_lock();
3912 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3913 if (ulp_ops) {
3914 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3915 ulp_ops->cm_connect_complete(csk);
3916 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3917 ulp_ops->cm_close_complete(csk);
3918 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3919 ulp_ops->cm_remote_abort(csk);
3920 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3921 ulp_ops->cm_abort_complete(csk);
3922 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3923 ulp_ops->cm_remote_close(csk);
3924 }
3925 rcu_read_unlock();
3926}
3927
3928static int cnic_cm_set_pg(struct cnic_sock *csk)
3929{
3930 if (cnic_offld_prep(csk)) {
3931 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3932 cnic_cm_update_pg(csk);
3933 else
3934 cnic_cm_offload_pg(csk);
3935 }
3936 return 0;
3937}
3938
3939static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3940{
3941 struct cnic_local *cp = dev->cnic_priv;
3942 u32 l5_cid = kcqe->pg_host_opaque;
3943 u8 opcode = kcqe->op_code;
3944 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3945
3946 csk_hold(csk);
3947 if (!cnic_in_use(csk))
3948 goto done;
3949
3950 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3951 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3952 goto done;
3953 }
Eddie Waia9736c02010-02-24 14:42:04 +00003954 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3955 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3956 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3957 cnic_cm_upcall(cp, csk,
3958 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3959 goto done;
3960 }
3961
Michael Chana4636962009-06-08 18:14:43 -07003962 csk->pg_cid = kcqe->pg_cid;
3963 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3964 cnic_cm_conn_req(csk);
3965
3966done:
3967 csk_put(csk);
3968}
3969
Michael Chane1928c82010-12-23 07:43:04 +00003970static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3971{
3972 struct cnic_local *cp = dev->cnic_priv;
3973 struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3974 u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3975 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3976
3977 ctx->timestamp = jiffies;
3978 ctx->wait_cond = 1;
3979 wake_up(&ctx->waitq);
3980}
3981
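/* Demultiplex an L4/L5 connection-manager KCQE to its socket and
 * drive the connect/close/reset state machine, calling back into the
 * ULP driver through cnic_cm_upcall() where appropriate.
 */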
Michael Chana4636962009-06-08 18:14:43 -07003982static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3983{
3984 struct cnic_local *cp = dev->cnic_priv;
3985 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3986 u8 opcode = l4kcqe->op_code;
3987 u32 l5_cid;
3988 struct cnic_sock *csk;
3989
Michael Chane1928c82010-12-23 07:43:04 +00003990 if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
3991 cnic_process_fcoe_term_conn(dev, kcqe);
3992 return;
3993 }
Michael Chana4636962009-06-08 18:14:43 -07003994 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3995 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3996 cnic_cm_process_offld_pg(dev, l4kcqe);
3997 return;
3998 }
3999
4000 l5_cid = l4kcqe->conn_id;
4001 if (opcode & 0x80)
4002 l5_cid = l4kcqe->cid;
4003 if (l5_cid >= MAX_CM_SK_TBL_SZ)
4004 return;
4005
4006 csk = &cp->csk_tbl[l5_cid];
4007 csk_hold(csk);
4008
4009 if (!cnic_in_use(csk)) {
4010 csk_put(csk);
4011 return;
4012 }
4013
4014 switch (opcode) {
Eddie Waia9736c02010-02-24 14:42:04 +00004015 case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
4016 if (l4kcqe->status != 0) {
4017 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4018 cnic_cm_upcall(cp, csk,
4019 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
4020 }
4021 break;
Michael Chana4636962009-06-08 18:14:43 -07004022 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
4023 if (l4kcqe->status == 0)
4024 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
Michael Chan8ec3e702012-03-21 15:38:34 +00004025 else if (l4kcqe->status ==
4026 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
Jeffrey Huang0cb1f4b2012-02-08 17:33:56 +00004027 set_bit(SK_F_HW_ERR, &csk->flags);
Michael Chana4636962009-06-08 18:14:43 -07004028
4029 smp_mb__before_clear_bit();
4030 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4031 cnic_cm_upcall(cp, csk, opcode);
4032 break;
4033
Eddie Wai28e3a8f2013-07-28 19:03:59 -07004034 case L5CM_RAMROD_CMD_ID_CLOSE: {
4035 struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
4036
4037 if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) {
4038 netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
4039 l4kcqe->status, l5kcqe->completion_status);
Eddie Wai7bc910f2012-06-27 15:08:22 +00004040 opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
4041 /* Fall through */
4042 } else {
4043 break;
4044 }
Eddie Wai28e3a8f2013-07-28 19:03:59 -07004045 }
Michael Chana4636962009-06-08 18:14:43 -07004046 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
Michael Chana4636962009-06-08 18:14:43 -07004047 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4048 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
Michael Chan71034ba2009-10-10 13:46:59 +00004049 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4050 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
Michael Chan8ec3e702012-03-21 15:38:34 +00004051 if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
Michael Chan23021c22012-01-04 12:12:28 +00004052 set_bit(SK_F_HW_ERR, &csk->flags);
4053
Michael Chana4636962009-06-08 18:14:43 -07004054 cp->close_conn(csk, opcode);
4055 break;
4056
4057 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
Michael Chan101c40c2011-06-08 19:29:33 +00004058 /* after we already sent CLOSE_REQ */
4059 if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
4060 !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
4061 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
4062 cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
4063 else
4064 cnic_cm_upcall(cp, csk, opcode);
Michael Chana4636962009-06-08 18:14:43 -07004065 break;
4066 }
4067 csk_put(csk);
4068}
4069
4070static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
4071{
4072 struct cnic_dev *dev = data;
4073 int i;
4074
4075 for (i = 0; i < num; i++)
4076 cnic_cm_process_kcqe(dev, kcqe[i]);
4077}
4078
4079static struct cnic_ulp_ops cm_ulp_ops = {
4080 .indicate_kcqes = cnic_cm_indicate_kcqe,
4081};
4082
4083static void cnic_cm_free_mem(struct cnic_dev *dev)
4084{
4085 struct cnic_local *cp = dev->cnic_priv;
4086
4087 kfree(cp->csk_tbl);
4088 cp->csk_tbl = NULL;
4089 cnic_free_id_tbl(&cp->csk_port_tbl);
4090}
4091
4092static int cnic_cm_alloc_mem(struct cnic_dev *dev)
4093{
4094 struct cnic_local *cp = dev->cnic_priv;
Eddie Wai11f23aa2011-06-08 19:29:34 +00004095 u32 port_id;
Michael Chana4636962009-06-08 18:14:43 -07004096
4097 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
4098 GFP_KERNEL);
4099 if (!cp->csk_tbl)
4100 return -ENOMEM;
4101
Akinobu Mitae00adf32013-05-07 16:18:15 -07004102 port_id = prandom_u32();
Eddie Wai11f23aa2011-06-08 19:29:34 +00004103 port_id %= CNIC_LOCAL_PORT_RANGE;
Michael Chana4636962009-06-08 18:14:43 -07004104 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
Eddie Wai11f23aa2011-06-08 19:29:34 +00004105 CNIC_LOCAL_PORT_MIN, port_id)) {
Michael Chana4636962009-06-08 18:14:43 -07004106 cnic_cm_free_mem(dev);
4107 return -ENOMEM;
4108 }
4109 return 0;
4110}
4111
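/* Decide whether a close/reset event may complete the teardown of
 * this socket, per the rules in the comment below; SK_F_CLOSING is
 * set exactly once when the transition is accepted.
 */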
4112static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
4113{
Michael Chan943189f2010-06-15 08:57:02 +00004114 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
4115 /* Unsolicited RESET_COMP or RESET_RECEIVED */
4116 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
4117 csk->state = opcode;
Michael Chana1e621b2010-06-15 08:57:01 +00004118 }
Michael Chan943189f2010-06-15 08:57:02 +00004119
4120 /* 1. If event opcode matches the expected event in csk->state
Michael Chan101c40c2011-06-08 19:29:33 +00004121 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
4122 * event
Michael Chan7b34a462010-06-15 08:57:03 +00004123 * 3. If the expected event is 0, meaning the connection was never
 4124	 * established, we accept the opcode from cm_abort.
Michael Chan943189f2010-06-15 08:57:02 +00004125 */
Michael Chan7b34a462010-06-15 08:57:03 +00004126 if (opcode == csk->state || csk->state == 0 ||
Michael Chan101c40c2011-06-08 19:29:33 +00004127 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
4128 csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
Michael Chan7b34a462010-06-15 08:57:03 +00004129 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
4130 if (csk->state == 0)
4131 csk->state = opcode;
Michael Chana4636962009-06-08 18:14:43 -07004132 return 1;
Michael Chan7b34a462010-06-15 08:57:03 +00004133 }
Michael Chana4636962009-06-08 18:14:43 -07004134 }
4135 return 0;
4136}
4137
4138static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
4139{
4140 struct cnic_dev *dev = csk->dev;
4141 struct cnic_local *cp = dev->cnic_priv;
4142
Michael Chana1e621b2010-06-15 08:57:01 +00004143 if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
4144 cnic_cm_upcall(cp, csk, opcode);
4145 return;
4146 }
4147
Michael Chana4636962009-06-08 18:14:43 -07004148 clear_bit(SK_F_CONNECT_START, &csk->flags);
Eddie Wai66883e92010-02-24 14:42:05 +00004149 cnic_close_conn(csk);
Michael Chan7b34a462010-06-15 08:57:03 +00004150 csk->state = opcode;
Eddie Wai66883e92010-02-24 14:42:05 +00004151 cnic_cm_upcall(cp, csk, opcode);
Michael Chana4636962009-06-08 18:14:43 -07004152}
4153
4154static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
4155{
4156}
4157
4158static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
4159{
4160 u32 seed;
4161
Akinobu Mitae00adf32013-05-07 16:18:15 -07004162 seed = prandom_u32();
Michael Chana4636962009-06-08 18:14:43 -07004163 cnic_ctx_wr(dev, 45, 0, seed);
4164 return 0;
4165}
4166
Michael Chan71034ba2009-10-10 13:46:59 +00004167static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
4168{
4169 struct cnic_dev *dev = csk->dev;
4170 struct cnic_local *cp = dev->cnic_priv;
4171 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
4172 union l5cm_specific_data l5_data;
4173 u32 cmd = 0;
4174 int close_complete = 0;
4175
4176 switch (opcode) {
4177 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4178 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4179 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
Michael Chan7b34a462010-06-15 08:57:03 +00004180 if (cnic_ready_to_close(csk, opcode)) {
Michael Chan23021c22012-01-04 12:12:28 +00004181 if (test_bit(SK_F_HW_ERR, &csk->flags))
4182 close_complete = 1;
4183 else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
Michael Chan7b34a462010-06-15 08:57:03 +00004184 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
4185 else
4186 close_complete = 1;
4187 }
Michael Chan71034ba2009-10-10 13:46:59 +00004188 break;
4189 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4190 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
4191 break;
4192 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4193 close_complete = 1;
4194 break;
4195 }
4196 if (cmd) {
4197 memset(&l5_data, 0, sizeof(l5_data));
4198
4199 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
4200 &l5_data);
4201 } else if (close_complete) {
4202 ctx->timestamp = jiffies;
4203 cnic_close_conn(csk);
4204 cnic_cm_upcall(cp, csk, csk->state);
4205 }
4206}
4207
4208static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4209{
Michael Chanfdf24082010-10-13 14:06:47 +00004210 struct cnic_local *cp = dev->cnic_priv;
Michael Chanfdf24082010-10-13 14:06:47 +00004211
4212 if (!cp->ctx_tbl)
4213 return;
4214
4215 if (!netif_running(dev->netdev))
4216 return;
4217
Michael Chan74e49bb2011-07-20 14:55:23 +00004218 cnic_bnx2x_delete_wait(dev, 0);
Michael Chanfdf24082010-10-13 14:06:47 +00004219
4220 cancel_delayed_work(&cp->delete_task);
4221 flush_workqueue(cnic_wq);
4222
4223 if (atomic_read(&cp->iscsi_conn) != 0)
4224 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
4225 atomic_read(&cp->iscsi_conn));
Michael Chan71034ba2009-10-10 13:46:59 +00004226}
4227
4228static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4229{
Michael Chan68c64d22012-12-06 10:33:11 +00004230 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chana5b3c4a2013-09-02 11:42:31 -07004231 u32 pfid = bp->pfid;
Michael Chan5bf945a2013-09-02 11:42:30 -07004232 u32 port = BP_PORT(bp);
Michael Chan71034ba2009-10-10 13:46:59 +00004233
4234 cnic_init_bnx2x_mac(dev);
Eddie Waib3bd2d62013-07-28 19:03:58 -07004235 cnic_bnx2x_set_tcp_options(dev, 0, 1);
Michael Chan71034ba2009-10-10 13:46:59 +00004236
4237 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00004238 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
Michael Chan71034ba2009-10-10 13:46:59 +00004239
4240 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004241 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
Michael Chan71034ba2009-10-10 13:46:59 +00004242 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004243 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
Michael Chan71034ba2009-10-10 13:46:59 +00004244 DEF_MAX_DA_COUNT);
4245
4246 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00004247 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
Michael Chan71034ba2009-10-10 13:46:59 +00004248 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00004249 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
Michael Chan71034ba2009-10-10 13:46:59 +00004250 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00004251 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
Michael Chan71034ba2009-10-10 13:46:59 +00004252 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00004253 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
Michael Chan71034ba2009-10-10 13:46:59 +00004254
Michael Chan14203982010-10-06 03:16:06 +00004255 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00004256 DEF_MAX_CWND);
4257 return 0;
4258}
4259
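/* Delayed work that retires offloaded connections: contexts marked
 * for delete get a 2 second grace period before the destroy ramrod is
 * sent, and the work re-queues itself while any are still pending.
 */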
Michael Chanfdf24082010-10-13 14:06:47 +00004260static void cnic_delete_task(struct work_struct *work)
4261{
4262 struct cnic_local *cp;
4263 struct cnic_dev *dev;
4264 u32 i;
4265 int need_resched = 0;
4266
4267 cp = container_of(work, struct cnic_local, delete_task.work);
4268 dev = cp->dev;
4269
Dmitry Kravkovfab0dc82011-03-31 17:04:22 -07004270 if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
4271 struct drv_ctl_info info;
4272
Dmitry Kravkovfab0dc82011-03-31 17:04:22 -07004273 cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
Dmitry Kravkovfab0dc82011-03-31 17:04:22 -07004274
4275 info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
4276 cp->ethdev->drv_ctl(dev->netdev, &info);
4277 }
4278
Michael Chanfdf24082010-10-13 14:06:47 +00004279 for (i = 0; i < cp->max_cid_space; i++) {
4280 struct cnic_context *ctx = &cp->ctx_tbl[i];
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004281 int err;
Michael Chanfdf24082010-10-13 14:06:47 +00004282
4283 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
4284 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4285 continue;
4286
4287 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4288 need_resched = 1;
4289 continue;
4290 }
4291
4292 if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4293 continue;
4294
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004295 err = cnic_bnx2x_destroy_ramrod(dev, i);
Michael Chanfdf24082010-10-13 14:06:47 +00004296
4297 cnic_free_bnx2x_conn_resc(dev, i);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004298 if (!err) {
4299 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4300 atomic_dec(&cp->iscsi_conn);
Michael Chanfdf24082010-10-13 14:06:47 +00004301
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004302 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4303 }
Michael Chanfdf24082010-10-13 14:06:47 +00004304 }
4305
4306 if (need_resched)
4307 queue_delayed_work(cnic_wq, &cp->delete_task,
4308 msecs_to_jiffies(10));
4309
4310}
4311
Michael Chana4636962009-06-08 18:14:43 -07004312static int cnic_cm_open(struct cnic_dev *dev)
4313{
4314 struct cnic_local *cp = dev->cnic_priv;
4315 int err;
4316
4317 err = cnic_cm_alloc_mem(dev);
4318 if (err)
4319 return err;
4320
4321 err = cp->start_cm(dev);
4322
4323 if (err)
4324 goto err_out;
4325
Michael Chanfdf24082010-10-13 14:06:47 +00004326 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4327
Michael Chana4636962009-06-08 18:14:43 -07004328 dev->cm_create = cnic_cm_create;
4329 dev->cm_destroy = cnic_cm_destroy;
4330 dev->cm_connect = cnic_cm_connect;
4331 dev->cm_abort = cnic_cm_abort;
4332 dev->cm_close = cnic_cm_close;
4333 dev->cm_select_dev = cnic_cm_select_dev;
4334
4335 cp->ulp_handle[CNIC_ULP_L4] = dev;
4336 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4337 return 0;
4338
4339err_out:
4340 cnic_cm_free_mem(dev);
4341 return err;
4342}
4343
4344static int cnic_cm_shutdown(struct cnic_dev *dev)
4345{
4346 struct cnic_local *cp = dev->cnic_priv;
4347 int i;
4348
Michael Chana4636962009-06-08 18:14:43 -07004349 if (!cp->csk_tbl)
4350 return 0;
4351
4352 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4353 struct cnic_sock *csk = &cp->csk_tbl[i];
4354
4355 clear_bit(SK_F_INUSE, &csk->flags);
4356 cnic_cm_cleanup(csk);
4357 }
4358 cnic_cm_free_mem(dev);
4359
4360 return 0;
4361}
4362
4363static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4364{
Michael Chana4636962009-06-08 18:14:43 -07004365 u32 cid_addr;
4366 int i;
4367
Michael Chana4636962009-06-08 18:14:43 -07004368 cid_addr = GET_CID_ADDR(cid);
4369
4370 for (i = 0; i < CTX_SIZE; i += 4)
4371 cnic_ctx_wr(dev, cid_addr, i, 0);
4372}
4373
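/* Load (or invalidate) the 5709 host page table: each context page
 * address is written through BNX2_CTX_HOST_PAGE_TBL_DATA0/1, then
 * the WRITE_REQ bit in BNX2_CTX_HOST_PAGE_TBL_CTRL is polled (up to
 * 10 x 5 us) until the chip latches the entry; -EBUSY if it never
 * clears.
 */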
4374static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4375{
4376 struct cnic_local *cp = dev->cnic_priv;
4377 int ret = 0, i;
4378 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4379
Michael Chan4ce45e02012-12-06 10:33:10 +00004380 if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
Michael Chana4636962009-06-08 18:14:43 -07004381 return 0;
4382
4383 for (i = 0; i < cp->ctx_blks; i++) {
4384 int j;
4385 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4386 u32 val;
4387
Michael Chanbe1fefc2014-03-17 19:19:07 -08004388 memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
Michael Chana4636962009-06-08 18:14:43 -07004389
4390 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4391 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4392 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4393 (u64) cp->ctx_arr[i].mapping >> 32);
4394 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4395 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4396 for (j = 0; j < 10; j++) {
4397
4398 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4399 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4400 break;
4401 udelay(5);
4402 }
4403 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
4404 ret = -EBUSY;
4405 break;
4406 }
4407 }
4408 return ret;
4409}
4410
4411static void cnic_free_irq(struct cnic_dev *dev)
4412{
4413 struct cnic_local *cp = dev->cnic_priv;
4414 struct cnic_eth_dev *ethdev = cp->ethdev;
4415
4416 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4417 cp->disable_int_sync(dev);
Michael Chan6e0dc642010-10-13 14:06:44 +00004418 tasklet_kill(&cp->cnic_irq_task);
Michael Chana4636962009-06-08 18:14:43 -07004419 free_irq(ethdev->irq_arr[0].vector, dev);
4420 }
4421}
4422
Michael Chan6e0dc642010-10-13 14:06:44 +00004423static int cnic_request_irq(struct cnic_dev *dev)
4424{
4425 struct cnic_local *cp = dev->cnic_priv;
4426 struct cnic_eth_dev *ethdev = cp->ethdev;
4427 int err;
4428
4429 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4430 if (err)
4431 tasklet_disable(&cp->cnic_irq_task);
4432
4433 return err;
4434}
4435
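/* Set up the bnx2 CNIC interrupt path.  In MSI-X mode the vector's
 * host-coalescing block is put in one-shot mode and the tasklet is
 * bound to cnic_service_bnx2_msix; both modes then force a coalesce
 * and wait for the KCQ completion producer index to reset to 0
 * before declaring the path usable.
 */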
Michael Chana4636962009-06-08 18:14:43 -07004436static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4437{
4438 struct cnic_local *cp = dev->cnic_priv;
4439 struct cnic_eth_dev *ethdev = cp->ethdev;
4440
4441 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4442 int err, i = 0;
4443 int sblk_num = cp->status_blk_num;
4444 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4445 BNX2_HC_SB_CONFIG_1;
4446
4447 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4448
4449 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4450 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4451 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4452
Michael Chana4dde3a2010-02-24 14:42:08 +00004453 cp->last_status_idx = cp->status_blk.bnx2->status_idx;
Joe Perches164165d2009-11-19 09:30:10 +00004454 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
Michael Chana4636962009-06-08 18:14:43 -07004455 (unsigned long) dev);
Michael Chan6e0dc642010-10-13 14:06:44 +00004456 err = cnic_request_irq(dev);
4457 if (err)
Michael Chana4636962009-06-08 18:14:43 -07004458 return err;
Michael Chan6e0dc642010-10-13 14:06:44 +00004459
Michael Chana4dde3a2010-02-24 14:42:08 +00004460 while (cp->status_blk.bnx2->status_completion_producer_index &&
Michael Chana4636962009-06-08 18:14:43 -07004461 i < 10) {
4462 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4463 1 << (11 + sblk_num));
4464 udelay(10);
4465 i++;
4466 barrier();
4467 }
Michael Chana4dde3a2010-02-24 14:42:08 +00004468 if (cp->status_blk.bnx2->status_completion_producer_index) {
Michael Chana4636962009-06-08 18:14:43 -07004469 cnic_free_irq(dev);
4470 goto failed;
4471 }
4472
4473 } else {
Michael Chana4dde3a2010-02-24 14:42:08 +00004474 struct status_block *sblk = cp->status_blk.gen;
Michael Chana4636962009-06-08 18:14:43 -07004475 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4476 int i = 0;
4477
4478 while (sblk->status_completion_producer_index && i < 10) {
4479 CNIC_WR(dev, BNX2_HC_COMMAND,
4480 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4481 udelay(10);
4482 i++;
4483 barrier();
4484 }
4485 if (sblk->status_completion_producer_index)
4486 goto failed;
4487
4488 }
4489 return 0;
4490
4491failed:
Joe Perchesddf79b22010-02-17 15:01:54 +00004492 netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
Michael Chana4636962009-06-08 18:14:43 -07004493 return -EBUSY;
4494}
4495
4496static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4497{
4498 struct cnic_local *cp = dev->cnic_priv;
4499 struct cnic_eth_dev *ethdev = cp->ethdev;
4500
4501 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4502 return;
4503
4504 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4505 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4506}
4507
4508static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4509{
4510 struct cnic_local *cp = dev->cnic_priv;
4511 struct cnic_eth_dev *ethdev = cp->ethdev;
4512
4513 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4514 return;
4515
4516 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4517 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4518 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4519 synchronize_irq(ethdev->irq_arr[0].vector);
4520}
4521
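/* Build the L2 TX ring for the uio/userspace path.  Each TX BD is
 * pre-pointed at the same buffer in udev->l2_buf, and the final
 * (chain) BD is pointed back at the ring base so the page loops on
 * itself; the same base address is also written into the context.
 */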
4522static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4523{
4524 struct cnic_local *cp = dev->cnic_priv;
4525 struct cnic_eth_dev *ethdev = cp->ethdev;
Michael Chancd801532010-10-13 14:06:49 +00004526 struct cnic_uio_dev *udev = cp->udev;
Michael Chana4636962009-06-08 18:14:43 -07004527 u32 cid_addr, tx_cid, sb_id;
4528 u32 val, offset0, offset1, offset2, offset3;
4529 int i;
Michael Chan2bc40782012-12-06 10:33:09 +00004530 struct bnx2_tx_bd *txbd;
Michael Chancd801532010-10-13 14:06:49 +00004531 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
Michael Chana4dde3a2010-02-24 14:42:08 +00004532 struct status_block *s_blk = cp->status_blk.gen;
Michael Chana4636962009-06-08 18:14:43 -07004533
4534 sb_id = cp->status_blk_num;
4535 tx_cid = 20;
Michael Chana4636962009-06-08 18:14:43 -07004536 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4537 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
Michael Chana4dde3a2010-02-24 14:42:08 +00004538 struct status_block_msix *sblk = cp->status_blk.bnx2;
Michael Chana4636962009-06-08 18:14:43 -07004539
4540 tx_cid = TX_TSS_CID + sb_id - 1;
Michael Chana4636962009-06-08 18:14:43 -07004541 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4542 (TX_TSS_CID << 7));
4543 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4544 }
4545 cp->tx_cons = *cp->tx_cons_ptr;
4546
4547 cid_addr = GET_CID_ADDR(tx_cid);
Michael Chan4ce45e02012-12-06 10:33:10 +00004548 if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
Michael Chana4636962009-06-08 18:14:43 -07004549 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4550
4551 for (i = 0; i < PHY_CTX_SIZE; i += 4)
4552 cnic_ctx_wr(dev, cid_addr2, i, 0);
4553
4554 offset0 = BNX2_L2CTX_TYPE_XI;
4555 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4556 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4557 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4558 } else {
Michael Chanb58ffb42010-05-27 16:31:41 -07004559 cnic_init_context(dev, tx_cid);
4560 cnic_init_context(dev, tx_cid + 1);
4561
Michael Chana4636962009-06-08 18:14:43 -07004562 offset0 = BNX2_L2CTX_TYPE;
4563 offset1 = BNX2_L2CTX_CMD_TYPE;
4564 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4565 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4566 }
4567 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4568 cnic_ctx_wr(dev, cid_addr, offset0, val);
4569
4570 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4571 cnic_ctx_wr(dev, cid_addr, offset1, val);
4572
Joe Perches43d620c2011-06-16 19:08:06 +00004573 txbd = udev->l2_ring;
Michael Chana4636962009-06-08 18:14:43 -07004574
Michael Chancd801532010-10-13 14:06:49 +00004575 buf_map = udev->l2_buf_map;
Michael Chan2bc40782012-12-06 10:33:09 +00004576 for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) {
Michael Chana4636962009-06-08 18:14:43 -07004577 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4578 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4579 }
Michael Chancd801532010-10-13 14:06:49 +00004580 val = (u64) ring_map >> 32;
Michael Chana4636962009-06-08 18:14:43 -07004581 cnic_ctx_wr(dev, cid_addr, offset2, val);
4582 txbd->tx_bd_haddr_hi = val;
4583
Michael Chancd801532010-10-13 14:06:49 +00004584 val = (u64) ring_map & 0xffffffff;
Michael Chana4636962009-06-08 18:14:43 -07004585 cnic_ctx_wr(dev, cid_addr, offset3, val);
4586 txbd->tx_bd_haddr_lo = val;
4587}
4588
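/* Build the L2 RX ring on CID 2, one CNIC_PAGE above the TX ring in
 * udev->l2_ring.  RX BDs cycle through l2_rx_ring_size buffers, the
 * status-block index is selected by sb_id, and bit 2 of
 * RXP_SCRATCH_RXP_FLOOD is set (presumably directing the RX-path
 * firmware to forward traffic to this client).
 */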
4589static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4590{
4591 struct cnic_local *cp = dev->cnic_priv;
4592 struct cnic_eth_dev *ethdev = cp->ethdev;
Michael Chancd801532010-10-13 14:06:49 +00004593 struct cnic_uio_dev *udev = cp->udev;
Michael Chana4636962009-06-08 18:14:43 -07004594 u32 cid_addr, sb_id, val, coal_reg, coal_val;
4595 int i;
Michael Chan2bc40782012-12-06 10:33:09 +00004596 struct bnx2_rx_bd *rxbd;
Michael Chana4dde3a2010-02-24 14:42:08 +00004597 struct status_block *s_blk = cp->status_blk.gen;
Michael Chancd801532010-10-13 14:06:49 +00004598 dma_addr_t ring_map = udev->l2_ring_map;
Michael Chana4636962009-06-08 18:14:43 -07004599
4600 sb_id = cp->status_blk_num;
4601 cnic_init_context(dev, 2);
4602 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
4603 coal_reg = BNX2_HC_COMMAND;
4604 coal_val = CNIC_RD(dev, coal_reg);
4605 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
Michael Chana4dde3a2010-02-24 14:42:08 +00004606 struct status_block_msix *sblk = cp->status_blk.bnx2;
Michael Chana4636962009-06-08 18:14:43 -07004607
4608 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
4609 coal_reg = BNX2_HC_COALESCE_NOW;
4610 coal_val = 1 << (11 + sb_id);
4611 }
4612 i = 0;
4613	while (*cp->rx_cons_ptr == 0 && i < 10) {
4614 CNIC_WR(dev, coal_reg, coal_val);
4615 udelay(10);
4616 i++;
4617 barrier();
4618 }
4619 cp->rx_cons = *cp->rx_cons_ptr;
4620
4621 cid_addr = GET_CID_ADDR(2);
4622 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4623 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4624 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4625
4626 if (sb_id == 0)
Michael Chand0549382009-10-28 03:41:59 -07004627 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
Michael Chana4636962009-06-08 18:14:43 -07004628 else
Michael Chand0549382009-10-28 03:41:59 -07004629 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
Michael Chana4636962009-06-08 18:14:43 -07004630 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4631
Michael Chanbe1fefc2014-03-17 19:19:07 -08004632 rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
Michael Chan2bc40782012-12-06 10:33:09 +00004633 for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
Michael Chana4636962009-06-08 18:14:43 -07004634 dma_addr_t buf_map;
4635 int n = (i % cp->l2_rx_ring_size) + 1;
4636
Michael Chancd801532010-10-13 14:06:49 +00004637 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
Michael Chana4636962009-06-08 18:14:43 -07004638 rxbd->rx_bd_len = cp->l2_single_buf_size;
4639 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4640 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4641 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4642 }
Michael Chanbe1fefc2014-03-17 19:19:07 -08004643 val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
Michael Chana4636962009-06-08 18:14:43 -07004644 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4645 rxbd->rx_bd_haddr_hi = val;
4646
Michael Chanbe1fefc2014-03-17 19:19:07 -08004647 val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
Michael Chana4636962009-06-08 18:14:43 -07004648 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4649 rxbd->rx_bd_haddr_lo = val;
4650
4651 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4652 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
4653}
4654
4655static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4656{
4657 struct kwqe *wqes[1], l2kwqe;
4658
4659 memset(&l2kwqe, 0, sizeof(l2kwqe));
4660 wqes[0] = &l2kwqe;
Michael Chane1928c82010-12-23 07:43:04 +00004661 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
Michael Chana4636962009-06-08 18:14:43 -07004662 (L2_KWQE_OPCODE_VALUE_FLUSH <<
4663 KWQE_OPCODE_SHIFT) | 2;
4664 dev->submit_kwqes(dev, wqes, 1);
4665}
4666
4667static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4668{
4669 struct cnic_local *cp = dev->cnic_priv;
4670 u32 val;
4671
4672 val = cp->func << 2;
4673
4674 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4675
4676 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4677 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
4678 dev->mac_addr[0] = (u8) (val >> 8);
4679 dev->mac_addr[1] = (u8) val;
4680
4681 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4682
4683 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4684 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
4685 dev->mac_addr[2] = (u8) (val >> 24);
4686 dev->mac_addr[3] = (u8) (val >> 16);
4687 dev->mac_addr[4] = (u8) (val >> 8);
4688 dev->mac_addr[5] = (u8) val;
4689
4690 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4691
4692 val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
Michael Chan4ce45e02012-12-06 10:33:10 +00004693 if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
Michael Chana4636962009-06-08 18:14:43 -07004694 val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
4695
4696 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4697 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4698 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
4699}
4700
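/* bnx2 bring-up sequence: program the iSCSI MAC, size the MQ
 * kernel-bypass block to the CNIC page size (capped at 4K), set
 * host-coalescing defaults, load the 5709 page table, initialize the
 * KWQ/KCQ contexts and the L2 rings, and only then release the CP
 * and COM processors by writing their scratch doorbells.
 */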
4701static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4702{
4703 struct cnic_local *cp = dev->cnic_priv;
4704 struct cnic_eth_dev *ethdev = cp->ethdev;
Michael Chana4dde3a2010-02-24 14:42:08 +00004705 struct status_block *sblk = cp->status_blk.gen;
Michael Chane6c28892010-06-24 14:58:39 +00004706 u32 val, kcq_cid_addr, kwq_cid_addr;
Michael Chana4636962009-06-08 18:14:43 -07004707 int err;
4708
4709 cnic_set_bnx2_mac(dev);
4710
4711 val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4712 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
Michael Chanbe1fefc2014-03-17 19:19:07 -08004713 if (CNIC_PAGE_BITS > 12)
Michael Chana4636962009-06-08 18:14:43 -07004714 val |= (12 - 8) << 4;
4715 else
Michael Chanbe1fefc2014-03-17 19:19:07 -08004716 val |= (CNIC_PAGE_BITS - 8) << 4;
Michael Chana4636962009-06-08 18:14:43 -07004717
4718 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4719
4720 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4721 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4722 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4723
4724 err = cnic_setup_5709_context(dev, 1);
4725 if (err)
4726 return err;
4727
4728 cnic_init_context(dev, KWQ_CID);
4729 cnic_init_context(dev, KCQ_CID);
4730
Michael Chane6c28892010-06-24 14:58:39 +00004731 kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
Michael Chana4636962009-06-08 18:14:43 -07004732 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
4733
4734 cp->max_kwq_idx = MAX_KWQ_IDX;
4735 cp->kwq_prod_idx = 0;
4736 cp->kwq_con_idx = 0;
Michael Chan1f1332a2010-05-18 11:32:52 +00004737 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
Michael Chana4636962009-06-08 18:14:43 -07004738
Michael Chan4ce45e02012-12-06 10:33:10 +00004739 if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708)
Michael Chana4636962009-06-08 18:14:43 -07004740 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4741 else
4742 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4743
4744 /* Initialize the kernel work queue context. */
4745 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
Michael Chanbe1fefc2014-03-17 19:19:07 -08004746 (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
Michael Chane6c28892010-06-24 14:58:39 +00004747 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
Michael Chana4636962009-06-08 18:14:43 -07004748
Michael Chanbe1fefc2014-03-17 19:19:07 -08004749 val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
Michael Chane6c28892010-06-24 14:58:39 +00004750 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
Michael Chana4636962009-06-08 18:14:43 -07004751
Michael Chanbe1fefc2014-03-17 19:19:07 -08004752 val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
Michael Chane6c28892010-06-24 14:58:39 +00004753 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
Michael Chana4636962009-06-08 18:14:43 -07004754
4755 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
Michael Chane6c28892010-06-24 14:58:39 +00004756 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
Michael Chana4636962009-06-08 18:14:43 -07004757
4758 val = (u32) cp->kwq_info.pgtbl_map;
Michael Chane6c28892010-06-24 14:58:39 +00004759 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
Michael Chana4636962009-06-08 18:14:43 -07004760
Michael Chane6c28892010-06-24 14:58:39 +00004761 kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
4762 cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
Michael Chana4636962009-06-08 18:14:43 -07004763
Michael Chane6c28892010-06-24 14:58:39 +00004764 cp->kcq1.sw_prod_idx = 0;
4765 cp->kcq1.hw_prod_idx_ptr =
Joe Perches64699332012-06-04 12:44:16 +00004766 &sblk->status_completion_producer_index;
Michael Chane6c28892010-06-24 14:58:39 +00004767
Joe Perches64699332012-06-04 12:44:16 +00004768 cp->kcq1.status_idx_ptr = &sblk->status_idx;
Michael Chana4636962009-06-08 18:14:43 -07004769
4770	/* Initialize the kernel completion queue context. */
4771 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
Michael Chanbe1fefc2014-03-17 19:19:07 -08004772 (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
Michael Chane6c28892010-06-24 14:58:39 +00004773 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
Michael Chana4636962009-06-08 18:14:43 -07004774
Michael Chanbe1fefc2014-03-17 19:19:07 -08004775 val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
Michael Chane6c28892010-06-24 14:58:39 +00004776 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
Michael Chana4636962009-06-08 18:14:43 -07004777
Michael Chanbe1fefc2014-03-17 19:19:07 -08004778 val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
Michael Chane6c28892010-06-24 14:58:39 +00004779 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
Michael Chana4636962009-06-08 18:14:43 -07004780
Michael Chane6c28892010-06-24 14:58:39 +00004781 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
4782 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
Michael Chana4636962009-06-08 18:14:43 -07004783
Michael Chane6c28892010-06-24 14:58:39 +00004784 val = (u32) cp->kcq1.dma.pgtbl_map;
4785 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
Michael Chana4636962009-06-08 18:14:43 -07004786
4787 cp->int_num = 0;
4788 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
Michael Chane6c28892010-06-24 14:58:39 +00004789 struct status_block_msix *msblk = cp->status_blk.bnx2;
Michael Chana4636962009-06-08 18:14:43 -07004790 u32 sb_id = cp->status_blk_num;
Michael Chand0549382009-10-28 03:41:59 -07004791 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
Michael Chana4636962009-06-08 18:14:43 -07004792
Michael Chane6c28892010-06-24 14:58:39 +00004793 cp->kcq1.hw_prod_idx_ptr =
Joe Perches64699332012-06-04 12:44:16 +00004794 &msblk->status_completion_producer_index;
4795 cp->kcq1.status_idx_ptr = &msblk->status_idx;
4796 cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
Michael Chana4636962009-06-08 18:14:43 -07004797 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
Michael Chane6c28892010-06-24 14:58:39 +00004798 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4799 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
Michael Chana4636962009-06-08 18:14:43 -07004800 }
4801
4802	/* Enable Command Scheduler notification when we write to the
4803 * host producer index of the kernel contexts. */
4804 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4805
4806 /* Enable Command Scheduler notification when we write to either
4807 * the Send Queue or Receive Queue producer indexes of the kernel
4808 * bypass contexts. */
4809 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4810 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4811
4812	/* Notify COM when the driver posts an application buffer. */
4813 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4814
4815	/* Set the CP and COM doorbells. These two processors poll the
4816	 * doorbell for a non-zero value before running. This must be done
4817 * after setting up the kernel queue contexts. */
4818 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4819 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4820
4821 cnic_init_bnx2_tx_ring(dev);
4822 cnic_init_bnx2_rx_ring(dev);
4823
4824 err = cnic_init_bnx2_irq(dev);
4825 if (err) {
Joe Perchesddf79b22010-02-17 15:01:54 +00004826 netdev_err(dev->netdev, "cnic_init_irq failed\n");
Michael Chana4636962009-06-08 18:14:43 -07004827 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4828 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4829 return err;
4830 }
4831
Michael Chanad9b4352013-01-23 03:21:52 +00004832 ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
4833
Michael Chana4636962009-06-08 18:14:43 -07004834 return 0;
4835}
4836
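/* Write the context page addresses (aligned up to cp->ctx_align when
 * required) into the chip's context table starting at
 * ethdev->ctx_tbl_offset.
 */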
Michael Chan71034ba2009-10-10 13:46:59 +00004837static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4838{
4839 struct cnic_local *cp = dev->cnic_priv;
4840 struct cnic_eth_dev *ethdev = cp->ethdev;
4841 u32 start_offset = ethdev->ctx_tbl_offset;
4842 int i;
4843
4844 for (i = 0; i < cp->ctx_blks; i++) {
4845 struct cnic_ctx *ctx = &cp->ctx_arr[i];
4846 dma_addr_t map = ctx->mapping;
4847
4848 if (cp->ctx_align) {
4849 unsigned long mask = cp->ctx_align - 1;
4850
4851 map = (map + mask) & ~mask;
4852 }
4853
4854 cnic_ctx_tbl_wr(dev, start_offset + i, map);
4855 }
4856}
4857
4858static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4859{
4860 struct cnic_local *cp = dev->cnic_priv;
4861 struct cnic_eth_dev *ethdev = cp->ethdev;
4862 int err = 0;
4863
Joe Perches164165d2009-11-19 09:30:10 +00004864 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
Michael Chan71034ba2009-10-10 13:46:59 +00004865 (unsigned long) dev);
Michael Chan6e0dc642010-10-13 14:06:44 +00004866 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4867 err = cnic_request_irq(dev);
4868
Michael Chan71034ba2009-10-10 13:46:59 +00004869 return err;
4870}
4871
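/* Host-coalescing helpers for bnx2x: poke one index of a status
 * block's hc_status_block_data in CSTORM memory to flip its
 * HC_ENABLED flag, and, when enabling interrupts, arm the iSCSI EQ
 * consumer index with a 64/4 timeout value.
 */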
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004872static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4873 u16 sb_id, u8 sb_index,
4874 u8 disable)
4875{
Michael Chan68c64d22012-12-06 10:33:11 +00004876 struct bnx2x *bp = netdev_priv(dev->netdev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004877
4878 u32 addr = BAR_CSTRORM_INTMEM +
4879 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4880 offsetof(struct hc_status_block_data_e1x, index_data) +
4881 sizeof(struct hc_index_data)*sb_index +
4882 offsetof(struct hc_index_data, flags);
4883 u16 flags = CNIC_RD16(dev, addr);
4884 /* clear and set */
4885 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4886 flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4887 HC_INDEX_DATA_HC_ENABLED);
4888 CNIC_WR16(dev, addr, flags);
4889}
4890
Michael Chan71034ba2009-10-10 13:46:59 +00004891static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4892{
4893 struct cnic_local *cp = dev->cnic_priv;
Michael Chan68c64d22012-12-06 10:33:11 +00004894 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chan71034ba2009-10-10 13:46:59 +00004895 u8 sb_id = cp->status_blk_num;
Michael Chan71034ba2009-10-10 13:46:59 +00004896
4897 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004898 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4899 offsetof(struct hc_status_block_data_e1x, index_data) +
4900 sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004901 offsetof(struct hc_index_data, timeout), 64 / 4);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004902 cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
Michael Chan71034ba2009-10-10 13:46:59 +00004903}
4904
4905static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4906{
4907}
4908
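/* Fill the TX half of the client_init ramrod data.  The uio TX ring
 * is laid out as 3-BD groups (start/parse/reg BD); the parse BD
 * format differs between E1x and E2+ chips.  Per-client statistics
 * are zeroed and enabled when the iSCSI L2 client id maps to a valid
 * statistics counter.
 */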
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004909static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4910 struct client_init_ramrod_data *data)
Michael Chan71034ba2009-10-10 13:46:59 +00004911{
4912 struct cnic_local *cp = dev->cnic_priv;
Michael Chan104a43e2013-09-02 11:42:28 -07004913 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chancd801532010-10-13 14:06:49 +00004914 struct cnic_uio_dev *udev = cp->udev;
4915 union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
4916 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004917 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
Michael Chan71034ba2009-10-10 13:46:59 +00004918 int i;
Michael Chan5159fdc2010-12-23 07:42:59 +00004919 u32 cli = cp->ethdev->iscsi_l2_client_id;
Michael Chan71034ba2009-10-10 13:46:59 +00004920 u32 val;
4921
Michael Chanbe1fefc2014-03-17 19:19:07 -08004922 memset(txbd, 0, CNIC_PAGE_SIZE);
Michael Chan71034ba2009-10-10 13:46:59 +00004923
Michael Chancd801532010-10-13 14:06:49 +00004924 buf_map = udev->l2_buf_map;
Michael Chan2bc40782012-12-06 10:33:09 +00004925 for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
Michael Chan71034ba2009-10-10 13:46:59 +00004926 struct eth_tx_start_bd *start_bd = &txbd->start_bd;
Yuval Mintz96bed4b2012-10-01 03:46:19 +00004927 struct eth_tx_parse_bd_e1x *pbd_e1x =
4928 &((txbd + 1)->parse_bd_e1x);
4929 struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
Michael Chan71034ba2009-10-10 13:46:59 +00004930 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
4931
4932 start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4933 start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4934 reg_bd->addr_hi = start_bd->addr_hi;
4935 reg_bd->addr_lo = start_bd->addr_lo + 0x10;
4936 start_bd->nbytes = cpu_to_le16(0x10);
4937 start_bd->nbd = cpu_to_le16(3);
4938 start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Yuval Mintz96bed4b2012-10-01 03:46:19 +00004939 start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
Michael Chan71034ba2009-10-10 13:46:59 +00004940 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
4941
Michael Chan104a43e2013-09-02 11:42:28 -07004942 if (BNX2X_CHIP_IS_E2_PLUS(bp))
Yuval Mintz96bed4b2012-10-01 03:46:19 +00004943 pbd_e2->parsing_data = (UNICAST_ADDRESS <<
Michael Chan4ce45e02012-12-06 10:33:10 +00004944 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
Yuval Mintz96bed4b2012-10-01 03:46:19 +00004945 else
Michael Chan4ce45e02012-12-06 10:33:10 +00004946 pbd_e1x->global_data = (UNICAST_ADDRESS <<
Yuval Mintz96bed4b2012-10-01 03:46:19 +00004947 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
Michael Chan71034ba2009-10-10 13:46:59 +00004948 }
Michael Chan71034ba2009-10-10 13:46:59 +00004949
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004950 val = (u64) ring_map >> 32;
Michael Chan71034ba2009-10-10 13:46:59 +00004951 txbd->next_bd.addr_hi = cpu_to_le32(val);
4952
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004953 data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
Michael Chan71034ba2009-10-10 13:46:59 +00004954
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004955 val = (u64) ring_map & 0xffffffff;
Michael Chan71034ba2009-10-10 13:46:59 +00004956 txbd->next_bd.addr_lo = cpu_to_le32(val);
4957
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004958 data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
Michael Chan71034ba2009-10-10 13:46:59 +00004959
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004960 /* Other ramrod params */
4961 data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
4962 data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
Michael Chan71034ba2009-10-10 13:46:59 +00004963
4964 /* reset xstorm per client statistics */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004965 if (cli < MAX_STAT_COUNTER_ID) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004966 data->general.statistics_zero_flg = 1;
4967 data->general.statistics_en_flg = 1;
4968 data->general.statistics_counter_id = cli;
Dmitry Kravkov6b2a5412010-06-23 11:57:09 -07004969 }
Michael Chan71034ba2009-10-10 13:46:59 +00004970
4971 cp->tx_cons_ptr =
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004972 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
Michael Chan71034ba2009-10-10 13:46:59 +00004973}
4974
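/* Fill the RX half of the client_init ramrod data.  Page 1 of
 * udev->l2_ring carries the RX BD chain and page 2 the RCQ; silent
 * VLAN removal is enabled (value 0, mask 0xffff) and the consumer
 * pointer is taken from the default status block.
 */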
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004975static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4976 struct client_init_ramrod_data *data)
Michael Chan71034ba2009-10-10 13:46:59 +00004977{
4978 struct cnic_local *cp = dev->cnic_priv;
Michael Chan104a43e2013-09-02 11:42:28 -07004979 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chancd801532010-10-13 14:06:49 +00004980 struct cnic_uio_dev *udev = cp->udev;
4981 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
Michael Chanbe1fefc2014-03-17 19:19:07 -08004982 CNIC_PAGE_SIZE);
Michael Chan71034ba2009-10-10 13:46:59 +00004983 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
Michael Chanbe1fefc2014-03-17 19:19:07 -08004984 (udev->l2_ring + (2 * CNIC_PAGE_SIZE));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004985 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
Michael Chan71034ba2009-10-10 13:46:59 +00004986 int i;
Michael Chan5159fdc2010-12-23 07:42:59 +00004987 u32 cli = cp->ethdev->iscsi_l2_client_id;
Michael Chan104a43e2013-09-02 11:42:28 -07004988 int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
Michael Chan71034ba2009-10-10 13:46:59 +00004989 u32 val;
Michael Chancd801532010-10-13 14:06:49 +00004990 dma_addr_t ring_map = udev->l2_ring_map;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004991
4992 /* General data */
4993 data->general.client_id = cli;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004994 data->general.activate_flg = 1;
4995 data->general.sp_client_id = cli;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004996 data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
Michael Chana5b3c4a2013-09-02 11:42:31 -07004997 data->general.func_id = bp->pfid;
Michael Chan71034ba2009-10-10 13:46:59 +00004998
4999 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
5000 dma_addr_t buf_map;
5001 int n = (i % cp->l2_rx_ring_size) + 1;
5002
Michael Chancd801532010-10-13 14:06:49 +00005003 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
Michael Chan71034ba2009-10-10 13:46:59 +00005004 rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
5005 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
5006 }
Michael Chan71034ba2009-10-10 13:46:59 +00005007
Michael Chanbe1fefc2014-03-17 19:19:07 -08005008 val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
Michael Chan71034ba2009-10-10 13:46:59 +00005009 rxbd->addr_hi = cpu_to_le32(val);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005010 data->rx.bd_page_base.hi = cpu_to_le32(val);
Michael Chan71034ba2009-10-10 13:46:59 +00005011
Michael Chanbe1fefc2014-03-17 19:19:07 -08005012 val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
Michael Chan71034ba2009-10-10 13:46:59 +00005013 rxbd->addr_lo = cpu_to_le32(val);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005014 data->rx.bd_page_base.lo = cpu_to_le32(val);
Michael Chan71034ba2009-10-10 13:46:59 +00005015
5016 rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
Michael Chanbe1fefc2014-03-17 19:19:07 -08005017 val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
Michael Chan71034ba2009-10-10 13:46:59 +00005018 rxcqe->addr_hi = cpu_to_le32(val);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005019 data->rx.cqe_page_base.hi = cpu_to_le32(val);
Michael Chan71034ba2009-10-10 13:46:59 +00005020
Michael Chanbe1fefc2014-03-17 19:19:07 -08005021 val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
Michael Chan71034ba2009-10-10 13:46:59 +00005022 rxcqe->addr_lo = cpu_to_le32(val);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005023 data->rx.cqe_page_base.lo = cpu_to_le32(val);
Michael Chan71034ba2009-10-10 13:46:59 +00005024
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005025 /* Other ramrod params */
5026 data->rx.client_qzone_id = cl_qzone_id;
5027 data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
5028 data->rx.status_block_id = BNX2X_DEF_SB_ID;
Michael Chan71034ba2009-10-10 13:46:59 +00005029
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005030 data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
Michael Chan71034ba2009-10-10 13:46:59 +00005031
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005032 data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005033 data->rx.outer_vlan_removal_enable_flg = 1;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005034 data->rx.silent_vlan_removal_flg = 1;
5035 data->rx.silent_vlan_value = 0;
5036 data->rx.silent_vlan_mask = 0xffff;
Michael Chan71034ba2009-10-10 13:46:59 +00005037
5038 cp->rx_cons_ptr =
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005039 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
Michael Chan5159fdc2010-12-23 07:42:59 +00005040 cp->rx_cons = *cp->rx_cons_ptr;
Michael Chan71034ba2009-10-10 13:46:59 +00005041}
5042
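/* Point the kernel completion queues at storm memory: KCQ1's
 * producer lives at the CSTORM iSCSI EQ offset on all chips, while
 * E2+ parts also get a second queue, KCQ2, at the USTORM FCoE EQ
 * offset.
 */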
Michael Chane21ba412010-12-23 07:43:03 +00005043static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
5044{
5045 struct cnic_local *cp = dev->cnic_priv;
Michael Chan68c64d22012-12-06 10:33:11 +00005046 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chana5b3c4a2013-09-02 11:42:31 -07005047 u32 pfid = bp->pfid;
Michael Chane21ba412010-12-23 07:43:03 +00005048
5049 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
5050 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
5051 cp->kcq1.sw_prod_idx = 0;
5052
Michael Chan104a43e2013-09-02 11:42:28 -07005053 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
Michael Chane21ba412010-12-23 07:43:03 +00005054 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5055
5056 cp->kcq1.hw_prod_idx_ptr =
5057 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
5058 cp->kcq1.status_idx_ptr =
5059 &sb->sb.running_index[SM_RX_ID];
5060 } else {
5061 struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
5062
5063 cp->kcq1.hw_prod_idx_ptr =
5064 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
5065 cp->kcq1.status_idx_ptr =
5066 &sb->sb.running_index[SM_RX_ID];
5067 }
5068
Michael Chan104a43e2013-09-02 11:42:28 -07005069 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
Michael Chane21ba412010-12-23 07:43:03 +00005070 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5071
5072 cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
5073 USTORM_FCOE_EQ_PROD_OFFSET(pfid);
5074 cp->kcq2.sw_prod_idx = 0;
5075 cp->kcq2.hw_prod_idx_ptr =
5076 &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
5077 cp->kcq2.status_idx_ptr =
5078 &sb->sb.running_index[SM_RX_ID];
5079 }
5080}
5081
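/* bnx2x bring-up: allocate the iSCSI (and, on E2+, FCoE) CID tables,
 * set up the KCQ pointers, program the single iSCSI event queue and
 * the global buffer addresses into CSTORM/USTORM, write the context
 * table, then request the IRQ.
 */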
Michael Chan71034ba2009-10-10 13:46:59 +00005082static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
5083{
5084 struct cnic_local *cp = dev->cnic_priv;
Michael Chan68c64d22012-12-06 10:33:11 +00005085 struct bnx2x *bp = netdev_priv(dev->netdev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005086 struct cnic_eth_dev *ethdev = cp->ethdev;
Michael Chan68c64d22012-12-06 10:33:11 +00005087 int func, ret;
Michael Chan14203982010-10-06 03:16:06 +00005088 u32 pfid;
Michael Chan71034ba2009-10-10 13:46:59 +00005089
Michael Chana9e0a4f2012-01-04 12:12:27 +00005090 dev->stats_addr = ethdev->addr_drv_info_to_mcp;
Michael Chan68c64d22012-12-06 10:33:11 +00005091 cp->func = bp->pf_num;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005092
Michael Chan68c64d22012-12-06 10:33:11 +00005093 func = CNIC_FUNC(cp);
Michael Chana5b3c4a2013-09-02 11:42:31 -07005094 pfid = bp->pfid;
Michael Chan14203982010-10-06 03:16:06 +00005095
Michael Chan71034ba2009-10-10 13:46:59 +00005096 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
Eddie Wai11f23aa2011-06-08 19:29:34 +00005097 cp->iscsi_start_cid, 0);
Michael Chan71034ba2009-10-10 13:46:59 +00005098
5099 if (ret)
5100 return -ENOMEM;
5101
Michael Chan104a43e2013-09-02 11:42:28 -07005102 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
Michael Chandc219a22011-08-26 09:45:39 +00005103 ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
Eddie Wai11f23aa2011-06-08 19:29:34 +00005104 cp->fcoe_start_cid, 0);
Michael Chane1928c82010-12-23 07:43:04 +00005105
5106 if (ret)
5107 return -ENOMEM;
5108 }
5109
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005110 cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
5111
Michael Chane21ba412010-12-23 07:43:03 +00005112 cnic_init_bnx2x_kcq(dev);
Michael Chan71034ba2009-10-10 13:46:59 +00005113
Michael Chan71034ba2009-10-10 13:46:59 +00005114 /* Only 1 EQ */
Michael Chane6c28892010-06-24 14:58:39 +00005115 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
Michael Chan71034ba2009-10-10 13:46:59 +00005116 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005117 CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
Michael Chan71034ba2009-10-10 13:46:59 +00005118 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005119 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
Michael Chane6c28892010-06-24 14:58:39 +00005120 cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
Michael Chan71034ba2009-10-10 13:46:59 +00005121 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005122 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
Michael Chane6c28892010-06-24 14:58:39 +00005123 (u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
Michael Chan71034ba2009-10-10 13:46:59 +00005124 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005125 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
Michael Chane6c28892010-06-24 14:58:39 +00005126 cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
Michael Chan71034ba2009-10-10 13:46:59 +00005127 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005128 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
Michael Chane6c28892010-06-24 14:58:39 +00005129 (u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
Michael Chan71034ba2009-10-10 13:46:59 +00005130 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005131 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
Michael Chan71034ba2009-10-10 13:46:59 +00005132 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005133 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
Michael Chan71034ba2009-10-10 13:46:59 +00005134 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005135 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005136 HC_INDEX_ISCSI_EQ_CONS);
Michael Chan71034ba2009-10-10 13:46:59 +00005137
Michael Chan71034ba2009-10-10 13:46:59 +00005138 CNIC_WR(dev, BAR_USTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005139 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
Michael Chan71034ba2009-10-10 13:46:59 +00005140 cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
5141 CNIC_WR(dev, BAR_USTRORM_INTMEM +
Michael Chan14203982010-10-06 03:16:06 +00005142 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
Michael Chan71034ba2009-10-10 13:46:59 +00005143 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
5144
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005145 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
5146 TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
5147
Michael Chan71034ba2009-10-10 13:46:59 +00005148 cnic_setup_bnx2x_context(dev);
5149
Michael Chan71034ba2009-10-10 13:46:59 +00005150 ret = cnic_init_bnx2x_irq(dev);
5151 if (ret)
5152 return ret;
5153
Michael Chanad9b4352013-01-23 03:21:52 +00005154 ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
Michael Chan71034ba2009-10-10 13:46:59 +00005155 return 0;
5156}
5157
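/* One-shot L2 ring bring-up (guarded by CNIC_LCL_FL_RINGS_INITED).
 * On bnx2x this writes the USTORM RX producers, builds the
 * client_init ramrod data in the uio buffer, posts CLIENT_SETUP and
 * waits up to ~10 ms for the completion event to clear
 * CNIC_LCL_FL_L2_WAIT before enabling the ring and exporting the
 * CID/doorbell info to userspace.
 */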
Michael Chan86b53602009-10-10 13:46:57 +00005158static void cnic_init_rings(struct cnic_dev *dev)
5159{
Michael Chan541a7812010-10-06 03:17:22 +00005160 struct cnic_local *cp = dev->cnic_priv;
Michael Chan68c64d22012-12-06 10:33:11 +00005161 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chancd801532010-10-13 14:06:49 +00005162 struct cnic_uio_dev *udev = cp->udev;
Michael Chan541a7812010-10-06 03:17:22 +00005163
5164 if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5165 return;
5166
Michael Chan86b53602009-10-10 13:46:57 +00005167 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5168 cnic_init_bnx2_tx_ring(dev);
5169 cnic_init_bnx2_rx_ring(dev);
Michael Chan541a7812010-10-06 03:17:22 +00005170 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
Michael Chan71034ba2009-10-10 13:46:59 +00005171 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
Michael Chan5159fdc2010-12-23 07:42:59 +00005172 u32 cli = cp->ethdev->iscsi_l2_client_id;
5173 u32 cid = cp->ethdev->iscsi_l2_cid;
Michael Chan68d7c1a2011-01-05 15:14:13 +00005174 u32 cl_qzone_id;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005175 struct client_init_ramrod_data *data;
Michael Chan71034ba2009-10-10 13:46:59 +00005176 union l5cm_specific_data l5_data;
5177 struct ustorm_eth_rx_producers rx_prods = {0};
Michael Chane1dd8832011-07-13 17:24:19 +00005178 u32 off, i, *cid_ptr;
Michael Chan71034ba2009-10-10 13:46:59 +00005179
5180 rx_prods.bd_prod = 0;
5181 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
5182 barrier();
5183
Michael Chan104a43e2013-09-02 11:42:28 -07005184 cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005185
Michael Chanc7596b72009-12-02 15:15:35 +00005186 off = BAR_USTRORM_INTMEM +
Michael Chan104a43e2013-09-02 11:42:28 -07005187 (BNX2X_CHIP_IS_E2_PLUS(bp) ?
Michael Chanee87a822010-10-13 14:06:51 +00005188 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
Michael Chan5bf945a2013-09-02 11:42:30 -07005189 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli));
Michael Chan71034ba2009-10-10 13:46:59 +00005190
5191 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
Michael Chanc7596b72009-12-02 15:15:35 +00005192 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
Michael Chan71034ba2009-10-10 13:46:59 +00005193
Michael Chan48f753d2010-05-18 11:32:53 +00005194 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5195
Michael Chancd801532010-10-13 14:06:49 +00005196 data = udev->l2_buf;
Michael Chane1dd8832011-07-13 17:24:19 +00005197 cid_ptr = udev->l2_buf + 12;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005198
5199 memset(data, 0, sizeof(*data));
5200
5201 cnic_init_bnx2x_tx_ring(dev, data);
5202 cnic_init_bnx2x_rx_ring(dev, data);
5203
Michael Chancd801532010-10-13 14:06:49 +00005204 l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
5205 l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005206
Michael Chan541a7812010-10-06 03:17:22 +00005207 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5208
Michael Chan71034ba2009-10-10 13:46:59 +00005209 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
Michael Chan68d7c1a2011-01-05 15:14:13 +00005210 cid, ETH_CONNECTION_TYPE, &l5_data);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005211
Michael Chan48f753d2010-05-18 11:32:53 +00005212 i = 0;
5213 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5214 ++i < 10)
5215 msleep(1);
5216
5217 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5218 netdev_err(dev->netdev,
5219 "iSCSI CLIENT_SETUP did not complete\n");
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00005220 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
Michael Chan5159fdc2010-12-23 07:42:59 +00005221 cnic_ring_ctl(dev, cid, cli, 1);
Michael Chanf78afb32013-09-18 01:50:38 -07005222 *cid_ptr = cid >> 4;
5223 *(cid_ptr + 1) = cid * bp->db_size;
Eddie Waid15e2a92013-12-31 23:18:34 -08005224 *(cid_ptr + 2) = UIO_USE_TX_DOORBELL;
Michael Chan86b53602009-10-10 13:46:57 +00005225 }
5226}
5227
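/* Reverse of cnic_init_rings(): on bnx2x, disable the ring, post
 * ETH_HALT and wait for its completion, return the SPQ credit, then
 * issue CFC_DEL; finally clear RINGS_INITED and scrub the RX page.
 */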
5228static void cnic_shutdown_rings(struct cnic_dev *dev)
5229{
Michael Chan541a7812010-10-06 03:17:22 +00005230 struct cnic_local *cp = dev->cnic_priv;
Michael Chane1dd8832011-07-13 17:24:19 +00005231 struct cnic_uio_dev *udev = cp->udev;
5232 void *rx_ring;
Michael Chan541a7812010-10-06 03:17:22 +00005233
5234 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5235 return;
5236
Michael Chan86b53602009-10-10 13:46:57 +00005237 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5238 cnic_shutdown_bnx2_rx_ring(dev);
Michael Chan71034ba2009-10-10 13:46:59 +00005239 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
Michael Chan5159fdc2010-12-23 07:42:59 +00005240 u32 cli = cp->ethdev->iscsi_l2_client_id;
5241 u32 cid = cp->ethdev->iscsi_l2_cid;
Michael Chan8b065b62009-12-02 15:15:36 +00005242 union l5cm_specific_data l5_data;
Michael Chan48f753d2010-05-18 11:32:53 +00005243 int i;
Michael Chan71034ba2009-10-10 13:46:59 +00005244
Michael Chan5159fdc2010-12-23 07:42:59 +00005245 cnic_ring_ctl(dev, cid, cli, 0);
Michael Chan8b065b62009-12-02 15:15:36 +00005246
Michael Chan48f753d2010-05-18 11:32:53 +00005247 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5248
Michael Chan8b065b62009-12-02 15:15:36 +00005249 l5_data.phy_address.lo = cli;
5250 l5_data.phy_address.hi = 0;
5251 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
Michael Chan5159fdc2010-12-23 07:42:59 +00005252 cid, ETH_CONNECTION_TYPE, &l5_data);
Michael Chan48f753d2010-05-18 11:32:53 +00005253 i = 0;
5254 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5255 ++i < 10)
5256 msleep(1);
5257
5258 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5259 netdev_err(dev->netdev,
5260 "iSCSI CLIENT_HALT did not complete\n");
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00005261 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
Michael Chan1bcdc322009-12-10 15:40:57 +00005262
5263 memset(&l5_data, 0, sizeof(l5_data));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005264 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
Michael Chan68d7c1a2011-01-05 15:14:13 +00005265 cid, NONE_CONNECTION_TYPE, &l5_data);
Michael Chan1bcdc322009-12-10 15:40:57 +00005266 msleep(10);
Michael Chan86b53602009-10-10 13:46:57 +00005267 }
Michael Chan541a7812010-10-06 03:17:22 +00005268 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
Michael Chanbe1fefc2014-03-17 19:19:07 -08005269 rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
5270 memset(rx_ring, 0, CNIC_PAGE_SIZE);
Michael Chan86b53602009-10-10 13:46:57 +00005271}
5272
Michael Chana3059b12009-08-14 15:49:44 +00005273static int cnic_register_netdev(struct cnic_dev *dev)
5274{
5275 struct cnic_local *cp = dev->cnic_priv;
5276 struct cnic_eth_dev *ethdev = cp->ethdev;
5277 int err;
5278
5279 if (!ethdev)
5280 return -ENODEV;
5281
5282 if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
5283 return 0;
5284
5285 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
5286 if (err)
Joe Perchesddf79b22010-02-17 15:01:54 +00005287 netdev_err(dev->netdev, "register_cnic failed\n");
Michael Chana3059b12009-08-14 15:49:44 +00005288
Michael Chan9e9402e2013-08-02 11:28:23 -07005289	/* Read iSCSI config again. On some bnx2x devices, iSCSI config
5290 * can change after firmware is downloaded.
5291 */
5292 dev->max_iscsi_conn = ethdev->max_iscsi_conn;
5293 if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
5294 dev->max_iscsi_conn = 0;
5295
Michael Chana3059b12009-08-14 15:49:44 +00005296 return err;
5297}
5298
5299static void cnic_unregister_netdev(struct cnic_dev *dev)
5300{
5301 struct cnic_local *cp = dev->cnic_priv;
5302 struct cnic_eth_dev *ethdev = cp->ethdev;
5303
5304 if (!ethdev)
5305 return;
5306
5307 ethdev->drv_unregister_cnic(dev->netdev);
5308}
5309
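/* Common bring-up path: map the ethdev's register view and status
 * block, allocate per-chip resources, run the chip-specific start_hw
 * and the connection-manager open, set CNIC_F_CNIC_UP and enable
 * interrupts; any failure unwinds through err1.
 */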
Michael Chana4636962009-06-08 18:14:43 -07005310static int cnic_start_hw(struct cnic_dev *dev)
5311{
5312 struct cnic_local *cp = dev->cnic_priv;
5313 struct cnic_eth_dev *ethdev = cp->ethdev;
5314 int err;
5315
5316 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
5317 return -EALREADY;
5318
Michael Chana4636962009-06-08 18:14:43 -07005319 dev->regview = ethdev->io_base;
Michael Chana4636962009-06-08 18:14:43 -07005320 pci_dev_get(dev->pcidev);
5321 cp->func = PCI_FUNC(dev->pcidev->devfn);
Michael Chana4dde3a2010-02-24 14:42:08 +00005322 cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
Michael Chana4636962009-06-08 18:14:43 -07005323 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
5324
5325 err = cp->alloc_resc(dev);
5326 if (err) {
Joe Perchesddf79b22010-02-17 15:01:54 +00005327 netdev_err(dev->netdev, "allocate resource failure\n");
Michael Chana4636962009-06-08 18:14:43 -07005328 goto err1;
5329 }
5330
5331 err = cp->start_hw(dev);
5332 if (err)
5333 goto err1;
5334
5335 err = cnic_cm_open(dev);
5336 if (err)
5337 goto err1;
5338
5339 set_bit(CNIC_F_CNIC_UP, &dev->flags);
5340
5341 cp->enable_int(dev);
5342
5343 return 0;
5344
5345err1:
Michael Chana4636962009-06-08 18:14:43 -07005346 cp->free_resc(dev);
5347 pci_dev_put(dev->pcidev);
Michael Chana4636962009-06-08 18:14:43 -07005348 return err;
5349}
5350
5351static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
5352{
Michael Chana4636962009-06-08 18:14:43 -07005353 cnic_disable_bnx2_int_sync(dev);
5354
5355 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
5356 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
5357
5358 cnic_init_context(dev, KWQ_CID);
5359 cnic_init_context(dev, KCQ_CID);
5360
5361 cnic_setup_5709_context(dev, 0);
5362 cnic_free_irq(dev);
5363
Michael Chana4636962009-06-08 18:14:43 -07005364 cnic_free_resc(dev);
5365}
5366
Michael Chan71034ba2009-10-10 13:46:59 +00005367
5368static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5369{
5370 struct cnic_local *cp = dev->cnic_priv;
Michael Chan68c64d22012-12-06 10:33:11 +00005371 struct bnx2x *bp = netdev_priv(dev->netdev);
Michael Chancaa9e932012-12-05 10:10:14 +00005372 u32 hc_index = HC_INDEX_ISCSI_EQ_CONS;
5373 u32 sb_id = cp->status_blk_num;
5374 u32 idx_off, syn_off;
Michael Chan71034ba2009-10-10 13:46:59 +00005375
5376 cnic_free_irq(dev);
Michael Chancaa9e932012-12-05 10:10:14 +00005377
Michael Chan104a43e2013-09-02 11:42:28 -07005378 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
Michael Chancaa9e932012-12-05 10:10:14 +00005379 idx_off = offsetof(struct hc_status_block_e2, index_values) +
5380 (hc_index * sizeof(u16));
5381
5382 syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id);
5383 } else {
5384 idx_off = offsetof(struct hc_status_block_e1x, index_values) +
5385 (hc_index * sizeof(u16));
5386
5387 syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id);
5388 }
5389 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
5390 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
5391 idx_off, 0);
5392
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005393 *cp->kcq1.hw_prod_idx_ptr = 0;
Michael Chan4e9c4fd2009-12-10 15:40:58 +00005394 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
Michael Chana5b3c4a2013-09-02 11:42:31 -07005395 CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0);
Michael Chane6c28892010-06-24 14:58:39 +00005396 CNIC_WR16(dev, cp->kcq1.io_addr, 0);
Michael Chan71034ba2009-10-10 13:46:59 +00005397 cnic_free_resc(dev);
5398}
5399
Michael Chana4636962009-06-08 18:14:43 -07005400static void cnic_stop_hw(struct cnic_dev *dev)
5401{
5402 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5403 struct cnic_local *cp = dev->cnic_priv;
Michael Chan48f753d2010-05-18 11:32:53 +00005404 int i = 0;
Michael Chana4636962009-06-08 18:14:43 -07005405
Michael Chan48f753d2010-05-18 11:32:53 +00005406 /* Need to wait for the ring shutdown event to complete
5407 * before clearing the CNIC_UP flag.
5408 */
Michael Chan82346a72012-09-08 06:01:05 +00005409 while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
Michael Chan48f753d2010-05-18 11:32:53 +00005410 msleep(100);
5411 i++;
5412 }
Michael Chana3ceeeb2010-10-13 14:06:50 +00005413 cnic_shutdown_rings(dev);
Michael Chana2028b232012-06-27 15:08:19 +00005414 cp->stop_cm(dev);
Michael Chanad9b4352013-01-23 03:21:52 +00005415 cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ;
Michael Chana4636962009-06-08 18:14:43 -07005416 clear_bit(CNIC_F_CNIC_UP, &dev->flags);
Eric Dumazet2cfa5a02011-11-23 07:09:32 +00005417 RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
Michael Chana4636962009-06-08 18:14:43 -07005418 synchronize_rcu();
5419 cnic_cm_shutdown(dev);
5420 cp->stop_hw(dev);
5421 pci_dev_put(dev->pcidev);
5422 }
5423}
5424
5425static void cnic_free_dev(struct cnic_dev *dev)
5426{
5427 int i = 0;
5428
5429 while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
5430 msleep(100);
5431 i++;
5432 }
5433 if (atomic_read(&dev->ref_count) != 0)
Joe Perchesddf79b22010-02-17 15:01:54 +00005434 netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
Michael Chana4636962009-06-08 18:14:43 -07005435
Joe Perchesddf79b22010-02-17 15:01:54 +00005436 netdev_info(dev->netdev, "Removed CNIC device\n");
Michael Chana4636962009-06-08 18:14:43 -07005437 dev_put(dev->netdev);
5438 kfree(dev);
5439}
5440
5441static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
5442 struct pci_dev *pdev)
5443{
5444 struct cnic_dev *cdev;
5445 struct cnic_local *cp;
5446 int alloc_size;
5447
5448 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
5449
Joe Perchesb2adaca2013-02-03 17:43:58 +00005450 cdev = kzalloc(alloc_size, GFP_KERNEL);
5451 if (cdev == NULL)
Michael Chana4636962009-06-08 18:14:43 -07005452 return NULL;
Michael Chana4636962009-06-08 18:14:43 -07005453
5454 cdev->netdev = dev;
5455 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
5456 cdev->register_device = cnic_register_device;
5457 cdev->unregister_device = cnic_unregister_device;
5458 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
5459
5460 cp = cdev->cnic_priv;
5461 cp->dev = cdev;
Michael Chana4636962009-06-08 18:14:43 -07005462 cp->l2_single_buf_size = 0x400;
5463 cp->l2_rx_ring_size = 3;
5464
5465 spin_lock_init(&cp->cnic_ulp_lock);
5466
Joe Perchesddf79b22010-02-17 15:01:54 +00005467 netdev_info(dev, "Added CNIC device\n");
Michael Chana4636962009-06-08 18:14:43 -07005468
5469 return cdev;
5470}
5471
5472static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5473{
5474 struct pci_dev *pdev;
5475 struct cnic_dev *cdev;
5476 struct cnic_local *cp;
Michael Chan4bd9b0ff2012-12-06 10:33:12 +00005477 struct bnx2 *bp = netdev_priv(dev);
Michael Chana4636962009-06-08 18:14:43 -07005478 struct cnic_eth_dev *ethdev = NULL;
Michael Chana4636962009-06-08 18:14:43 -07005479
Michael Chan4bd9b0ff2012-12-06 10:33:12 +00005480 if (bp->cnic_probe)
5481 ethdev = (bp->cnic_probe)(dev);
5482
Michael Chana4636962009-06-08 18:14:43 -07005483 if (!ethdev)
5484 return NULL;
5485
5486 pdev = ethdev->pdev;
5487 if (!pdev)
5488 return NULL;
5489
5490 dev_hold(dev);
5491 pci_dev_get(pdev);
Sergei Shtylyovff938e42011-02-28 11:57:33 -08005492 if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
5493 pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
5494 (pdev->revision < 0x10)) {
5495 pci_dev_put(pdev);
5496 goto cnic_err;
Michael Chana4636962009-06-08 18:14:43 -07005497 }
5498 pci_dev_put(pdev);
5499
5500 cdev = cnic_alloc_dev(dev, pdev);
5501 if (cdev == NULL)
5502 goto cnic_err;
5503
5504 set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
5505 cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
5506
5507 cp = cdev->cnic_priv;
5508 cp->ethdev = ethdev;
5509 cdev->pcidev = pdev;
Michael Chanee87a822010-10-13 14:06:51 +00005510 cp->chip_id = ethdev->chip_id;
Michael Chana4636962009-06-08 18:14:43 -07005511
Michael Chan7625eb22011-06-08 19:29:36 +00005512 cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5513
Michael Chana4636962009-06-08 18:14:43 -07005514 cp->cnic_ops = &cnic_bnx2_ops;
5515 cp->start_hw = cnic_start_bnx2_hw;
5516 cp->stop_hw = cnic_stop_bnx2_hw;
5517 cp->setup_pgtbl = cnic_setup_page_tbl;
5518 cp->alloc_resc = cnic_alloc_bnx2_resc;
5519 cp->free_resc = cnic_free_resc;
5520 cp->start_cm = cnic_cm_init_bnx2_hw;
5521 cp->stop_cm = cnic_cm_stop_bnx2_hw;
5522 cp->enable_int = cnic_enable_bnx2_int;
5523 cp->disable_int_sync = cnic_disable_bnx2_int_sync;
5524 cp->close_conn = cnic_close_bnx2_conn;
Michael Chana4636962009-06-08 18:14:43 -07005525 return cdev;
5526
5527cnic_err:
5528 dev_put(dev);
5529 return NULL;
5530}
5531
static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *ethdev = NULL;

	if (bp->cnic_probe)
		ethdev = bp->cnic_probe(dev);

	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->stats_addr = ethdev->addr_drv_info_to_mcp;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
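	/* FCoE limits are only meaningful on chips that support FCoE
	 * offload; cap the connection count at what cnic can handle.
	 */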
	if (CNIC_SUPPORTS_FCOE(bp)) {
		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
		cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
	}

	if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
		cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;

	memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
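	/* E2 and later chips use a different interrupt block, so the
	 * status-block ack/arm helpers are chip-dependent.
	 */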
	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		cp->ack_int = cnic_ack_bnx2x_e2_msix;
		cp->arm_int = cnic_arm_bnx2x_e2_msix;
	} else {
		cp->ack_int = cnic_ack_bnx2x_msix;
		cp->arm_int = cnic_arm_bnx2x_msix;
	}
	cp->close_conn = cnic_close_bnx2x_conn;
	return cdev;
}

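/* Identify a CNIC-capable net_device by its ethtool driver name and,
 * on a match, create the cnic_dev and add it to the global list.
 */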
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

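/* Fan a netdev event out to every registered ULP that implements an
 * indicate_netevent handler.  The ulp_ops array is RCU-protected, so a
 * plain rcu_read_lock() is sufficient on this read side.
 *
 * A hypothetical ULP handler (illustrative only, not part of this file)
 * might look like:
 *
 *	static void my_ulp_netevent(void *ctx, unsigned long event,
 *				    u16 vlan_id)
 *	{
 *		if (event == NETDEV_UP)
 *			my_ulp_resume(ctx);	(hypothetical helper)
 *	}
 */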
static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
			      u16 vlan_id)
{
	int if_type;

	rcu_read_lock();
	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;
		void *ctx;

		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
		if (!ulp_ops || !ulp_ops->indicate_netevent)
			continue;

		ctx = cp->ulp_handle[if_type];

		ulp_ops->indicate_netevent(ctx, event, vlan_id);
	}
	rcu_read_unlock();
}

/* netdev notifier: brings the CNIC device up/down in step with the
 * underlying net_device and relays other events to the ULPs.
 */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct cnic_dev *dev;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && event == NETDEV_REGISTER) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		cnic_rcv_netevent(cp, event, 0);

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	} else {
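		/* The event may be for a VLAN device stacked on top of a
		 * CNIC-capable device; if so, forward it to the real
		 * device's ULPs with the VLAN id attached.
		 */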
		struct net_device *realdev;
		u16 vid;

		vid = cnic_get_vlan(netdev, &realdev);
		if (realdev) {
			dev = cnic_from_netdev(realdev);
			if (dev) {
				vid |= VLAN_TAG_PRESENT;
				cnic_rcv_netevent(dev->cnic_priv, event, vid);
				cnic_put(dev);
			}
		}
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

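/* Free any UIO devices still on the global list; called on module
 * unload and on failed module init.
 */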
static void cnic_release(void)
{
	struct cnic_uio_dev *udev;

	while (!list_empty(&cnic_udev_list)) {
		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
				  list);
		cnic_free_uio(udev);
	}
}

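/* Module init: register the netdev notifier first so that existing
 * devices are discovered via the replayed NETDEV_REGISTER events, then
 * create the single-threaded workqueue used for deferred cnic work.
 */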
static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	cnic_wq = create_singlethread_workqueue("cnic_wq");
	if (!cnic_wq) {
		cnic_release();
		unregister_netdevice_notifier(&cnic_netdev_notifier);
		return -ENOMEM;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);