/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

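/*
 * UIO open handler: the device node backs the userspace iSCSI path, so
 * only CAP_NET_ADMIN may open it and only one process may hold it at a
 * time.  The L2 rings are reinitialized so the new owner starts clean.
 */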
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

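/*
 * Send an iSCSI netlink message (PATH_REQ when a socket is given,
 * otherwise IF_DOWN) to the userspace iSCSI daemon through the
 * registered ULP.  PATH_REQ is retried a few times since the daemon
 * may not be ready to service the request immediately.
 */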
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return 0;
}

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

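/*
 * The *_prep helpers below serialize connection state changes against
 * in-flight offload requests using the SK_F_OFFLD_SCHED bit: a close
 * or abort spins until any scheduled offload has finished before the
 * connection is torn down.
 */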
static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

static void cnic_uio_stop(void)
{
	struct cnic_dev *dev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	}
	read_unlock(&cnic_dev_lock);
}

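/*
 * Called by a ULP driver (e.g. bnx2i) to register its ops for one ULP
 * type.  ULP_F_INIT is cleared on all existing devices first so that
 * each one receives exactly one cnic_init() callback below.
 */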
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type]) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl[ulp_type];
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_uio_stop();

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type] == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl[ulp_type];
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;

}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

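/*
 * Simple bitmap-based ID allocator used for connection IDs.  New IDs
 * are handed out round-robin starting at "next", so recently freed
 * IDs are not immediately reused.
 */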
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = (u32) dma->pg_map_arr[i];
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = dma->pg_map_arr[i] & 0xffffffff;
		page_table++;
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

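/*
 * Allocate "pages" coherent DMA pages for a queue or table, plus an
 * optional page table that the chip walks to find the pages.  The
 * page-table entry format (big vs. little endian) is chip specific
 * and selected through cp->setup_pgtbl.
 */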
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

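/*
 * Allocate a kernel completion queue (KCQ).  On bnx2 the pages are
 * linked by a hardware page table; on bnx2x each page instead ends
 * with a bd_chain_next element pointing to the next page, forming a
 * circular chain.
 */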
static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	int err, i, is_bnx2 = 0;
	struct kcqe **kcq;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
		is_bnx2 = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	if (is_bnx2)
		return 0;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}

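/*
 * The UIO rings are shared per PCI device: if a cnic_uio_dev already
 * exists for this PCI device, it is reattached instead of being
 * reallocated, so the userspace mapping can persist across device
 * restarts.
 */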
static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;
	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		return -ENOMEM;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf)
		return -ENOMEM;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;
}

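/*
 * Register the UIO device that exposes the hardware to userspace:
 * map 0 is the device register BAR, map 1 the status block, and maps
 * 2 and 3 the L2 ring and L2 buffers allocated above.
 */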
static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}

static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iro_arr = ethdev->iro_arr;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ + BNX2X_FCOE_NUM_CONNECTIONS;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	if (start_cid < BNX2X_ISCSI_START_CID) {
		u32 delta = BNX2X_ISCSI_START_CID - start_cid;

		cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
		cp->fcoe_start_cid += delta;
		cp->max_cid_space += delta;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
				cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2);
		if (ret)
			goto error;
	}

	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

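/*
 * Number of free slots on the kernel work queue (KWQ) ring, computed
 * from the producer/consumer indices modulo the ring size.
 */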
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}

static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

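/*
 * On bnx2x, slow-path commands are submitted as 16-byte KWQEs.  Each
 * connection context has a preallocated DMA buffer (kwqe_data) whose
 * bus address is passed to the firmware through the l5_data field.
 */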
static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));
	kwqe.hdr.type = cpu_to_le16(type);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return -EBUSY;
}

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}

static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = cp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	return 0;
}

static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
		cnic_free_id(&cp->cid_tbl, ctx->cid);
	} else {
		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
	}

	ctx->cid = 0;
}

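/*
 * Allocate the per-connection resources.  FCoE connections only need
 * a CID; iSCSI connections additionally get DMA memory for the task
 * array, the R2T queue and the host queue (HQ).
 */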
static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
		if (cid == -1) {
			ret = -ENOMEM;
			goto error;
		}
		ctx->cid = cid;
		return 0;
	}

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}

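/*
 * Return a pointer (and bus address) to the context memory for a CID.
 * On chips that require aligned context blocks, the block may have
 * been over-allocated, so any alignment offset is applied here.
 */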
static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}

static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
			(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
			(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
Michael Chan71034ba2009-10-10 13:46:59 +00001639
1640 ictx->ustorm_st_context.ring.rq.pbl_base.lo =
Michael Chan15971c32009-12-02 15:15:38 +00001641 req2->rq_page_table_addr_lo;
Michael Chan71034ba2009-10-10 13:46:59 +00001642 ictx->ustorm_st_context.ring.rq.pbl_base.hi =
Michael Chan15971c32009-12-02 15:15:38 +00001643 req2->rq_page_table_addr_hi;
Michael Chan71034ba2009-10-10 13:46:59 +00001644 ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
1645 ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
1646 ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
1647 iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1648 ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
1649 (u64) iscsi->r2tq_info.pgtbl_map >> 32;
1650 ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
1651 iscsi->r2tq_info.pgtbl[0];
1652 ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
1653 iscsi->r2tq_info.pgtbl[1];
1654 ictx->ustorm_st_context.ring.cq_pbl_base.lo =
1655 req1->cq_page_table_addr_lo;
1656 ictx->ustorm_st_context.ring.cq_pbl_base.hi =
1657 req1->cq_page_table_addr_hi;
1658 ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
1659 ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
1660 ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
1661 ictx->ustorm_st_context.task_pbe_cache_index =
1662 BNX2X_ISCSI_PBL_NOT_CACHED;
1663 ictx->ustorm_st_context.task_pdu_cache_index =
1664 BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
1665
1666 for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
1667 if (j == 3) {
1668 if (n >= n_max)
1669 break;
1670 req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1671 j = 0;
1672 }
1673 ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
1674 ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
1675 req3->qp_first_pte[j].hi;
1676 ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
1677 req3->qp_first_pte[j].lo;
1678 }
1679
1680 ictx->ustorm_st_context.task_pbl_base.lo =
1681 iscsi->task_array_info.pgtbl_map & 0xffffffff;
1682 ictx->ustorm_st_context.task_pbl_base.hi =
1683 (u64) iscsi->task_array_info.pgtbl_map >> 32;
1684 ictx->ustorm_st_context.tce_phy_addr.lo =
1685 iscsi->task_array_info.pgtbl[0];
1686 ictx->ustorm_st_context.tce_phy_addr.hi =
1687 iscsi->task_array_info.pgtbl[1];
1688 ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1689 ictx->ustorm_st_context.num_cqs = cp->num_cqs;
1690 ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
1691 ictx->ustorm_st_context.negotiated_rx_and_flags |=
1692 ISCSI_DEF_MAX_BURST_LEN;
1693 ictx->ustorm_st_context.negotiated_rx |=
1694 ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
1695 USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
1696
1697 ictx->cstorm_st_context.hq_pbl_base.lo =
1698 iscsi->hq_info.pgtbl_map & 0xffffffff;
1699 ictx->cstorm_st_context.hq_pbl_base.hi =
1700 (u64) iscsi->hq_info.pgtbl_map >> 32;
1701 ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
1702 ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
1703 ictx->cstorm_st_context.task_pbl_base.lo =
1704 iscsi->task_array_info.pgtbl_map & 0xffffffff;
1705 ictx->cstorm_st_context.task_pbl_base.hi =
1706 (u64) iscsi->task_array_info.pgtbl_map >> 32;
1707 /* CSTORM and USTORM initialization is different, CSTORM requires
1708 * CQ DB base & not PTE addr */
1709 ictx->cstorm_st_context.cq_db_base.lo =
1710 req1->cq_page_table_addr_lo & PAGE_MASK;
1711 ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
1712 ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1713 ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
1714 for (i = 0; i < cp->num_cqs; i++) {
1715 ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
1716 ISCSI_INITIAL_SN;
1717 ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
1718 ISCSI_INITIAL_SN;
1719 }
1720
1721 ictx->xstorm_ag_context.cdu_reserved =
1722 CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
1723 ISCSI_CONNECTION_TYPE);
1724 ictx->ustorm_ag_context.cdu_usage =
1725 CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
1726 ISCSI_CONNECTION_TYPE);
1727 return 0;
1728
1729}
1730
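/* Handle an iSCSI OFFLOAD_CONN1 KWQE and the WQEs chained behind it.
 * Validates the connection ID, enforces the max_iscsi_conn limit,
 * allocates connection resources and sets up the context, then posts
 * an OFFLOAD_CONN KCQE with the resulting completion status.
 */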
static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return ret;
}

static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
				  req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

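/* Submit a CFC delete ramrod for the connection and sleep until the
 * ramrod completion arrives; cnic_ctl()'s CNIC_CTL_COMPLETION_CMD
 * handler wakes us through ctx->waitq.
 */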
static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid, type;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(cp, ctx->cid);
	type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
		& SPE_HDR_CONN_TYPE;
	type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, type, &l5_data);

	if (ret == 0)
		wait_event(ctx->waitq, ctx->wait_cond);

	return ret;
}

static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);
	clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}

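/* Fill the XSTORM and TSTORM portions of the active-connection buffer
 * for a TCP connect: context address, buffer sizes, Nagle and
 * delayed-ACK flags, keepalive parameters, and a precomputed pseudo-
 * header checksum.  The checksum is always computed over the IPv6
 * address layout; for IPv4 the unused address words are still zero
 * from the caller's memset.
 */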
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
	tstorm_buf->snd_buf = kwqe3->snd_buf;
	tstorm_buf->max_rt_time = 0xffffffff;
}

static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2,
		 mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3,
		 mac[0]);
}

static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}

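/* Handle an L4 CONNECT1 KWQE, plus CONNECT2 for IPv6 and the trailing
 * CONNECT3.  The addresses, ports and TCP options are marshalled into
 * an active-connection buffer and a TCP_CONNECT ramrod is submitted on
 * the connection's CID.
 */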
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
				  kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}

static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

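/* FCoE KWQE handlers.  These follow the same pattern as the iSCSI
 * handlers above but submit their ramrods with FCOE_CONNECTION_TYPE;
 * cnic_submit_bnx2x_fcoe_kwqes() below refuses FCoE KWQEs on 57710
 * chips.
 */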
static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_stat *req;
	struct fcoe_stat_ramrod_params *fcoe_stat;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	int ret;
	u32 cid;

	req = (struct fcoe_kwqe_stat *) kwqe;
	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);

	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_stat)
		return -ENOMEM;

	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));

	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

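/* Handle the FCoE INIT1/2/3 KWQE triple: copy all three into a single
 * fcoe_init_ramrod_params buffer, point the firmware event queue at
 * kcq2, and submit the FCoE INIT ramrod on the reserved init CID.
 */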
static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret;
	struct cnic_local *cp = dev->cnic_priv;
	u32 cid;
	struct fcoe_init_ramrod_params *fcoe_init;
	struct fcoe_kwqe_init1 *req1;
	struct fcoe_kwqe_init2 *req2;
	struct fcoe_kwqe_init3 *req3;
	union l5cm_specific_data l5_data;

	if (num < 3) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
		*work = 1;
		return -EINVAL;
	}
	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
		*work = 2;
		return -EINVAL;
	}

	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_init size too big\n");
		return -ENOMEM;
	}
	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_init)
		return -ENOMEM;

	memset(fcoe_init, 0, sizeof(*fcoe_init));
	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
	fcoe_init->eq_addr.lo = cp->kcq2.dma.pg_map_arr[0] & 0xffffffff;
	fcoe_init->eq_addr.hi = (u64) cp->kcq2.dma.pg_map_arr[0] >> 32;
	fcoe_init->eq_next_page_addr.lo =
		cp->kcq2.dma.pg_map_arr[1] & 0xffffffff;
	fcoe_init->eq_next_page_addr.hi =
		(u64) cp->kcq2.dma.pg_map_arr[1] >> 32;

	fcoe_init->sb_num = cp->status_blk_num;
	fcoe_init->eq_prod = MAX_KCQ_IDX;
	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
	cp->kcq2.sw_prod_idx = 0;

	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
	netdev_dbg(dev->netdev, "submitting FCoE INIT ramrod\n");
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	*work = 3;
	return ret;
}

static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret = 0;
	u32 cid = -1, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;
	struct fcoe_kwqe_conn_offload1 *req1;
	struct fcoe_kwqe_conn_offload2 *req2;
	struct fcoe_kwqe_conn_offload3 *req3;
	struct fcoe_kwqe_conn_offload4 *req4;
	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
	struct cnic_context *ctx;
	struct fcoe_context *fctx;
	struct regpair ctx_addr;
	union l5cm_specific_data l5_data;
	struct fcoe_kcqe kcqe;
	struct kcqe *cqes[1];

	if (num < 4) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];

	*work = 4;

	l5_cid = req1->fcoe_conn_id;
	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
		goto err_reply;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto err_reply;

	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		ret = 0;
		goto err_reply;
	}
	cid = ctx->cid;

	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
	if (fctx) {
		u32 hw_cid = BNX2X_HW_CID(cp, cid);
		u32 val;

		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->xstorm_ag_context.cdu_reserved = val;
		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->ustorm_ag_context.cdu_usage = val;
	}
	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_offload size too big\n");
		goto err_reply;
	}
	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_offload)
		goto err_reply;

	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));

	cid = BNX2X_HW_CID(cp, cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;

err_reply:
	if (cid != -1)
		cnic_free_bnx2x_conn_resc(dev, l5_cid);

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
	return ret;
}

static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_enable_disable *req;
	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;

	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;

	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_enable size too big\n");
		return -ENOMEM;
	}
	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_enable)
		return -ENOMEM;

	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_enable_disable *req;
	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;

	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id;
	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
		return -EINVAL;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_disable size too big\n");
		return -ENOMEM;
	}
	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_disable)
		return -ENOMEM;

	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_destroy *req;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct fcoe_kcqe kcqe;
	struct kcqe *cqes[1];

	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id;
	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
		return -EINVAL;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	ctx = &cp->ctx_tbl[l5_cid];

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	if (ret == 0) {
		wait_event(ctx->waitq, ctx->wait_cond);
		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task,
				   msecs_to_jiffies(2000));
	}

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
	kcqe.fcoe_conn_id = req->conn_id;
	kcqe.fcoe_conn_context_id = cid;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
	return ret;
}

static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_destroy *req;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	int ret;
	u32 cid;

	req = (struct fcoe_kwqe_destroy *) kwqe;
	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

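/* Dispatch loop for iSCSI and L4 KWQEs on bnx2x devices.  Handlers of
 * multi-WQE commands report how many entries they consumed through
 * *work so the loop can skip past them; errors are logged but do not
 * stop the loop.
 */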
static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
					 struct kwqe *wqes[], u32 num_wqes)
{
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case ISCSI_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_INIT2:
			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
						     num_wqes - i, &work);
			break;
		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_CONNECT1:
			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
						 &work);
			break;
		case L4_KWQE_OPCODE_VALUE_CLOSE:
			ret = cnic_bnx2x_close(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_RESET:
			ret = cnic_bnx2x_reset(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
			ret = cnic_bnx2x_offload_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
			ret = cnic_bnx2x_update_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
			ret = 0;
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0)
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);
		i += work;
	}
	return 0;
}

static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
					struct kwqe *wqes[], u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	if (BNX2X_CHIP_NUM(cp->chip_id) == BNX2X_CHIP_NUM_57710)
		return -EINVAL;

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case FCOE_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
						    num_wqes - i, &work);
			break;
		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
						    num_wqes - i, &work);
			break;
		case FCOE_KWQE_OPCODE_ENABLE_CONN:
			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DISABLE_CONN:
			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DESTROY:
			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_STAT:
			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0)
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);
		i += work;
	}
	return 0;
}

static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				   u32 num_wqes)
{
	int ret = -EINVAL;
	u32 layer_code;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	if (!num_wqes)
		return 0;

	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
	switch (layer_code) {
	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
	case KWQE_FLAGS_LAYER_MASK_L4:
	case KWQE_FLAGS_LAYER_MASK_L2:
		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
		break;

	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
		break;
	}
	return ret;
}

static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
{
	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
		return KCQE_FLAGS_LAYER_MASK_L4;

	return opflag & KCQE_FLAGS_LAYER_MASK;
}

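/* Deliver completed KCQEs to the upper-layer drivers.  Consecutive
 * KCQEs with the same layer mask are batched into a single
 * indicate_kcqes() call, and any ramrod completions seen along the
 * way are credited back through cnic_spq_completion() afterwards.
 */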
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j, comp = 0;

	i = 0;
	j = 1;
	while (num_cqes) {
		struct cnic_ulp_ops *ulp_ops;
		int ulp_type;
		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);

		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
			comp++;

		while (j < num_cqes) {
			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;

			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
				break;

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
				comp++;
			j++;
		}

		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
			ulp_type = CNIC_ULP_RDMA;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
			ulp_type = CNIC_ULP_ISCSI;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
			ulp_type = CNIC_ULP_FCOE;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
			ulp_type = CNIC_ULP_L4;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
			goto end;
		else {
			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
				   kcqe_op_flag);
			goto end;
		}

		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
		if (likely(ulp_ops)) {
			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
						cp->completed_kcq + i, j);
		}
		rcu_read_unlock();
end:
		num_cqes -= j;
		i += j;
		j = 1;
	}
	if (unlikely(comp))
		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
}

static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}

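/* Collect new KCQEs from a queue into cp->completed_kcq[].  Only
 * entries up to the last one without KCQE_FLAGS_NEXT are counted, so
 * a multi-KCQE sequence is never handed up half-complete; returns the
 * number of usable entries and advances sw_prod_idx past them.
 */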
static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
{
	struct cnic_local *cp = dev->cnic_priv;
	u16 i, ri, hw_prod, last;
	struct kcqe *kcqe;
	int kcqe_cnt = 0, last_cnt = 0;

	i = ri = last = info->sw_prod_idx;
	ri &= MAX_KCQ_IDX;
	hw_prod = *info->hw_prod_idx_ptr;
	hw_prod = cp->hw_idx(hw_prod);

	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
		cp->completed_kcq[kcqe_cnt++] = kcqe;
		i = cp->next_idx(i);
		ri = i & MAX_KCQ_IDX;
		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
			last_cnt = kcqe_cnt;
			last = i;
		}
	}

	info->sw_prod_idx = last;
	return last_cnt;
}

static int cnic_l2_completion(struct cnic_local *cp)
{
	u16 hw_cons, sw_cons;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
					(udev->l2_ring + (2 * BCM_PAGE_SIZE));
	u32 cmd;
	int comp = 0;

	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
		return 0;

	hw_cons = *cp->rx_cons_ptr;
	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
		hw_cons++;

	sw_cons = cp->rx_cons;
	while (sw_cons != hw_cons) {
		u8 cqe_fp_flags;

		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
			    cmd == RAMROD_CMD_ID_ETH_HALT)
				comp++;
		}
		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
	}
	return comp;
}

static void cnic_chk_pkt_rings(struct cnic_local *cp)
{
	u16 rx_cons, tx_cons;
	int comp = 0;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	rx_cons = *cp->rx_cons_ptr;
	tx_cons = *cp->tx_cons_ptr;
	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			comp = cnic_l2_completion(cp);

		cp->tx_cons = tx_cons;
		cp->rx_cons = rx_cons;

		if (cp->udev)
			uio_event_notify(&cp->udev->cnic_uinfo);
	}
	if (comp)
		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
}

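/* Poll KCQ1 on a bnx2 device until the status index stops changing,
 * then write back the new software producer index and check the L2
 * rings for activity.
 */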
static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
	int kcqe_cnt;

	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;

	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		if (status_idx != *cp->kcq1.status_idx_ptr) {
			status_idx = (u16) *cp->kcq1.status_idx_ptr;
			cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
		} else
			break;
	}

	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);

	cnic_chk_pkt_rings(cp);

	return status_idx;
}

static int cnic_service_bnx2(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		struct status_block *sblk = status_blk;

		return sblk->status_idx;
	}

	return cnic_service_bnx2_queues(dev);
}

static void cnic_service_bnx2_msix(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;

	cp->last_status_idx = cnic_service_bnx2_queues(dev);

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_doirq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;

		prefetch(cp->status_blk.gen);
		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

		tasklet_schedule(&cp->cnic_irq_task);
	}
}

static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
	struct cnic_dev *dev = dev_instance;
	struct cnic_local *cp = dev->cnic_priv;

	if (cp->ack_int)
		cp->ack_int(dev);

	cnic_doirq(dev);

	return IRQ_HANDLED;
}

static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
				      u16 index, u8 op, u8 update)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
		((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
		 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
		 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
		 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
}

static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
			    u16 index, u8 op, u8 update)
{
	struct igu_regular cmd_data;
	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;

	cmd_data.sb_id_and_flags =
		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
		(update << IGU_REGULAR_BUPDATE_SHIFT) |
		(op << IGU_REGULAR_ENABLE_INT_SHIFT);

	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
}

static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
			   IGU_INT_DISABLE, 0);
}

static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
			IGU_INT_DISABLE, 0);
}

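/* Drain one bnx2x KCQ, servicing KCQEs until the status index stops
 * changing; returns the last status index seen so the caller can ack
 * the IGU with it.
 */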
static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	u32 last_status = *info->status_idx_ptr;
	int kcqe_cnt;

	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that sblk fields can change. */
		barrier();
		if (last_status == *info->status_idx_ptr)
			break;

		last_status = *info->status_idx_ptr;
	}
	return last_status;
}

static void cnic_service_bnx2x_bh(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return;

	status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);

	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);

		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
			  MAX_KCQ_IDX);

		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
				status_idx, IGU_INT_ENABLE, 1);
	} else {
		cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
				   status_idx, IGU_INT_ENABLE, 1);
	}
}

static int cnic_service_bnx2x(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct cnic_local *cp = dev->cnic_priv;

	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		cnic_doirq(dev);

	cnic_chk_pkt_rings(cp);

	return 0;
}

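/* Tell every registered upper-layer driver that the device is going
 * down.  ULP_F_CALL_PENDING is set around each cnic_stop() call so
 * that other code paths can tell a callback is still in flight.
 */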
static void cnic_ulp_stop(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cp->ulp_ops[if_type];
		if (!ulp_ops) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_stop(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

static void cnic_ulp_start(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cp->ulp_ops[if_type];
		if (!ulp_ops || !ulp_ops->cnic_start) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_start(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
	struct cnic_dev *dev = data;

	switch (info->cmd) {
	case CNIC_CTL_STOP_CMD:
		cnic_hold(dev);

		cnic_ulp_stop(dev);
		cnic_stop_hw(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_START_CMD:
		cnic_hold(dev);

		if (!cnic_start_hw(dev))
			cnic_ulp_start(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_COMPLETION_CMD: {
		u32 cid = BNX2X_SW_CID(info->data.comp.cid);
		u32 l5_cid;
		struct cnic_local *cp = dev->cnic_priv;

		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

			ctx->wait_cond = 1;
			wake_up(&ctx->waitq);
		}
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

3041static void cnic_ulp_init(struct cnic_dev *dev)
3042{
3043 int i;
3044 struct cnic_local *cp = dev->cnic_priv;
3045
Michael Chana4636962009-06-08 18:14:43 -07003046 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3047 struct cnic_ulp_ops *ulp_ops;
3048
Michael Chan7fc1ece2009-08-14 15:49:47 +00003049 mutex_lock(&cnic_lock);
3050 ulp_ops = cnic_ulp_tbl[i];
3051 if (!ulp_ops || !ulp_ops->cnic_init) {
3052 mutex_unlock(&cnic_lock);
Michael Chana4636962009-06-08 18:14:43 -07003053 continue;
Michael Chan7fc1ece2009-08-14 15:49:47 +00003054 }
3055 ulp_get(ulp_ops);
3056 mutex_unlock(&cnic_lock);
Michael Chana4636962009-06-08 18:14:43 -07003057
3058 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3059 ulp_ops->cnic_init(dev);
3060
Michael Chan7fc1ece2009-08-14 15:49:47 +00003061 ulp_put(ulp_ops);
Michael Chana4636962009-06-08 18:14:43 -07003062 }
Michael Chana4636962009-06-08 18:14:43 -07003063}
3064
3065static void cnic_ulp_exit(struct cnic_dev *dev)
3066{
3067 int i;
3068 struct cnic_local *cp = dev->cnic_priv;
3069
Michael Chana4636962009-06-08 18:14:43 -07003070 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3071 struct cnic_ulp_ops *ulp_ops;
3072
Michael Chan7fc1ece2009-08-14 15:49:47 +00003073 mutex_lock(&cnic_lock);
3074 ulp_ops = cnic_ulp_tbl[i];
3075 if (!ulp_ops || !ulp_ops->cnic_exit) {
3076 mutex_unlock(&cnic_lock);
Michael Chana4636962009-06-08 18:14:43 -07003077 continue;
Michael Chan7fc1ece2009-08-14 15:49:47 +00003078 }
3079 ulp_get(ulp_ops);
3080 mutex_unlock(&cnic_lock);
Michael Chana4636962009-06-08 18:14:43 -07003081
3082 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3083 ulp_ops->cnic_exit(dev);
3084
Michael Chan7fc1ece2009-08-14 15:49:47 +00003085 ulp_put(ulp_ops);
Michael Chana4636962009-06-08 18:14:43 -07003086 }
Michael Chana4636962009-06-08 18:14:43 -07003087}
3088
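/* The functions below build L4 kernel work queue entries (KWQEs) for the
 * connection manager.  Each request is overlaid on csk->kwqe1/2/3 and
 * posted through dev->submit_kwqes().  OFFLOAD_PG creates the "port
 * group" (PG) context carrying the L2 addressing (MACs and optional VLAN
 * tag) for a TCP connection.
 */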
static int cnic_cm_offload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_offload_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
	l4kwqe->l2hdr_nbytes = ETH_HLEN;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->sa0 = dev->mac_addr[0];
	l4kwqe->sa1 = dev->mac_addr[1];
	l4kwqe->sa2 = dev->mac_addr[2];
	l4kwqe->sa3 = dev->mac_addr[3];
	l4kwqe->sa4 = dev->mac_addr[4];
	l4kwqe->sa5 = dev->mac_addr[5];

	l4kwqe->etype = ETH_P_IP;
	l4kwqe->ipid_start = DEF_IPID_START;
	l4kwqe->host_opaque = csk->l5_cid;

	if (csk->vlan_id) {
		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
		l4kwqe->vlan_tag = csk->vlan_id;
		l4kwqe->l2hdr_nbytes += 4;
	}

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_update_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_update_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
	l4kwqe->pg_cid = csk->pg_cid;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->pg_host_opaque = csk->l5_cid;
	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_upload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_upload *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->pg_cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

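/* Post the TCP connect request.  IPv4 takes two KWQEs (CONNECT1 +
 * CONNECT3); IPv6 inserts CONNECT2 in between to carry the upper 96 bits
 * of both addresses.  The MSS is derived from the path MTU minus the
 * fixed IP and TCP header sizes.
 */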
static int cnic_cm_conn_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_connect_req1 *l4kwqe1;
	struct l4_kwq_connect_req2 *l4kwqe2;
	struct l4_kwq_connect_req3 *l4kwqe3;
	struct kwqe *wqes[3];
	u8 tcp_flags = 0;
	int num_wqes = 2;

	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
	memset(l4kwqe3, 0, sizeof(*l4kwqe3));

	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
	l4kwqe3->flags =
		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
	l4kwqe3->ka_timeout = csk->ka_timeout;
	l4kwqe3->ka_interval = csk->ka_interval;
	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
	l4kwqe3->tos = csk->tos;
	l4kwqe3->ttl = csk->ttl;
	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
	l4kwqe3->pmtu = csk->mtu;
	l4kwqe3->rcv_buf = csk->rcv_buf;
	l4kwqe3->snd_buf = csk->snd_buf;
	l4kwqe3->seed = csk->seed;

	wqes[0] = (struct kwqe *) l4kwqe1;
	if (test_bit(SK_F_IPV6, &csk->flags)) {
		wqes[1] = (struct kwqe *) l4kwqe2;
		wqes[2] = (struct kwqe *) l4kwqe3;
		num_wqes = 3;

		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
		l4kwqe2->flags =
			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
			       sizeof(struct tcphdr);
	} else {
		wqes[1] = (struct kwqe *) l4kwqe3;
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
			       sizeof(struct tcphdr);
	}

	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
	l4kwqe1->flags =
		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
	l4kwqe1->cid = csk->cid;
	l4kwqe1->pg_cid = csk->pg_cid;
	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
	if (csk->tcp_flags & SK_TCP_NAGLE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
	if (csk->tcp_flags & SK_TCP_SACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;

	l4kwqe1->tcp_flags = tcp_flags;

	return dev->submit_kwqes(dev, wqes, num_wqes);
}

static int cnic_cm_close_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_close_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_abort_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_reset_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

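/* Claim the cnic_sock slot for (cid, l5_cid) and fill in the default TCP
 * parameters.  Returns -EAGAIN while a previous offload on this l5_cid is
 * still winding down or while the slot is referenced, and -EBUSY if the
 * slot is already marked in use.
 */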
static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
			  u32 l5_cid, struct cnic_sock **csk, void *context)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_sock *csk1;

	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return -EINVAL;

	if (cp->ctx_tbl) {
		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			return -EAGAIN;
	}

	csk1 = &cp->csk_tbl[l5_cid];
	if (atomic_read(&csk1->ref_count))
		return -EAGAIN;

	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
		return -EBUSY;

	csk1->dev = dev;
	csk1->cid = cid;
	csk1->l5_cid = l5_cid;
	csk1->ulp_type = ulp_type;
	csk1->context = context;

	csk1->ka_timeout = DEF_KA_TIMEOUT;
	csk1->ka_interval = DEF_KA_INTERVAL;
	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
	csk1->tos = DEF_TOS;
	csk1->ttl = DEF_TTL;
	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
	csk1->rcv_buf = DEF_RCV_BUF;
	csk1->snd_buf = DEF_SND_BUF;
	csk1->seed = DEF_SEED;

	*csk = csk1;
	return 0;
}

static void cnic_cm_cleanup(struct cnic_sock *csk)
{
	if (csk->src_port) {
		struct cnic_dev *dev = csk->dev;
		struct cnic_local *cp = dev->cnic_priv;

		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
		csk->src_port = 0;
	}
}

static void cnic_close_conn(struct cnic_sock *csk)
{
	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
		cnic_cm_upload_pg(csk);
		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	}
	cnic_cm_cleanup(csk);
}

static int cnic_cm_destroy(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	csk_hold(csk);
	clear_bit(SK_F_INUSE, &csk->flags);
	smp_mb__after_clear_bit();
	while (atomic_read(&csk->ref_count) != 1)
		msleep(1);
	cnic_cm_cleanup(csk);

	csk->flags = 0;
	csk_put(csk);
	return 0;
}

static inline u16 cnic_get_vlan(struct net_device *dev,
				struct net_device **vlan_dev)
{
	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		*vlan_dev = vlan_dev_real_dev(dev);
		return vlan_dev_vlan_id(dev);
	}
	*vlan_dev = dev;
	return 0;
}

static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_INET)
	struct flowi fl;
	int err;
	struct rtable *rt;

	memset(&fl, 0, sizeof(fl));
	fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;

	err = ip_route_output_key(&init_net, &rt, &fl);
	if (!err)
		*dst = &rt->dst;
	return err;
#else
	return -ENETUNREACH;
#endif
}

static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
	if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
		fl.oif = dst_addr->sin6_scope_id;

	*dst = ip6_route_output(&init_net, NULL, &fl);
	if (*dst)
		return 0;
#endif

	return -ENETUNREACH;
}

static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
					   int ulp_type)
{
	struct cnic_dev *dev = NULL;
	struct dst_entry *dst;
	struct net_device *netdev = NULL;
	int err = -ENETUNREACH;

	if (dst_addr->sin_family == AF_INET)
		err = cnic_get_v4_route(dst_addr, &dst);
	else if (dst_addr->sin_family == AF_INET6) {
		struct sockaddr_in6 *dst_addr6 =
			(struct sockaddr_in6 *) dst_addr;

		err = cnic_get_v6_route(dst_addr6, &dst);
	} else
		return NULL;

	if (err)
		return NULL;

	if (!dst->dev)
		goto done;

	cnic_get_vlan(dst->dev, &netdev);

	dev = cnic_from_netdev(netdev);

done:
	dst_release(dst);
	if (dev)
		cnic_put(dev);
	return dev;
}

static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
}

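/* Resolve the route for a new connection and reserve the source port.  A
 * caller-supplied local port in [CNIC_LOCAL_PORT_MIN, CNIC_LOCAL_PORT_MAX)
 * is reserved in csk_port_tbl when free; otherwise a fresh port id is
 * allocated from the table.  VLAN id and path MTU are taken from the
 * route when it resolves to our own netdev.
 */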
static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	int is_v6, rc = 0;
	struct dst_entry *dst = NULL;
	struct net_device *realdev;
	__be16 local_port;
	u32 port_id;

	if (saddr->local.v6.sin6_family == AF_INET6 &&
	    saddr->remote.v6.sin6_family == AF_INET6)
		is_v6 = 1;
	else if (saddr->local.v4.sin_family == AF_INET &&
		 saddr->remote.v4.sin_family == AF_INET)
		is_v6 = 0;
	else
		return -EINVAL;

	clear_bit(SK_F_IPV6, &csk->flags);

	if (is_v6) {
		set_bit(SK_F_IPV6, &csk->flags);
		cnic_get_v6_route(&saddr->remote.v6, &dst);

		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
		       sizeof(struct in6_addr));
		csk->dst_port = saddr->remote.v6.sin6_port;
		local_port = saddr->local.v6.sin6_port;

	} else {
		cnic_get_v4_route(&saddr->remote.v4, &dst);

		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
		csk->dst_port = saddr->remote.v4.sin_port;
		local_port = saddr->local.v4.sin_port;
	}

	csk->vlan_id = 0;
	csk->mtu = dev->netdev->mtu;
	if (dst && dst->dev) {
		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
		if (realdev == dev->netdev) {
			csk->vlan_id = vlan;
			csk->mtu = dst_mtu(dst);
		}
	}

	port_id = be16_to_cpu(local_port);
	if (port_id >= CNIC_LOCAL_PORT_MIN &&
	    port_id < CNIC_LOCAL_PORT_MAX) {
		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
			port_id = 0;
	} else
		port_id = 0;

	if (!port_id) {
		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
		if (port_id == -1) {
			rc = -ENOMEM;
			goto err_out;
		}
		local_port = cpu_to_be16(port_id);
	}
	csk->src_port = local_port;

err_out:
	dst_release(dst);
	return rc;
}

static void cnic_init_csk_state(struct cnic_sock *csk)
{
	csk->state = 0;
	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
	clear_bit(SK_F_CLOSING, &csk->flags);
}

static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	int err = 0;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
		return -EINVAL;

	cnic_init_csk_state(csk);

	err = cnic_get_route(csk, saddr);
	if (err)
		goto err_out;

	err = cnic_resolve_addr(csk, saddr);
	if (!err)
		return 0;

err_out:
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	return err;
}

static int cnic_cm_abort(struct cnic_sock *csk)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_abort_prep(csk))
		return cnic_cm_abort_req(csk);

	/* Getting here means that we haven't started connect, or the
	 * connect was not successful.
	 */

	cp->close_conn(csk, opcode);
	if (csk->state != opcode)
		return -EALREADY;

	return 0;
}

static int cnic_cm_close(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_close_prep(csk)) {
		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		return cnic_cm_close_req(csk);
	}
	return -EALREADY;
}

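/* Dispatch a connection event to the owning ULP (e.g. bnx2i) under
 * rcu_read_lock; the completion opcode selects which cm_* callback fires.
 */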
static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
			   u8 opcode)
{
	struct cnic_ulp_ops *ulp_ops;
	int ulp_type = csk->ulp_type;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (ulp_ops) {
		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
			ulp_ops->cm_connect_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
			ulp_ops->cm_close_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
			ulp_ops->cm_remote_abort(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
			ulp_ops->cm_abort_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
			ulp_ops->cm_remote_close(csk);
	}
	rcu_read_unlock();
}

static int cnic_cm_set_pg(struct cnic_sock *csk)
{
	if (cnic_offld_prep(csk)) {
		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			cnic_cm_update_pg(csk);
		else
			cnic_cm_offload_pg(csk);
	}
	return 0;
}

static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 l5_cid = kcqe->pg_host_opaque;
	u8 opcode = kcqe->op_code;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];

	csk_hold(csk);
	if (!cnic_in_use(csk))
		goto done;

	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		goto done;
	}
	/* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
	if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk,
			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		goto done;
	}

	csk->pg_cid = kcqe->pg_cid;
	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	cnic_cm_conn_req(csk);

done:
	csk_put(csk);
}

static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
	u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	ctx->timestamp = jiffies;
	ctx->wait_cond = 1;
	wake_up(&ctx->waitq);
}

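/* Demultiplex one L4/L5 kernel completion queue entry (KCQE).  PG and
 * FCoE-terminate completions are handled separately; for the rest the
 * l5_cid comes from conn_id, or from cid for opcodes with bit 7 set
 * (which appear to be ramrod completions), and the event is applied to
 * the matching cnic_sock.
 */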
static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
	u8 opcode = l4kcqe->op_code;
	u32 l5_cid;
	struct cnic_sock *csk;

	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
		cnic_process_fcoe_term_conn(dev, kcqe);
		return;
	}
	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		cnic_cm_process_offld_pg(dev, l4kcqe);
		return;
	}

	l5_cid = l4kcqe->conn_id;
	if (opcode & 0x80)
		l5_cid = l4kcqe->cid;
	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return;

	csk = &cp->csk_tbl[l5_cid];
	csk_hold(csk);

	if (!cnic_in_use(csk)) {
		csk_put(csk);
		return;
	}

	switch (opcode) {
	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
		if (l4kcqe->status != 0) {
			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
			cnic_cm_upcall(cp, csk,
				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		}
		break;
	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
		if (l4kcqe->status == 0)
			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);

		smp_mb__before_clear_bit();
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		cp->close_conn(csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
		cnic_cm_upcall(cp, csk, opcode);
		break;
	}
	csk_put(csk);
}

static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
{
	struct cnic_dev *dev = data;
	int i;

	for (i = 0; i < num; i++)
		cnic_cm_process_kcqe(dev, kcqe[i]);
}

static struct cnic_ulp_ops cm_ulp_ops = {
	.indicate_kcqes = cnic_cm_indicate_kcqe,
};

static void cnic_cm_free_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	kfree(cp->csk_tbl);
	cp->csk_tbl = NULL;
	cnic_free_id_tbl(&cp->csk_port_tbl);
}

static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
			      GFP_KERNEL);
	if (!cp->csk_tbl)
		return -ENOMEM;

	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
			     CNIC_LOCAL_PORT_MIN)) {
		cnic_cm_free_mem(dev);
		return -ENOMEM;
	}
	return 0;
}

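/* Decide whether a close/reset event may move the socket into the CLOSING
 * state.  Returns 1 when SK_F_CLOSING was newly set and the caller should
 * complete the close, 0 otherwise.
 */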
static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
{
	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		/* Unsolicited RESET_COMP or RESET_RECEIVED */
		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
		csk->state = opcode;
	}

	/* We accept the event:
	 * 1. If the event opcode matches the expected event in csk->state
	 * 2. If the expected event is CLOSE_COMP, we accept any event
	 * 3. If the expected event is 0, meaning the connection was never
	 *    established, we accept the opcode from cm_abort.
	 */
	if (opcode == csk->state || csk->state == 0 ||
	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) {
		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
			if (csk->state == 0)
				csk->state = opcode;
			return 1;
		}
	}
	return 0;
}

static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
		cnic_cm_upcall(cp, csk, opcode);
		return;
	}

	clear_bit(SK_F_CONNECT_START, &csk->flags);
	cnic_close_conn(csk);
	csk->state = opcode;
	cnic_cm_upcall(cp, csk, opcode);
}

static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
{
}

static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
	u32 seed;

	get_random_bytes(&seed, 4);
	cnic_ctx_wr(dev, 45, 0, seed);
	return 0;
}

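/* bnx2x close path: a RESET/CLOSE completion either kicks off the next
 * teardown ramrod (SEARCHER_DELETE, then TERMINATE_OFFLOAD) or, once the
 * sequence has finished, completes the close and notifies the ULP.
 */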
static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
	union l5cm_specific_data l5_data;
	u32 cmd = 0;
	int close_complete = 0;

	switch (opcode) {
	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
		if (cnic_ready_to_close(csk, opcode)) {
			if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
			else
				close_complete = 1;
		}
		break;
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
		break;
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		close_complete = 1;
		break;
	}
	if (cmd) {
		memset(&l5_data, 0, sizeof(l5_data));

		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
				    &l5_data);
	} else if (close_complete) {
		ctx->timestamp = jiffies;
		cnic_close_conn(csk);
		cnic_cm_upcall(cp, csk, csk->state);
	}
}

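/* Wait for all offloaded contexts to finish deletion before the hardware
 * is stopped, then cancel the delayed reaper and drain its workqueue.
 * Contexts or iSCSI connections still outstanding only produce warnings.
 */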
static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	if (!cp->ctx_tbl)
		return;

	if (!netif_running(dev->netdev))
		return;

	for (i = 0; i < cp->max_cid_space; i++) {
		struct cnic_context *ctx = &cp->ctx_tbl[i];

		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			msleep(10);

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			netdev_warn(dev->netdev, "CID %x not deleted\n",
				    ctx->cid);
	}

	cancel_delayed_work(&cp->delete_task);
	flush_workqueue(cnic_wq);

	if (atomic_read(&cp->iscsi_conn) != 0)
		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
			    atomic_read(&cp->iscsi_conn));
}

static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u32 port = CNIC_PORT(cp);

	cnic_init_bnx2x_mac(dev);
	cnic_bnx2x_set_tcp_timestamp(dev, 1);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);

	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
		DEF_MAX_DA_COUNT);

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
		DEF_MAX_CWND);
	return 0;
}

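/* Delayed-work reaper: for each context flagged CTX_FL_DELETE_WAIT that
 * is at least 2 seconds old, issue the destroy ramrod and free the
 * per-connection resources.  Reschedules itself while any context is
 * still too young to reap.
 */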
static void cnic_delete_task(struct work_struct *work)
{
	struct cnic_local *cp;
	struct cnic_dev *dev;
	u32 i;
	int need_resched = 0;

	cp = container_of(work, struct cnic_local, delete_task.work);
	dev = cp->dev;

	for (i = 0; i < cp->max_cid_space; i++) {
		struct cnic_context *ctx = &cp->ctx_tbl[i];

		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			continue;

		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
			need_resched = 1;
			continue;
		}

		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			continue;

		cnic_bnx2x_destroy_ramrod(dev, i);

		cnic_free_bnx2x_conn_resc(dev, i);
		if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
			atomic_dec(&cp->iscsi_conn);

		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
	}

	if (need_resched)
		queue_delayed_work(cnic_wq, &cp->delete_task,
				   msecs_to_jiffies(10));
}

static int cnic_cm_open(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int err;

	err = cnic_cm_alloc_mem(dev);
	if (err)
		return err;

	err = cp->start_cm(dev);

	if (err)
		goto err_out;

	INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);

	dev->cm_create = cnic_cm_create;
	dev->cm_destroy = cnic_cm_destroy;
	dev->cm_connect = cnic_cm_connect;
	dev->cm_abort = cnic_cm_abort;
	dev->cm_close = cnic_cm_close;
	dev->cm_select_dev = cnic_cm_select_dev;

	cp->ulp_handle[CNIC_ULP_L4] = dev;
	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
	return 0;

err_out:
	cnic_cm_free_mem(dev);
	return err;
}

static int cnic_cm_shutdown(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	cp->stop_cm(dev);

	if (!cp->csk_tbl)
		return 0;

	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
		struct cnic_sock *csk = &cp->csk_tbl[i];

		clear_bit(SK_F_INUSE, &csk->flags);
		cnic_cm_cleanup(csk);
	}
	cnic_cm_free_mem(dev);

	return 0;
}

static void cnic_init_context(struct cnic_dev *dev, u32 cid)
{
	u32 cid_addr;
	int i;

	cid_addr = GET_CID_ADDR(cid);

	for (i = 0; i < CTX_SIZE; i += 4)
		cnic_ctx_wr(dev, cid_addr, i, 0);
}

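/* On the 5709, context memory lives in host pages: program the chip's
 * host page table with each block's DMA address and poll (up to 10 times)
 * for the write request bit to clear.
 */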
static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret = 0, i;
	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;

	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		return 0;

	for (i = 0; i < cp->ctx_blks; i++) {
		int j;
		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
		u32 val;

		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);

		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) cp->ctx_arr[i].mapping >> 32);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {
			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static void cnic_free_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		cp->disable_int_sync(dev);
		tasklet_kill(&cp->cnic_irq_task);
		free_irq(ethdev->irq_arr[0].vector, dev);
	}
}

static int cnic_request_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
	if (err)
		tasklet_disable(&cp->cnic_irq_task);

	return err;
}

static int cnic_init_bnx2_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		int err, i = 0;
		int sblk_num = cp->status_blk_num;
		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);

		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
		tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
			     (unsigned long) dev);
		err = cnic_request_irq(dev);
		if (err)
			return err;

		while (cp->status_blk.bnx2->status_completion_producer_index &&
		       i < 10) {
			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
				1 << (11 + sblk_num));
			udelay(10);
			i++;
			barrier();
		}
		if (cp->status_blk.bnx2->status_completion_producer_index) {
			cnic_free_irq(dev);
			goto failed;
		}

	} else {
		struct status_block *sblk = cp->status_blk.gen;
		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
		int i = 0;

		while (sblk->status_completion_producer_index && i < 10) {
			CNIC_WR(dev, BNX2_HC_COMMAND,
				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
			udelay(10);
			i++;
			barrier();
		}
		if (sblk->status_completion_producer_index)
			goto failed;

	}
	return 0;

failed:
	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
	return -EBUSY;
}

static void cnic_enable_bnx2_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
	synchronize_irq(ethdev->irq_arr[0].vector);
}

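/* Set up the cnic L2 Tx ring on bnx2.  Every BD initially points at the
 * same uio-mapped buffer (udev->l2_buf_map); the chip context, whose
 * offsets differ between the 5709 and older chips, is then programmed
 * with the ring's DMA address.
 */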
static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct cnic_uio_dev *udev = cp->udev;
	u32 cid_addr, tx_cid, sb_id;
	u32 val, offset0, offset1, offset2, offset3;
	int i;
	struct tx_bd *txbd;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct status_block *s_blk = cp->status_blk.gen;

	sb_id = cp->status_blk_num;
	tx_cid = 20;
	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		tx_cid = TX_TSS_CID + sb_id - 1;
		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
			(TX_TSS_CID << 7));
		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
	}
	cp->tx_cons = *cp->tx_cons_ptr;

	cid_addr = GET_CID_ADDR(tx_cid);
	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;

		for (i = 0; i < PHY_CTX_SIZE; i += 4)
			cnic_ctx_wr(dev, cid_addr2, i, 0);

		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		cnic_init_context(dev, tx_cid);
		cnic_init_context(dev, tx_cid + 1);

		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	cnic_ctx_wr(dev, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	cnic_ctx_wr(dev, cid_addr, offset1, val);

	txbd = (struct tx_bd *) udev->l2_ring;

	buf_map = udev->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) ring_map >> 32;
	cnic_ctx_wr(dev, cid_addr, offset2, val);
	txbd->tx_bd_haddr_hi = val;

	val = (u64) ring_map & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, offset3, val);
	txbd->tx_bd_haddr_lo = val;
}

static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct cnic_uio_dev *udev = cp->udev;
	u32 cid_addr, sb_id, val, coal_reg, coal_val;
	int i;
	struct rx_bd *rxbd;
	struct status_block *s_blk = cp->status_blk.gen;
	dma_addr_t ring_map = udev->l2_ring_map;

	sb_id = cp->status_blk_num;
	cnic_init_context(dev, 2);
	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
	coal_reg = BNX2_HC_COMMAND;
	coal_val = CNIC_RD(dev, coal_reg);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
		coal_reg = BNX2_HC_COALESCE_NOW;
		coal_val = 1 << (11 + sb_id);
	}
	i = 0;
	while (*cp->rx_cons_ptr == 0 && i < 10) {
		CNIC_WR(dev, coal_reg, coal_val);
		udelay(10);
		i++;
		barrier();
	}
	cp->rx_cons = *cp->rx_cons_ptr;

	cid_addr = GET_CID_ADDR(2);
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	if (sb_id == 0)
		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
	else
		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);

	rxbd = (struct rx_bd *) (udev->l2_ring + BCM_PAGE_SIZE);
	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->rx_bd_len = cp->l2_single_buf_size;
		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
	rxbd->rx_bd_haddr_hi = val;

	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
	rxbd->rx_bd_haddr_lo = val;

	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
}

static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct kwqe *wqes[1], l2kwqe;

	memset(&l2kwqe, 0, sizeof(l2kwqe));
	wqes[0] = &l2kwqe;
	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
			       KWQE_OPCODE_SHIFT) | 2;
	dev->submit_kwqes(dev, wqes, 1);
}

static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 val;

	val = cp->func << 2;

	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);

	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;

	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}

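/* Bring up the bnx2 CNIC path: program the MQ page size, create the
 * kernel work queue (KWQ) and kernel completion queue (KCQ) contexts,
 * point the producer/consumer pointers at the right status block, and
 * finally release the CP and COM processors via their scratchpad
 * doorbells.
 */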
static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct status_block *sblk = cp->status_blk.gen;
	u32 val, kcq_cid_addr, kwq_cid_addr;
	int err;

	cnic_set_bnx2_mac(dev);

	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	if (BCM_PAGE_BITS > 12)
		val |= (12 - 8) << 4;
	else
		val |= (BCM_PAGE_BITS - 8) << 4;

	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq1.sw_prod_idx = 0;
	cp->kcq1.hw_prod_idx_ptr =
		(u16 *) &sblk->status_completion_producer_index;

	cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq1.dma.pgtbl_map;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->int_num = 0;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *msblk = cp->status_blk.bnx2;
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

		cp->kcq1.hw_prod_idx_ptr =
			(u16 *) &msblk->status_completion_producer_index;
		cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
		cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver posts an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non-zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		netdev_err(dev->netdev, "cnic_init_irq failed\n");
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	return 0;
}

static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_offset = ethdev->ctx_tbl_offset;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		struct cnic_ctx *ctx = &cp->ctx_arr[i];
		dma_addr_t map = ctx->mapping;

		if (cp->ctx_align) {
			unsigned long mask = cp->ctx_align - 1;

			map = (map + mask) & ~mask;
		}

		cnic_ctx_tbl_wr(dev, start_offset + i, map);
	}
}

static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
		     (unsigned long) dev);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
		err = cnic_request_irq(dev);

	return err;
}

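/* Helper that flips the HC_ENABLED flag of one status-block index in
 * CSTORM internal memory; used below to enable host coalescing on the
 * iSCSI EQ consumer index.
 */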
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004541static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4542 u16 sb_id, u8 sb_index,
4543 u8 disable)
4544{
4545
4546 u32 addr = BAR_CSTRORM_INTMEM +
4547 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4548 offsetof(struct hc_status_block_data_e1x, index_data) +
4549 sizeof(struct hc_index_data)*sb_index +
4550 offsetof(struct hc_index_data, flags);
4551 u16 flags = CNIC_RD16(dev, addr);
4552 /* clear and set */
4553 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4554 flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4555 HC_INDEX_DATA_HC_ENABLED);
4556 CNIC_WR16(dev, addr, flags);
4557}
4558
Michael Chan71034ba2009-10-10 13:46:59 +00004559static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4560{
4561 struct cnic_local *cp = dev->cnic_priv;
4562 u8 sb_id = cp->status_blk_num;
Michael Chan71034ba2009-10-10 13:46:59 +00004563
4564 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004565 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4566 offsetof(struct hc_status_block_data_e1x, index_data) +
4567 sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
4568 offsetof(struct hc_index_data, timeout), 64 / 12);
4569 cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
Michael Chan71034ba2009-10-10 13:46:59 +00004570}
4571
4572static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4573{
4574}
4575
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int port = CNIC_PORT(cp);
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	u32 val;

	memset(txbd, 0, BCM_PAGE_SIZE);

	buf_map = udev->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
		reg_bd->addr_hi = start_bd->addr_hi;
		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
		start_bd->nbytes = cpu_to_le16(0x10);
		start_bd->nbd = cpu_to_le16(3);
		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
		start_bd->general_data = (UNICAST_ADDRESS <<
			ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
	}

	val = (u64) ring_map >> 32;
	txbd->next_bd.addr_hi = cpu_to_le32(val);
	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);

	val = (u64) ring_map & 0xffffffff;
	txbd->next_bd.addr_lo = cpu_to_le32(val);
	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;

	/* reset xstorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		val = BAR_XSTRORM_INTMEM +
		      XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);
	}

	cp->tx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
}

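/* Build the iSCSI L2 RX BD ring (page 1) and RCQ (page 2) of the UIO
 * ring buffer and fill in the general and RX-side fields of the
 * client-init ramrod data: client/qzone ids, buffer size, MTU and
 * statistics settings.
 */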
static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
				BCM_PAGE_SIZE);
	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
				(udev->l2_ring + (2 * BCM_PAGE_SIZE));
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	int port = CNIC_PORT(cp);
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
	u32 val;
	dma_addr_t ring_map = udev->l2_ring_map;

	/* General data */
	data->general.client_id = cli;
	data->general.statistics_en_flg = 1;
	data->general.statistics_counter_id = cli;
	data->general.activate_flg = 1;
	data->general.sp_client_id = cli;

	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
	}

	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
	rxbd->addr_hi = cpu_to_le32(val);
	data->rx.bd_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	rxbd->addr_lo = cpu_to_le32(val);
	data->rx.bd_page_base.lo = cpu_to_le32(val);

	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
	rxcqe->addr_hi = cpu_to_le32(val);
	data->rx.cqe_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
	rxcqe->addr_lo = cpu_to_le32(val);
	data->rx.cqe_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->rx.client_qzone_id = cl_qzone_id;
	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
	data->rx.status_block_id = BNX2X_DEF_SB_ID;

	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
	data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size);

	data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
	data->rx.outer_vlan_removal_enable_flg = 1;

	/* reset tstorm and ustorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		val = BAR_TSTRORM_INTMEM +
		      TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);

		val = BAR_USTRORM_INTMEM +
		      USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);
	}

	cp->rx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
	cp->rx_cons = *cp->rx_cons_ptr;
}

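/* Read a MAC address split across two 32-bit words (2 + 4 bytes) and,
 * if it is a valid ethernet address, record it as the device MAC.
 */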
static int cnic_read_bnx2x_iscsi_mac(struct cnic_dev *dev, u32 upper_addr,
				     u32 lower_addr)
{
	u32 val;
	u8 mac[6];

	val = CNIC_RD(dev, upper_addr);

	mac[0] = (u8) (val >> 8);
	mac[1] = (u8) val;

	val = CNIC_RD(dev, lower_addr);

	mac[2] = (u8) (val >> 24);
	mac[3] = (u8) (val >> 16);
	mac[4] = (u8) (val >> 8);
	mac[5] = (u8) val;

	if (is_valid_ether_addr(mac)) {
		memcpy(dev->mac_addr, mac, 6);
		return 0;
	} else {
		return -EINVAL;
	}
}

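/* Derive the iSCSI MAC address and the maximum iSCSI/FCoE connection
 * counts from shared memory, honoring the license key and the E1H/E2
 * multi-function (SD vs. SI) configuration.
 */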
static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 base, base2, addr, addr1, val;
	int port = CNIC_PORT(cp);

	dev->max_iscsi_conn = 0;
	base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
	if (base == 0)
		return;

	base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 :
			      MISC_REG_GENERIC_CR_0));
	addr = BNX2X_SHMEM_ADDR(base,
		dev_info.port_hw_config[port].iscsi_mac_upper);

	addr1 = BNX2X_SHMEM_ADDR(base,
		dev_info.port_hw_config[port].iscsi_mac_lower);

	cnic_read_bnx2x_iscsi_mac(dev, addr, addr1);

	addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
	val = CNIC_RD(dev, addr);

	if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
		u16 val16;

		addr = BNX2X_SHMEM_ADDR(base,
				drv_lic_key[port].max_iscsi_init_conn);
		val16 = CNIC_RD16(dev, addr);

		if (val16)
			val16 ^= 0x1e1e;
		dev->max_iscsi_conn = val16;
	}

	if (BNX2X_CHIP_IS_E2(cp->chip_id))
		dev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;

	if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
		int func = CNIC_FUNC(cp);
		u32 mf_cfg_addr;

		if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr))
			mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2,
					      mf_cfg_addr));
		else
			mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;

		if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
			/* Must determine if the MF is SD vs SI mode */
			addr = BNX2X_SHMEM_ADDR(base,
				dev_info.shared_feature_config.config);
			val = CNIC_RD(dev, addr);
			if ((val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) ==
			    SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT) {
				int rc;

				/* MULTI_FUNCTION_SI mode */
				addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
					func_ext_config[func].func_cfg);
				val = CNIC_RD(dev, addr);
				if (!(val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD))
					dev->max_iscsi_conn = 0;

				if (!(val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
					dev->max_fcoe_conn = 0;

				addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
					func_ext_config[func].
					iscsi_mac_addr_upper);
				addr1 = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
					func_ext_config[func].
					iscsi_mac_addr_lower);
				rc = cnic_read_bnx2x_iscsi_mac(dev, addr,
								addr1);
				if (rc && func > 1)
					dev->max_iscsi_conn = 0;

				return;
			}
		}

		addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
			func_mf_config[func].e1hov_tag);

		val = CNIC_RD(dev, addr);
		val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
			dev->max_fcoe_conn = 0;
			dev->max_iscsi_conn = 0;
		}
	}
	if (!is_valid_ether_addr(dev->mac_addr))
		dev->max_iscsi_conn = 0;
}

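/* Point the kernel completion queues at their status block indices:
 * KCQ1 serves the iSCSI EQ on all chips, KCQ2 serves the FCoE EQ on
 * E2 chips only.
 */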
static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;

	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
	cp->kcq1.sw_prod_idx = 0;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	} else {
		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
				   USTORM_FCOE_EQ_PROD_OFFSET(pfid);
		cp->kcq2.sw_prod_idx = 0;
		cp->kcq2.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
		cp->kcq2.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}
}

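/* bnx2x-specific bring-up: derive the PF id from the port mode, set up
 * the CID tables and KCQs, program the iSCSI EQ page addresses and the
 * connection/global buffer page tables into the storms, then request
 * the IRQ.
 */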
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int func = CNIC_FUNC(cp), ret, i;
	u32 pfid;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);

		if (!(val & 1))
			val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
		else
			val = (val >> 1) & 1;

		if (val)
			cp->pfid = func >> 1;
		else
			cp->pfid = func & 0x6;
	} else {
		cp->pfid = func;
	}
	pfid = cp->pfid;

	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
			       cp->iscsi_start_cid);

	if (ret)
		return -ENOMEM;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
					BNX2X_FCOE_NUM_CONNECTIONS,
					cp->fcoe_start_cid);

		if (ret)
			return -ENOMEM;
	}

	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;

	cnic_init_bnx2x_kcq(dev);

	cnic_get_bnx2x_iscsi_info(dev);

	/* Only 1 EQ */
	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
		HC_INDEX_ISCSI_EQ_CONS);

	for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i),
			cp->conn_buf_info.pgtbl[2 * i]);
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4,
			cp->conn_buf_info.pgtbl[(2 * i) + 1]);
	}

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);

	cnic_setup_bnx2x_context(dev);

	ret = cnic_init_bnx2x_irq(dev);
	if (ret)
		return ret;

	return 0;
}

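/* Bring up the iSCSI L2 rings.  On bnx2x this posts the initial RX
 * producers, sends a CLIENT_SETUP ramrod with the ramrod data built by
 * the TX/RX helpers above, and polls briefly (10 x msleep(1)) for the
 * completion event before enabling the ring.
 */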
static void cnic_init_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		u32 cl_qzone_id, type;
		struct client_init_ramrod_data *data;
		union l5cm_specific_data l5_data;
		struct ustorm_eth_rx_producers rx_prods = {0};
		u32 off, i;

		rx_prods.bd_prod = 0;
		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
		barrier();

		cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);

		off = BAR_USTRORM_INTMEM +
			(BNX2X_CHIP_IS_E2(cp->chip_id) ?
			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
			 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));

		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		data = udev->l2_buf;

		memset(data, 0, sizeof(*data));

		cnic_init_bnx2x_tx_ring(dev, data);
		cnic_init_bnx2x_rx_ring(dev, data);

		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;

		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
		type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
			 SPE_HDR_FUNCTION_ID);

		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);

		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
			cid, type, &l5_data);

		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				"iSCSI CLIENT_SETUP did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
		cnic_ring_ctl(dev, cid, cli, 1);
	}
}

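/* Tear down the iSCSI L2 rings.  On bnx2x this disables the ring,
 * halts the client with an ETH_HALT ramrod (again polling briefly for
 * completion), then releases the connection with a CFC_DEL ramrod.
 */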
static void cnic_shutdown_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_shutdown_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		union l5cm_specific_data l5_data;
		int i;
		u32 type;

		cnic_ring_ctl(dev, cid, cli, 0);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
			cid, ETH_CONNECTION_TYPE, &l5_data);
		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				"iSCSI CLIENT_HALT did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);

		memset(&l5_data, 0, sizeof(l5_data));
		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
		type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
			 SPE_HDR_FUNCTION_ID);
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
			cid, type, &l5_data);
		msleep(10);
	}
	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
}

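/* Register this cnic instance with the underlying bnx2/bnx2x driver,
 * unless it has already been registered.
 */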
static int cnic_register_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (!ethdev)
		return -ENODEV;

	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
		return 0;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err)
		netdev_err(dev->netdev, "register_cnic failed\n");

	return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!ethdev)
		return;

	ethdev->drv_unregister_cnic(dev->netdev);
}

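/* Common bring-up path: pin the PCI device, pick up the status block
 * from the ethdev, allocate resources and run the chip-specific start
 * routine before opening the connection manager and enabling
 * interrupts.
 */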
static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		netdev_err(dev->netdev, "allocate resource failure\n");
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}

static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	cnic_free_resc(dev);
}

static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_free_irq(dev);
	*cp->kcq1.hw_prod_idx_ptr = 0;
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
	cnic_free_resc(dev);
}

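/* Common tear-down path: wait up to 1.5 s for userspace to release the
 * UIO device, shut down the rings, then run the chip-specific stop
 * routine.
 */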
static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		int i = 0;

		/* Need to wait for the ring shutdown event to complete
		 * before clearing the CNIC_UP flag.
		 */
		while (cp->udev->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		cnic_shutdown_rings(dev);
		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}

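/* Wait up to ~1 s for the last reference to drop, then free the
 * device; a leftover reference is logged but the free proceeds anyway.
 */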
static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}

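/* Allocate a cnic_dev with its cnic_local private area in one block
 * and set up the generic method pointers and L2 buffer defaults.
 */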
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL) {
		netdev_err(dev, "allocate dev struct failure\n");
		return NULL;
	}

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}

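/* Probe a bnx2 netdev through the exported bnx2_cnic_probe symbol,
 * reject early 5709/5709S revisions, and wire up the bnx2-specific
 * method table.
 */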
static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	    pdev->device == PCI_DEVICE_ID_NX2_5709S) {
		u8 rev;

		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
		if (rev < 0x10) {
			pci_dev_put(pdev);
			goto cnic_err;
		}
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	cp->next_idx = cnic_bnx2_next_idx;
	cp->hw_idx = cnic_bnx2_hw_idx;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}

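/* Probe a bnx2x netdev through the exported bnx2x_cnic_probe symbol
 * and wire up the bnx2x-specific method table, selecting the E2 MSI-X
 * ack helper on E2 chips.
 */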
static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2x_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2x_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	if (BNX2X_CHIP_IS_E2(cp->chip_id))
		cp->ack_int = cnic_ack_bnx2x_e2_msix;
	else
		cp->ack_int = cnic_ack_bnx2x_msix;
	cp->close_conn = cnic_close_bnx2x_conn;
	cp->next_idx = cnic_bnx2x_next_idx;
	cp->hw_idx = cnic_bnx2x_hw_idx;
	return cdev;
}

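/* Use ethtool drvinfo to decide whether a netdev belongs to bnx2 or
 * bnx2x and, if so, create a cnic device for it and add it to the
 * global list.
 */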
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

/* netdev event handler */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *netdev = ptr;
	struct cnic_dev *dev;
	int if_type;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		rcu_read_lock();
		for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
			struct cnic_ulp_ops *ulp_ops;
			void *ctx;

			ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
			if (!ulp_ops || !ulp_ops->indicate_netevent)
				continue;

			ctx = cp->ulp_handle[if_type];

			ulp_ops->indicate_netevent(ctx, event);
		}
		rcu_read_unlock();

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

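/* Module-unload cleanup: stop, unregister and free every cnic device,
 * then release any remaining UIO devices.
 */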
static void cnic_release(void)
{
	struct cnic_dev *dev;
	struct cnic_uio_dev *udev;

	while (!list_empty(&cnic_dev_list)) {
		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
		}

		cnic_ulp_exit(dev);
		cnic_unregister_netdev(dev);
		list_del_init(&dev->list);
		cnic_free_dev(dev);
	}
	while (!list_empty(&cnic_udev_list)) {
		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
				  list);
		cnic_free_uio(udev);
	}
}

static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	cnic_wq = create_singlethread_workqueue("cnic_wq");
	if (!cnic_wq) {
		cnic_release();
		unregister_netdevice_notifier(&cnic_netdev_notifier);
		return -ENOMEM;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);