/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

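/* Open handler for the UIO device exported to userspace (presumably the
 * userspace iSCSI helper).  It requires CAP_NET_ADMIN, allows only one
 * opener at a time, and re-initializes the L2 rings under rtnl_lock.
 */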
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}

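/* Plain atomic reference-count helpers; object lifetime is managed
 * elsewhere, these only track the number of active users.
 */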
static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

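/* The helpers below funnel register, context and L2 ring control
 * operations through the ethdev->drv_ctl() callback into the owning
 * bnx2/bnx2x driver, which mediates access to the device registers.
 */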
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

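/* Send an ISCSI_KEVENT_PATH_REQ (or IF_DOWN when @csk is NULL) message
 * up through the iSCSI ULP's netlink hook so userspace can resolve the
 * path (next-hop MAC, source IP) for the connection.  PATH_REQ is
 * retried a few times in case the receiver is not ready yet.
 */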
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return 0;
}

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

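/* SK_F_OFFLD_SCHED serializes offload against teardown of a connection:
 * cnic_offld_prep() claims the bit before scheduling an offload, while
 * the close/abort prep helpers spin until they own it.
 */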
static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

static void cnic_uio_stop(void)
{
	struct cnic_dev *dev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	}
	read_unlock(&cnic_dev_lock);
}

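/* ULP drivers (e.g. bnx2i for iSCSI) register a cnic_ulp_ops table per
 * ULP type.  Registration publishes the ops via RCU and then calls
 * cnic_init() on every existing cnic device under rtnl_lock.
 */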
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type]) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl[ulp_type];
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_uio_stop();

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type] == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl[ulp_type];
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;

}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

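/* Simple bitmap-based ID allocator.  cnic_alloc_new_id() hands out IDs
 * round-robin starting at 'next' so recently freed IDs are not reused
 * immediately; the wrap arithmetic assumes a power-of-2 table size.
 */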
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

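/* DMA helpers: a cnic_dma is an array of BCM_PAGE_SIZE coherent pages,
 * optionally described by a hardware page table of bus addresses
 * (cp->setup_pgtbl selects the byte order expected by the chip).
 */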
static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = (u32) dma->pg_map_arr[i];
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = dma->pg_map_arr[i] & 0xffffffff;
		page_table++;
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

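/* Allocate a kernel completion queue (KCQ).  On bnx2x the pages are
 * chained: the slot past the last KCQE of each page holds a
 * bnx2x_bd_chain_next entry pointing at the next page's bus address.
 */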
static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	int err, i, is_bnx2 = 0;
	struct kcqe **kcq;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
		is_bnx2 = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	if (is_bnx2)
		return 0;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}

static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;
	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		return -ENOMEM;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf)
		return -ENOMEM;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;
}

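/* Register the UIO device.  The mmap regions exported to userspace are:
 * mem[0] = device BAR, mem[1] = status block, mem[2] = L2 rings,
 * mem[3] = L2 buffers.
 */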
static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}

static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iro_arr = ethdev->iro_arr;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	if (start_cid < BNX2X_ISCSI_START_CID) {
		u32 delta = BNX2X_ISCSI_START_CID - start_cid;

		cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
		cp->max_cid_space += delta;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2);
		if (ret)
			goto error;
	}

	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

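/* The kernel work queue (KWQ) is a power-of-2 ring indexed by 16-bit
 * producer/consumer counters; available space is max_kwq_idx minus the
 * masked producer-consumer difference.
 */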
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}

static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

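/* On bnx2x, single 16-byte work queue entries go through the slow path
 * queue via drv_submit_kwqes_16(), which apparently returns the number
 * of WQEs accepted, so a return of 1 indicates success here.
 */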
static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
				u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));
	kwqe.hdr.type = cpu_to_le16(type);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return -EBUSY;
}

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}

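/* ISCSI_KWQE_OPCODE_INIT1 handler: derive the per-connection queue
 * sizes from the request and program them into the Tstorm, Ustorm,
 * Xstorm and Cstorm processor RAM for this PCI function (pfid).
 */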
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = cp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	return 0;
}

static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
	}
	cnic_free_id(&cp->cid_tbl, ctx->cid);
	ctx->cid = 0;
}

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}

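/* Locate the context memory for a given CID inside the aligned context
 * blocks, returning both the kernel virtual address and (via @ctx_addr)
 * the bus address that is handed to the chip.
 */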
1500static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
1501 struct regpair *ctx_addr)
1502{
1503 struct cnic_local *cp = dev->cnic_priv;
1504 struct cnic_eth_dev *ethdev = cp->ethdev;
1505 int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
1506 int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
1507 unsigned long align_off = 0;
1508 dma_addr_t ctx_map;
1509 void *ctx;
1510
1511 if (cp->ctx_align) {
1512 unsigned long mask = cp->ctx_align - 1;
1513
1514 if (cp->ctx_arr[blk].mapping & mask)
1515 align_off = cp->ctx_align -
1516 (cp->ctx_arr[blk].mapping & mask);
1517 }
1518 ctx_map = cp->ctx_arr[blk].mapping + align_off +
1519 (off * BNX2X_CONTEXT_MEM_SIZE);
1520 ctx = cp->ctx_arr[blk].ctx + align_off +
1521 (off * BNX2X_CONTEXT_MEM_SIZE);
1522 if (init)
1523 memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
1524
1525 ctx_addr->lo = ctx_map & 0xffffffff;
1526 ctx_addr->hi = (u64) ctx_map >> 32;
1527 return ctx;
1528}
1529
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;
}

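/* OFFLOAD_CONN1 handler: validates the multi-KWQE request, reserves a
 * connection slot, allocates per-connection resources and programs the
 * context.  A KCQE carrying the completion status is always generated,
 * so most failures are reported to the ULP rather than returned.
 */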
static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return ret;
}

static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

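/* Teardown helper: issues a CFC delete ramrod for the connection and
 * sleeps on ctx->waitq until the ramrod completion (delivered through
 * cnic_ctl()'s CNIC_CTL_COMPLETION_CMD path below) sets wait_cond.
 */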
static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid, type;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(cp, ctx->cid);
	type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
		& SPE_HDR_CONN_TYPE;
	type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, type, &l5_data);

	if (ret == 0)
		wait_event(ctx->waitq, ctx->wait_cond);

	return ret;
}

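/* DESTROY_CONN handler: if the connection was offloaded less than
 * 2 * HZ ago, the CFC delete is deferred to the delete_task workqueue
 * instead of being issued synchronously; a KCQE reply is sent either
 * way.
 */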
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);
	clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}

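/* TCP offload buffer setup: copies addressing, keepalive and buffer
 * parameters from the CONNECT_REQ1/3 KWQEs into the XSTORM/TSTORM
 * connection buffers, including a pseudo-header checksum precomputed
 * with csum_ipv6_magic() over the connection's address pair.
 */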
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
	tstorm_buf->snd_buf = kwqe3->snd_buf;
	tstorm_buf->max_rt_time = 0xffffffff;
}

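/* MAC programming: writes the device MAC one byte at a time into the
 * XSTORM and TSTORM internal memories at the per-PF offsets; the TSTORM
 * copy is split into LSB/MSB variables, which is why the byte order of
 * the writes differs between the two blocks.
 */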
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2,
		 mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3,
		 mac[0]);
}

static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}

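/* CONNECT1 handler: an IPv6 request spans three KWQEs and an IPv4
 * request two, so *work is set accordingly before validation.  The
 * connection buffer is staged through the 16-byte KWQE data area and a
 * TCP_CONNECT ramrod is submitted; on success the context is marked
 * with CTX_FL_OFFLD_START.
 */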
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
		kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}

static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

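/* KWQE dispatch loop for the bnx2x path: each handler reports via *work
 * how many WQEs it consumed, so multi-WQE commands (offload, connect)
 * advance the loop by more than one.  Handler errors are logged but do
 * not abort processing of the remaining WQEs.
 */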
static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				   u32 num_wqes)
{
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case ISCSI_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_INIT2:
			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
						     num_wqes - i, &work);
			break;
		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_CONNECT1:
			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
						 &work);
			break;
		case L4_KWQE_OPCODE_VALUE_CLOSE:
			ret = cnic_bnx2x_close(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_RESET:
			ret = cnic_bnx2x_reset(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
			ret = cnic_bnx2x_offload_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
			ret = cnic_bnx2x_update_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
			ret = 0;
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0)
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);
		i += work;
	}
	return 0;
}

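/* KCQE delivery: batches consecutive completions belonging to the same
 * protocol layer and hands each batch to the matching ULP's
 * indicate_kcqes() under rcu_read_lock.  Ramrod completions are counted
 * so the SPQ credits can be returned to the ethernet driver in one
 * cnic_spq_completion() call at the end.
 */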
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j, comp = 0;

	i = 0;
	j = 1;
	while (num_cqes) {
		struct cnic_ulp_ops *ulp_ops;
		int ulp_type;
		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
		u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;

		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
			comp++;

		while (j < num_cqes) {
			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;

			if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
				break;

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
				comp++;
			j++;
		}

		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
			ulp_type = CNIC_ULP_RDMA;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
			ulp_type = CNIC_ULP_ISCSI;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
			ulp_type = CNIC_ULP_L4;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
			goto end;
		else {
			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
				   kcqe_op_flag);
			goto end;
		}

		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
		if (likely(ulp_ops)) {
			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
						cp->completed_kcq + i, j);
		}
		rcu_read_unlock();
end:
		num_cqes -= j;
		i += j;
		j = 1;
	}
	if (unlikely(comp))
		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
}

static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}

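/* KCQ harvesting: walks the ring from the last software producer to the
 * hardware producer, collecting up to MAX_COMPLETED_KCQE entries, but
 * only advances sw_prod_idx past complete groups -- entries flagged
 * KCQE_FLAGS_NEXT are held back until their final fragment arrives.
 */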
static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
{
	struct cnic_local *cp = dev->cnic_priv;
	u16 i, ri, hw_prod, last;
	struct kcqe *kcqe;
	int kcqe_cnt = 0, last_cnt = 0;

	i = ri = last = info->sw_prod_idx;
	ri &= MAX_KCQ_IDX;
	hw_prod = *info->hw_prod_idx_ptr;
	hw_prod = cp->hw_idx(hw_prod);

	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
		cp->completed_kcq[kcqe_cnt++] = kcqe;
		i = cp->next_idx(i);
		ri = i & MAX_KCQ_IDX;
		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
			last_cnt = kcqe_cnt;
			last = i;
		}
	}

	info->sw_prod_idx = last;
	return last_cnt;
}

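/* L2 ring housekeeping (bnx2x only): scans the receive completion ring
 * for CLIENT_SETUP/HALT ramrod completions so that ring setup and
 * shutdown waiters tracked by CNIC_LCL_FL_L2_WAIT can be released in
 * cnic_chk_pkt_rings() below.
 */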
static int cnic_l2_completion(struct cnic_local *cp)
{
	u16 hw_cons, sw_cons;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
					(udev->l2_ring + (2 * BCM_PAGE_SIZE));
	u32 cmd;
	int comp = 0;

	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
		return 0;

	hw_cons = *cp->rx_cons_ptr;
	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
		hw_cons++;

	sw_cons = cp->rx_cons;
	while (sw_cons != hw_cons) {
		u8 cqe_fp_flags;

		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
			    cmd == RAMROD_CMD_ID_ETH_HALT)
				comp++;
		}
		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
	}
	return comp;
}

static void cnic_chk_pkt_rings(struct cnic_local *cp)
{
	u16 rx_cons, tx_cons;
	int comp = 0;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	rx_cons = *cp->rx_cons_ptr;
	tx_cons = *cp->tx_cons_ptr;
	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			comp = cnic_l2_completion(cp);

		cp->tx_cons = tx_cons;
		cp->rx_cons = rx_cons;

		if (cp->udev)
			uio_event_notify(&cp->udev->cnic_uinfo);
	}
	if (comp)
		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
}

static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
	int kcqe_cnt;

	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;

	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		if (status_idx != *cp->kcq1.status_idx_ptr) {
			status_idx = (u16) *cp->kcq1.status_idx_ptr;
			cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
		} else
			break;
	}

	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);

	cnic_chk_pkt_rings(cp);

	return status_idx;
}

static int cnic_service_bnx2(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		struct status_block *sblk = status_blk;

		return sblk->status_idx;
	}

	return cnic_service_bnx2_queues(dev);
}

static void cnic_service_bnx2_msix(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;

	cp->last_status_idx = cnic_service_bnx2_queues(dev);

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_doirq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;

		prefetch(cp->status_blk.gen);
		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

		tasklet_schedule(&cp->cnic_irq_task);
	}
}

static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
	struct cnic_dev *dev = dev_instance;
	struct cnic_local *cp = dev->cnic_priv;

	if (cp->ack_int)
		cp->ack_int(dev);

	cnic_doirq(dev);

	return IRQ_HANDLED;
}

static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
				      u16 index, u8 op, u8 update)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
}

static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
			    u16 index, u8 op, u8 update)
{
	struct igu_regular cmd_data;
	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;

	cmd_data.sb_id_and_flags =
		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
		(update << IGU_REGULAR_BUPDATE_SHIFT) |
		(op << IGU_REGULAR_ENABLE_INT_SHIFT);

	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
}

static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
			   IGU_INT_DISABLE, 0);
}

static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
			IGU_INT_DISABLE, 0);
}

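/* bnx2x KCQ service loop: drains one KCQ until the status index stops
 * moving, re-checking after each batch because the hardware may post
 * new completions while a batch is being processed.
 */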
static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	u32 last_status = *info->status_idx_ptr;
	int kcqe_cnt;

	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that sblk fields can change. */
		barrier();
		if (last_status == *info->status_idx_ptr)
			break;

		last_status = *info->status_idx_ptr;
	}
	return last_status;
}

static void cnic_service_bnx2x_bh(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return;

	status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);

	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);

		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
			  MAX_KCQ_IDX);

		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
				status_idx, IGU_INT_ENABLE, 1);
	} else {
		cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
				   status_idx, IGU_INT_ENABLE, 1);
	}
}

static int cnic_service_bnx2x(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct cnic_local *cp = dev->cnic_priv;

	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		cnic_doirq(dev);

	cnic_chk_pkt_rings(cp);

	return 0;
}

static void cnic_ulp_stop(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cp->ulp_ops[if_type];
		if (!ulp_ops) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_stop(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

static void cnic_ulp_start(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cp->ulp_ops[if_type];
		if (!ulp_ops || !ulp_ops->cnic_start) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_start(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
	struct cnic_dev *dev = data;

	switch (info->cmd) {
	case CNIC_CTL_STOP_CMD:
		cnic_hold(dev);

		cnic_ulp_stop(dev);
		cnic_stop_hw(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_START_CMD:
		cnic_hold(dev);

		if (!cnic_start_hw(dev))
			cnic_ulp_start(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_COMPLETION_CMD: {
		u32 cid = BNX2X_SW_CID(info->data.comp.cid);
		u32 l5_cid;
		struct cnic_local *cp = dev->cnic_priv;

		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

			ctx->wait_cond = 1;
			wake_up(&ctx->waitq);
		}
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

static void cnic_ulp_init(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl[i];
		if (!ulp_ops || !ulp_ops->cnic_init) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_init(dev);

		ulp_put(ulp_ops);
	}
}

static void cnic_ulp_exit(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl[i];
		if (!ulp_ops || !ulp_ops->cnic_exit) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_exit(dev);

		ulp_put(ulp_ops);
	}
}

static int cnic_cm_offload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_offload_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
	l4kwqe->l2hdr_nbytes = ETH_HLEN;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->sa0 = dev->mac_addr[0];
	l4kwqe->sa1 = dev->mac_addr[1];
	l4kwqe->sa2 = dev->mac_addr[2];
	l4kwqe->sa3 = dev->mac_addr[3];
	l4kwqe->sa4 = dev->mac_addr[4];
	l4kwqe->sa5 = dev->mac_addr[5];

	l4kwqe->etype = ETH_P_IP;
	l4kwqe->ipid_start = DEF_IPID_START;
	l4kwqe->host_opaque = csk->l5_cid;

	if (csk->vlan_id) {
		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
		l4kwqe->vlan_tag = csk->vlan_id;
		l4kwqe->l2hdr_nbytes += 4;
	}

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_update_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_update_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
	l4kwqe->pg_cid = csk->pg_cid;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->pg_host_opaque = csk->l5_cid;
	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_upload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_upload *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->pg_cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_conn_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_connect_req1 *l4kwqe1;
	struct l4_kwq_connect_req2 *l4kwqe2;
	struct l4_kwq_connect_req3 *l4kwqe3;
	struct kwqe *wqes[3];
	u8 tcp_flags = 0;
	int num_wqes = 2;

	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
	memset(l4kwqe3, 0, sizeof(*l4kwqe3));

	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
	l4kwqe3->flags =
		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
	l4kwqe3->ka_timeout = csk->ka_timeout;
	l4kwqe3->ka_interval = csk->ka_interval;
	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
	l4kwqe3->tos = csk->tos;
	l4kwqe3->ttl = csk->ttl;
	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
	l4kwqe3->pmtu = csk->mtu;
	l4kwqe3->rcv_buf = csk->rcv_buf;
	l4kwqe3->snd_buf = csk->snd_buf;
	l4kwqe3->seed = csk->seed;

	wqes[0] = (struct kwqe *) l4kwqe1;
	if (test_bit(SK_F_IPV6, &csk->flags)) {
		wqes[1] = (struct kwqe *) l4kwqe2;
		wqes[2] = (struct kwqe *) l4kwqe3;
		num_wqes = 3;

		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
		l4kwqe2->flags =
			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
			       sizeof(struct tcphdr);
	} else {
		wqes[1] = (struct kwqe *) l4kwqe3;
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
			       sizeof(struct tcphdr);
	}

	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
	l4kwqe1->flags =
		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
	l4kwqe1->cid = csk->cid;
	l4kwqe1->pg_cid = csk->pg_cid;
	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
	if (csk->tcp_flags & SK_TCP_NAGLE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
	if (csk->tcp_flags & SK_TCP_SACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;

	l4kwqe1->tcp_flags = tcp_flags;

	return dev->submit_kwqes(dev, wqes, num_wqes);
}

static int cnic_cm_close_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_close_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_abort_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_reset_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

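/* Socket slot creation: claims the cnic_sock entry for l5_cid, refusing
 * slots whose offload teardown is still pending, and seeds it with the
 * default TCP parameters (keepalive, TOS/TTL, buffer sizes).
 */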
static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
			  u32 l5_cid, struct cnic_sock **csk, void *context)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_sock *csk1;

	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return -EINVAL;

	if (cp->ctx_tbl) {
		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			return -EAGAIN;
	}

	csk1 = &cp->csk_tbl[l5_cid];
	if (atomic_read(&csk1->ref_count))
		return -EAGAIN;

	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
		return -EBUSY;

	csk1->dev = dev;
	csk1->cid = cid;
	csk1->l5_cid = l5_cid;
	csk1->ulp_type = ulp_type;
	csk1->context = context;

	csk1->ka_timeout = DEF_KA_TIMEOUT;
	csk1->ka_interval = DEF_KA_INTERVAL;
	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
	csk1->tos = DEF_TOS;
	csk1->ttl = DEF_TTL;
	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
	csk1->rcv_buf = DEF_RCV_BUF;
	csk1->snd_buf = DEF_SND_BUF;
	csk1->seed = DEF_SEED;

	*csk = csk1;
	return 0;
}

static void cnic_cm_cleanup(struct cnic_sock *csk)
{
	if (csk->src_port) {
		struct cnic_dev *dev = csk->dev;
		struct cnic_local *cp = dev->cnic_priv;

		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
		csk->src_port = 0;
	}
}

static void cnic_close_conn(struct cnic_sock *csk)
{
	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
		cnic_cm_upload_pg(csk);
		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	}
	cnic_cm_cleanup(csk);
}

static int cnic_cm_destroy(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	csk_hold(csk);
	clear_bit(SK_F_INUSE, &csk->flags);
	smp_mb__after_clear_bit();
	while (atomic_read(&csk->ref_count) != 1)
		msleep(1);
	cnic_cm_cleanup(csk);

	csk->flags = 0;
	csk_put(csk);
	return 0;
}

static inline u16 cnic_get_vlan(struct net_device *dev,
				struct net_device **vlan_dev)
{
	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		*vlan_dev = vlan_dev_real_dev(dev);
		return vlan_dev_vlan_id(dev);
	}
	*vlan_dev = dev;
	return 0;
}

static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_INET)
	struct flowi fl;
	int err;
	struct rtable *rt;

	memset(&fl, 0, sizeof(fl));
	fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;

	err = ip_route_output_key(&init_net, &rt, &fl);
	if (!err)
		*dst = &rt->dst;
	return err;
#else
	return -ENETUNREACH;
#endif
}

static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
	if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
		fl.oif = dst_addr->sin6_scope_id;

	*dst = ip6_route_output(&init_net, NULL, &fl);
	if (*dst)
		return 0;
#endif

	return -ENETUNREACH;
}

static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
					   int ulp_type)
{
	struct cnic_dev *dev = NULL;
	struct dst_entry *dst;
	struct net_device *netdev = NULL;
	int err = -ENETUNREACH;

	if (dst_addr->sin_family == AF_INET)
		err = cnic_get_v4_route(dst_addr, &dst);
	else if (dst_addr->sin_family == AF_INET6) {
		struct sockaddr_in6 *dst_addr6 =
			(struct sockaddr_in6 *) dst_addr;

		err = cnic_get_v6_route(dst_addr6, &dst);
	} else
		return NULL;

	if (err)
		return NULL;

	if (!dst->dev)
		goto done;

	cnic_get_vlan(dst->dev, &netdev);

	dev = cnic_from_netdev(netdev);

done:
	dst_release(dst);
	if (dev)
		cnic_put(dev);
	return dev;
}

static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
}

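/* Route/port resolution: fills in destination address, VLAN and MTU
 * from the routing lookup, then reserves a local TCP port -- either the
 * caller-supplied one, if it lies in the CNIC_LOCAL_PORT range and is
 * free, or a newly allocated one from csk_port_tbl.
 */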
static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	int is_v6, rc = 0;
	struct dst_entry *dst = NULL;
	struct net_device *realdev;
	__be16 local_port;
	u32 port_id;

	if (saddr->local.v6.sin6_family == AF_INET6 &&
	    saddr->remote.v6.sin6_family == AF_INET6)
		is_v6 = 1;
	else if (saddr->local.v4.sin_family == AF_INET &&
		 saddr->remote.v4.sin_family == AF_INET)
		is_v6 = 0;
	else
		return -EINVAL;

	clear_bit(SK_F_IPV6, &csk->flags);

	if (is_v6) {
		set_bit(SK_F_IPV6, &csk->flags);
		cnic_get_v6_route(&saddr->remote.v6, &dst);

		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
		       sizeof(struct in6_addr));
		csk->dst_port = saddr->remote.v6.sin6_port;
		local_port = saddr->local.v6.sin6_port;

	} else {
		cnic_get_v4_route(&saddr->remote.v4, &dst);

		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
		csk->dst_port = saddr->remote.v4.sin_port;
		local_port = saddr->local.v4.sin_port;
	}

	csk->vlan_id = 0;
	csk->mtu = dev->netdev->mtu;
	if (dst && dst->dev) {
		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
		if (realdev == dev->netdev) {
			csk->vlan_id = vlan;
			csk->mtu = dst_mtu(dst);
		}
	}

	port_id = be16_to_cpu(local_port);
	if (port_id >= CNIC_LOCAL_PORT_MIN &&
	    port_id < CNIC_LOCAL_PORT_MAX) {
		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
			port_id = 0;
	} else
		port_id = 0;

	if (!port_id) {
		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
		if (port_id == -1) {
			rc = -ENOMEM;
			goto err_out;
		}
		local_port = cpu_to_be16(port_id);
	}
	csk->src_port = local_port;

err_out:
	dst_release(dst);
	return rc;
}

static void cnic_init_csk_state(struct cnic_sock *csk)
{
	csk->state = 0;
	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
	clear_bit(SK_F_CLOSING, &csk->flags);
}

static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	int err = 0;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
		return -EINVAL;

	cnic_init_csk_state(csk);

	err = cnic_get_route(csk, saddr);
	if (err)
		goto err_out;

	err = cnic_resolve_addr(csk, saddr);
	if (!err)
		return 0;

err_out:
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	return err;
}

static int cnic_cm_abort(struct cnic_sock *csk)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_abort_prep(csk))
		return cnic_cm_abort_req(csk);

	/* Getting here means that we haven't started connect, or
	 * connect was not successful.
	 */

	cp->close_conn(csk, opcode);
	if (csk->state != opcode)
		return -EALREADY;

	return 0;
}

static int cnic_cm_close(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_close_prep(csk)) {
		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		return cnic_cm_close_req(csk);
	}
	return -EALREADY;
}

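/* Deliver an L4 completion event to the registered upper-layer driver
 * (e.g. bnx2i), translating the KCQE opcode into the matching cm_*
 * callback under rcu_read_lock().
 */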
static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
			   u8 opcode)
{
	struct cnic_ulp_ops *ulp_ops;
	int ulp_type = csk->ulp_type;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (ulp_ops) {
		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
			ulp_ops->cm_connect_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
			ulp_ops->cm_close_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
			ulp_ops->cm_remote_abort(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
			ulp_ops->cm_abort_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
			ulp_ops->cm_remote_close(csk);
	}
	rcu_read_unlock();
}

static int cnic_cm_set_pg(struct cnic_sock *csk)
{
	if (cnic_offld_prep(csk)) {
		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			cnic_cm_update_pg(csk);
		else
			cnic_cm_offload_pg(csk);
	}
	return 0;
}

static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 l5_cid = kcqe->pg_host_opaque;
	u8 opcode = kcqe->op_code;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];

	csk_hold(csk);
	if (!cnic_in_use(csk))
		goto done;

	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		goto done;
	}
	/* Possible PG kcqe status:  SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
	if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk,
			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		goto done;
	}

	csk->pg_cid = kcqe->pg_cid;
	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	cnic_cm_conn_req(csk);

done:
	csk_put(csk);
}

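/* Demultiplex one L4 KCQE: PG offload/update completions are handed to
 * cnic_cm_process_offld_pg(); all other events are looked up by l5_cid
 * in the socket table and handled per opcode with a reference held on
 * the socket.
 */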
static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
	u8 opcode = l4kcqe->op_code;
	u32 l5_cid;
	struct cnic_sock *csk;

	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		cnic_cm_process_offld_pg(dev, l4kcqe);
		return;
	}

	l5_cid = l4kcqe->conn_id;
	if (opcode & 0x80)
		l5_cid = l4kcqe->cid;
	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return;

	csk = &cp->csk_tbl[l5_cid];
	csk_hold(csk);

	if (!cnic_in_use(csk)) {
		csk_put(csk);
		return;
	}

	switch (opcode) {
	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
		if (l4kcqe->status != 0) {
			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
			cnic_cm_upcall(cp, csk,
				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		}
		break;
	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
		if (l4kcqe->status == 0)
			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);

		smp_mb__before_clear_bit();
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		cp->close_conn(csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
		cnic_cm_upcall(cp, csk, opcode);
		break;
	}
	csk_put(csk);
}

static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
{
	struct cnic_dev *dev = data;
	int i;

	for (i = 0; i < num; i++)
		cnic_cm_process_kcqe(dev, kcqe[i]);
}

static struct cnic_ulp_ops cm_ulp_ops = {
	.indicate_kcqes		= cnic_cm_indicate_kcqe,
};

static void cnic_cm_free_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	kfree(cp->csk_tbl);
	cp->csk_tbl = NULL;
	cnic_free_id_tbl(&cp->csk_port_tbl);
}

static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
			      GFP_KERNEL);
	if (!cp->csk_tbl)
		return -ENOMEM;

	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
			     CNIC_LOCAL_PORT_MIN)) {
		cnic_cm_free_mem(dev);
		return -ENOMEM;
	}
	return 0;
}

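/* Decide whether a close/reset completion event transitions the socket
 * into the closing state.  Returns 1 exactly once per connection, when
 * SK_F_CLOSING is first set.
 */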
static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
{
	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		/* Unsolicited RESET_COMP or RESET_RECEIVED */
		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
		csk->state = opcode;
	}

	/* 1. If event opcode matches the expected event in csk->state
	 * 2. If the expected event is CLOSE_COMP, we accept any event
	 * 3. If the expected event is 0, meaning the connection was never
	 *    established, we accept the opcode from cm_abort.
	 */
	if (opcode == csk->state || csk->state == 0 ||
	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) {
		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
			if (csk->state == 0)
				csk->state = opcode;
			return 1;
		}
	}
	return 0;
}

static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
		cnic_cm_upcall(cp, csk, opcode);
		return;
	}

	clear_bit(SK_F_CONNECT_START, &csk->flags);
	cnic_close_conn(csk);
	csk->state = opcode;
	cnic_cm_upcall(cp, csk, opcode);
}

static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
{
}

static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
	u32 seed;

	get_random_bytes(&seed, 4);
	cnic_ctx_wr(dev, 45, 0, seed);
	return 0;
}

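/* bnx2x connections tear down in up to three stages: a RESET/CLOSE
 * completion triggers a SEARCHER_DELETE ramrod (if the PG was
 * offloaded), whose completion triggers TERMINATE_OFFLOAD, whose
 * completion finally closes the connection and notifies the ULP.
 */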
static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
	union l5cm_specific_data l5_data;
	u32 cmd = 0;
	int close_complete = 0;

	switch (opcode) {
	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
		if (cnic_ready_to_close(csk, opcode)) {
			if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
			else
				close_complete = 1;
		}
		break;
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
		break;
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		close_complete = 1;
		break;
	}
	if (cmd) {
		memset(&l5_data, 0, sizeof(l5_data));

		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
				    &l5_data);
	} else if (close_complete) {
		ctx->timestamp = jiffies;
		cnic_close_conn(csk);
		cnic_cm_upcall(cp, csk, csk->state);
	}
}

static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	if (!cp->ctx_tbl)
		return;

	if (!netif_running(dev->netdev))
		return;

	for (i = 0; i < cp->max_cid_space; i++) {
		struct cnic_context *ctx = &cp->ctx_tbl[i];

		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			msleep(10);

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			netdev_warn(dev->netdev, "CID %x not deleted\n",
				    ctx->cid);
	}

	cancel_delayed_work(&cp->delete_task);
	flush_workqueue(cnic_wq);

	if (atomic_read(&cp->iscsi_conn) != 0)
		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
			    atomic_read(&cp->iscsi_conn));
}

static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u32 port = CNIC_PORT(cp);

	cnic_init_bnx2x_mac(dev);
	cnic_bnx2x_set_tcp_timestamp(dev, 1);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);

	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
		DEF_MAX_DA_COUNT);

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
		DEF_MAX_CWND);
	return 0;
}

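/* Delayed-work handler that retires contexts flagged CTX_FL_DELETE_WAIT.
 * A context is destroyed only after a 2 second grace period from its
 * close timestamp; if any context is not yet due, the work is
 * rescheduled.
 */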
static void cnic_delete_task(struct work_struct *work)
{
	struct cnic_local *cp;
	struct cnic_dev *dev;
	u32 i;
	int need_resched = 0;

	cp = container_of(work, struct cnic_local, delete_task.work);
	dev = cp->dev;

	for (i = 0; i < cp->max_cid_space; i++) {
		struct cnic_context *ctx = &cp->ctx_tbl[i];

		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			continue;

		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
			need_resched = 1;
			continue;
		}

		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			continue;

		cnic_bnx2x_destroy_ramrod(dev, i);

		cnic_free_bnx2x_conn_resc(dev, i);
		if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
			atomic_dec(&cp->iscsi_conn);

		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
	}

	if (need_resched)
		queue_delayed_work(cnic_wq, &cp->delete_task,
				   msecs_to_jiffies(10));
}

static int cnic_cm_open(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int err;

	err = cnic_cm_alloc_mem(dev);
	if (err)
		return err;

	err = cp->start_cm(dev);

	if (err)
		goto err_out;

	INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);

	dev->cm_create = cnic_cm_create;
	dev->cm_destroy = cnic_cm_destroy;
	dev->cm_connect = cnic_cm_connect;
	dev->cm_abort = cnic_cm_abort;
	dev->cm_close = cnic_cm_close;
	dev->cm_select_dev = cnic_cm_select_dev;

	cp->ulp_handle[CNIC_ULP_L4] = dev;
	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
	return 0;

err_out:
	cnic_cm_free_mem(dev);
	return err;
}

static int cnic_cm_shutdown(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	cp->stop_cm(dev);

	if (!cp->csk_tbl)
		return 0;

	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
		struct cnic_sock *csk = &cp->csk_tbl[i];

		clear_bit(SK_F_INUSE, &csk->flags);
		cnic_cm_cleanup(csk);
	}
	cnic_cm_free_mem(dev);

	return 0;
}

static void cnic_init_context(struct cnic_dev *dev, u32 cid)
{
	u32 cid_addr;
	int i;

	cid_addr = GET_CID_ADDR(cid);

	for (i = 0; i < CTX_SIZE; i += 4)
		cnic_ctx_wr(dev, cid_addr, i, 0);
}

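/* On the 5709, context memory is paged through host memory.  Program
 * the host page table with each context block's DMA address and poll
 * briefly for each write to be accepted.
 */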
static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret = 0, i;
	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;

	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		return 0;

	for (i = 0; i < cp->ctx_blks; i++) {
		int j;
		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
		u32 val;

		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);

		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) cp->ctx_arr[i].mapping >> 32);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {
			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static void cnic_free_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		cp->disable_int_sync(dev);
		tasklet_kill(&cp->cnic_irq_task);
		free_irq(ethdev->irq_arr[0].vector, dev);
	}
}

static int cnic_request_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
	if (err)
		tasklet_disable(&cp->cnic_irq_task);

	return err;
}

static int cnic_init_bnx2_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		int err, i = 0;
		int sblk_num = cp->status_blk_num;
		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);

		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
		tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
			     (unsigned long) dev);
		err = cnic_request_irq(dev);
		if (err)
			return err;

		while (cp->status_blk.bnx2->status_completion_producer_index &&
		       i < 10) {
			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
				1 << (11 + sblk_num));
			udelay(10);
			i++;
			barrier();
		}
		if (cp->status_blk.bnx2->status_completion_producer_index) {
			cnic_free_irq(dev);
			goto failed;
		}

	} else {
		struct status_block *sblk = cp->status_blk.gen;
		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
		int i = 0;

		while (sblk->status_completion_producer_index && i < 10) {
			CNIC_WR(dev, BNX2_HC_COMMAND,
				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
			udelay(10);
			i++;
			barrier();
		}
		if (sblk->status_completion_producer_index)
			goto failed;

	}
	return 0;

failed:
	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
	return -EBUSY;
}

static void cnic_enable_bnx2_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
	synchronize_irq(ethdev->irq_arr[0].vector);
}

static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct cnic_uio_dev *udev = cp->udev;
	u32 cid_addr, tx_cid, sb_id;
	u32 val, offset0, offset1, offset2, offset3;
	int i;
	struct tx_bd *txbd;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct status_block *s_blk = cp->status_blk.gen;

	sb_id = cp->status_blk_num;
	tx_cid = 20;
	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		tx_cid = TX_TSS_CID + sb_id - 1;
		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
			(TX_TSS_CID << 7));
		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
	}
	cp->tx_cons = *cp->tx_cons_ptr;

	cid_addr = GET_CID_ADDR(tx_cid);
	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;

		for (i = 0; i < PHY_CTX_SIZE; i += 4)
			cnic_ctx_wr(dev, cid_addr2, i, 0);

		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		cnic_init_context(dev, tx_cid);
		cnic_init_context(dev, tx_cid + 1);

		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	cnic_ctx_wr(dev, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	cnic_ctx_wr(dev, cid_addr, offset1, val);

	txbd = (struct tx_bd *) udev->l2_ring;

	buf_map = udev->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) ring_map >> 32;
	cnic_ctx_wr(dev, cid_addr, offset2, val);
	txbd->tx_bd_haddr_hi = val;

	val = (u64) ring_map & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, offset3, val);
	txbd->tx_bd_haddr_lo = val;
}

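/* Set up the bnx2 L2 rx ring: select the consumer index pointer for
 * MSI-X vs. default status blocks, populate the ring BDs with the UIO
 * buffer addresses, and set this ring's bit in the RXP scratch flood
 * register.
 */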
static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct cnic_uio_dev *udev = cp->udev;
	u32 cid_addr, sb_id, val, coal_reg, coal_val;
	int i;
	struct rx_bd *rxbd;
	struct status_block *s_blk = cp->status_blk.gen;
	dma_addr_t ring_map = udev->l2_ring_map;

	sb_id = cp->status_blk_num;
	cnic_init_context(dev, 2);
	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
	coal_reg = BNX2_HC_COMMAND;
	coal_val = CNIC_RD(dev, coal_reg);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
		coal_reg = BNX2_HC_COALESCE_NOW;
		coal_val = 1 << (11 + sb_id);
	}
	i = 0;
	while (*cp->rx_cons_ptr == 0 && i < 10) {
		CNIC_WR(dev, coal_reg, coal_val);
		udelay(10);
		i++;
		barrier();
	}
	cp->rx_cons = *cp->rx_cons_ptr;

	cid_addr = GET_CID_ADDR(2);
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	if (sb_id == 0)
		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
	else
		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);

	rxbd = (struct rx_bd *) (udev->l2_ring + BCM_PAGE_SIZE);
	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->rx_bd_len = cp->l2_single_buf_size;
		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
	rxbd->rx_bd_haddr_hi = val;

	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
	rxbd->rx_bd_haddr_lo = val;

	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
}

static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct kwqe *wqes[1], l2kwqe;

	memset(&l2kwqe, 0, sizeof(l2kwqe));
	wqes[0] = &l2kwqe;
	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
			       KWQE_OPCODE_SHIFT) | 2;
	dev->submit_kwqes(dev, wqes, 1);
}

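/* Read the iSCSI MAC address for this function from bnx2 shared memory
 * into dev->mac_addr, and program the EMAC match and RPM sort registers
 * so the chip accepts frames addressed to it.
 */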
static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 val;

	val = cp->func << 2;

	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);

	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;

	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}

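/* Bring up the bnx2 kernel queues: size the MQ kernel bypass block,
 * set up 5709 paged context memory if applicable, initialize the KWQ
 * and KCQ contexts, start the L2 rings, and hook up the IRQ.
 */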
static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct status_block *sblk = cp->status_blk.gen;
	u32 val, kcq_cid_addr, kwq_cid_addr;
	int err;

	cnic_set_bnx2_mac(dev);

	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	if (BCM_PAGE_BITS > 12)
		val |= (12 - 8) << 4;
	else
		val |= (BCM_PAGE_BITS - 8) << 4;

	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq1.sw_prod_idx = 0;
	cp->kcq1.hw_prod_idx_ptr =
		(u16 *) &sblk->status_completion_producer_index;

	cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq1.dma.pgtbl_map;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->int_num = 0;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *msblk = cp->status_blk.bnx2;
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

		cp->kcq1.hw_prod_idx_ptr =
			(u16 *) &msblk->status_completion_producer_index;
		cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
		cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver posts an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non-zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		netdev_err(dev->netdev, "cnic_init_irq failed\n");
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	return 0;
}

static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_offset = ethdev->ctx_tbl_offset;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		struct cnic_ctx *ctx = &cp->ctx_arr[i];
		dma_addr_t map = ctx->mapping;

		if (cp->ctx_align) {
			unsigned long mask = cp->ctx_align - 1;

			map = (map + mask) & ~mask;
		}

		cnic_ctx_tbl_wr(dev, start_offset + i, map);
	}
}

static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
		     (unsigned long) dev);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
		err = cnic_request_irq(dev);

	return err;
}

static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
						u16 sb_id, u8 sb_index,
						u8 disable)
{
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data) * sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = CNIC_RD16(dev, addr);

	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
		  HC_INDEX_DATA_HC_ENABLED);
	CNIC_WR16(dev, addr, flags);
}

static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 sb_id = cp->status_blk_num;

	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
		 offsetof(struct hc_status_block_data_e1x, index_data) +
		 sizeof(struct hc_index_data) * HC_INDEX_ISCSI_EQ_CONS +
		 offsetof(struct hc_index_data, timeout), 64 / 12);
	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}

static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}

static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int port = CNIC_PORT(cp);
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	u32 val;

	memset(txbd, 0, BCM_PAGE_SIZE);

	buf_map = udev->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
		reg_bd->addr_hi = start_bd->addr_hi;
		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
		start_bd->nbytes = cpu_to_le16(0x10);
		start_bd->nbd = cpu_to_le16(3);
		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
		start_bd->general_data = (UNICAST_ADDRESS <<
			ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
	}

	val = (u64) ring_map >> 32;
	txbd->next_bd.addr_hi = cpu_to_le32(val);

	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);

	val = (u64) ring_map & 0xffffffff;
	txbd->next_bd.addr_lo = cpu_to_le32(val);

	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;

	/* reset xstorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		val = BAR_XSTRORM_INTMEM +
		      XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);
	}

	cp->tx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
}

static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
				BCM_PAGE_SIZE);
	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
				(udev->l2_ring + (2 * BCM_PAGE_SIZE));
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	int port = CNIC_PORT(cp);
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
	u32 val;
	dma_addr_t ring_map = udev->l2_ring_map;

	/* General data */
	data->general.client_id = cli;
	data->general.statistics_en_flg = 1;
	data->general.statistics_counter_id = cli;
	data->general.activate_flg = 1;
	data->general.sp_client_id = cli;

	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
	}

	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
	rxbd->addr_hi = cpu_to_le32(val);
	data->rx.bd_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	rxbd->addr_lo = cpu_to_le32(val);
	data->rx.bd_page_base.lo = cpu_to_le32(val);

	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
	rxcqe->addr_hi = cpu_to_le32(val);
	data->rx.cqe_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
	rxcqe->addr_lo = cpu_to_le32(val);
	data->rx.cqe_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->rx.client_qzone_id = cl_qzone_id;
	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
	data->rx.status_block_id = BNX2X_DEF_SB_ID;

	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
	data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size);

	data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
	data->rx.outer_vlan_removal_enable_flg = 1;

	/* reset tstorm and ustorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		val = BAR_TSTRORM_INTMEM +
		      TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);

		val = BAR_USTRORM_INTMEM +
		      USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);
	}

	cp->rx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
	cp->rx_cons = *cp->rx_cons_ptr;
}

static int cnic_read_bnx2x_iscsi_mac(struct cnic_dev *dev, u32 upper_addr,
				     u32 lower_addr)
{
	u32 val;
	u8 mac[6];

	val = CNIC_RD(dev, upper_addr);

	mac[0] = (u8) (val >> 8);
	mac[1] = (u8) val;

	val = CNIC_RD(dev, lower_addr);

	mac[2] = (u8) (val >> 24);
	mac[3] = (u8) (val >> 16);
	mac[4] = (u8) (val >> 8);
	mac[5] = (u8) val;

	if (is_valid_ether_addr(mac)) {
		memcpy(dev->mac_addr, mac, 6);
		return 0;
	} else {
		return -EINVAL;
	}
}

static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 base, base2, addr, addr1, val;
	int port = CNIC_PORT(cp);

	dev->max_iscsi_conn = 0;
	base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
	if (base == 0)
		return;

	base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 :
			      MISC_REG_GENERIC_CR_0));
	addr = BNX2X_SHMEM_ADDR(base,
			dev_info.port_hw_config[port].iscsi_mac_upper);

	addr1 = BNX2X_SHMEM_ADDR(base,
			dev_info.port_hw_config[port].iscsi_mac_lower);

	cnic_read_bnx2x_iscsi_mac(dev, addr, addr1);

	addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
	val = CNIC_RD(dev, addr);

	if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
		u16 val16;

		addr = BNX2X_SHMEM_ADDR(base,
				drv_lic_key[port].max_iscsi_init_conn);
		val16 = CNIC_RD16(dev, addr);

		if (val16)
			val16 ^= 0x1e1e;
		dev->max_iscsi_conn = val16;
	}
	if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
		int func = CNIC_FUNC(cp);
		u32 mf_cfg_addr;

		if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr))
			mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2,
					      mf_cfg_addr));
		else
			mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;

		if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
			/* Must determine if the MF is SD vs SI mode */
			addr = BNX2X_SHMEM_ADDR(base,
					dev_info.shared_feature_config.config);
			val = CNIC_RD(dev, addr);
			if ((val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) ==
			    SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT) {
				int rc;

				/* MULTI_FUNCTION_SI mode */
				addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
					func_ext_config[func].func_cfg);
				val = CNIC_RD(dev, addr);
				if (!(val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD))
					dev->max_iscsi_conn = 0;

				addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
					func_ext_config[func].
					iscsi_mac_addr_upper);
				addr1 = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
					func_ext_config[func].
					iscsi_mac_addr_lower);
				rc = cnic_read_bnx2x_iscsi_mac(dev, addr,
							       addr1);
				if (rc && func > 1)
					dev->max_iscsi_conn = 0;

				return;
			}
		}

		addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
					 func_mf_config[func].e1hov_tag);

		val = CNIC_RD(dev, addr);
		val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
			addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
						 func_mf_config[func].config);
			val = CNIC_RD(dev, addr);
			val &= FUNC_MF_CFG_PROTOCOL_MASK;
			if (val != FUNC_MF_CFG_PROTOCOL_ISCSI)
				dev->max_iscsi_conn = 0;
		}
	}
	if (!is_valid_ether_addr(dev->mac_addr))
		dev->max_iscsi_conn = 0;
}

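/* Point kcq1 (the iSCSI event queue) and, on E2 (57712) chips, kcq2
 * (the FCoE event queue) at their producer and status-index locations
 * in the chip-specific host status block.
 */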
Michael Chane21ba412010-12-23 07:43:03 +00004398static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
4399{
4400 struct cnic_local *cp = dev->cnic_priv;
4401 u32 pfid = cp->pfid;
4402
4403 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
4404 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
4405 cp->kcq1.sw_prod_idx = 0;
4406
4407 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4408 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
4409
4410 cp->kcq1.hw_prod_idx_ptr =
4411 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4412 cp->kcq1.status_idx_ptr =
4413 &sb->sb.running_index[SM_RX_ID];
4414 } else {
4415 struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
4416
4417 cp->kcq1.hw_prod_idx_ptr =
4418 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4419 cp->kcq1.status_idx_ptr =
4420 &sb->sb.running_index[SM_RX_ID];
4421 }
4422
4423 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4424 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
4425
4426 cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
4427 USTORM_FCOE_EQ_PROD_OFFSET(pfid);
4428 cp->kcq2.sw_prod_idx = 0;
4429 cp->kcq2.hw_prod_idx_ptr =
4430 &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
4431 cp->kcq2.status_idx_ptr =
4432 &sb->sb.running_index[SM_RX_ID];
4433 }
4434}
4435
Michael Chan71034ba2009-10-10 13:46:59 +00004436static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4437{
4438 struct cnic_local *cp = dev->cnic_priv;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004439 struct cnic_eth_dev *ethdev = cp->ethdev;
Michael Chan71034ba2009-10-10 13:46:59 +00004440 int func = CNIC_FUNC(cp), ret, i;
Michael Chan14203982010-10-06 03:16:06 +00004441 u32 pfid;
Michael Chan71034ba2009-10-10 13:46:59 +00004442
Michael Chanee87a822010-10-13 14:06:51 +00004443 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4444 u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
4445
4446 if (!(val & 1))
4447 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
4448 else
4449 val = (val >> 1) & 1;
4450
4451 if (val)
4452 cp->pfid = func >> 1;
4453 else
4454 cp->pfid = func & 0x6;
4455 } else {
4456 cp->pfid = func;
4457 }
Michael Chan14203982010-10-06 03:16:06 +00004458 pfid = cp->pfid;
4459
Michael Chan71034ba2009-10-10 13:46:59 +00004460 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
Michael Chan520efdf2010-06-24 14:58:37 +00004461 cp->iscsi_start_cid);
Michael Chan71034ba2009-10-10 13:46:59 +00004462
4463 if (ret)
4464 return -ENOMEM;
4465
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004466 cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
4467
Michael Chane21ba412010-12-23 07:43:03 +00004468 cnic_init_bnx2x_kcq(dev);
Michael Chan71034ba2009-10-10 13:46:59 +00004469
4470 cnic_get_bnx2x_iscsi_info(dev);
4471
4472 /* Only 1 EQ */
	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
		HC_INDEX_ISCSI_EQ_CONS);

	for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i),
			cp->conn_buf_info.pgtbl[2 * i]);
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4,
			cp->conn_buf_info.pgtbl[(2 * i) + 1]);
	}

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);

	cnic_setup_bnx2x_context(dev);

	ret = cnic_init_bnx2x_irq(dev);
	if (ret)
		return ret;

	return 0;
}

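/* Bring up the L2 rings that back the UIO interface.  bnx2 rings are
 * programmed directly; bnx2x rings are set up by a CLIENT_SETUP ramrod
 * whose parameters are built in the udev L2 buffer, followed by a
 * polling wait (up to ~10 ms) for the ramrod to complete.
 */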
static void cnic_init_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		u32 cl_qzone_id, type;
		struct client_init_ramrod_data *data;
		union l5cm_specific_data l5_data;
		struct ustorm_eth_rx_producers rx_prods = {0};
		u32 off, i;

		rx_prods.bd_prod = 0;
		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
		barrier();

		cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);

		off = BAR_USTRORM_INTMEM +
			(BNX2X_CHIP_IS_E2(cp->chip_id) ?
			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
			 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));

		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		data = udev->l2_buf;

		memset(data, 0, sizeof(*data));

		cnic_init_bnx2x_tx_ring(dev, data);
		cnic_init_bnx2x_rx_ring(dev, data);

		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;

		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
		type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
			 SPE_HDR_FUNCTION_ID);

		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);

		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
				    cid, type, &l5_data);

		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				   "iSCSI CLIENT_SETUP did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
		cnic_ring_ctl(dev, cid, cli, 1);
	}
}

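/* Tear down the L2 rings.  On bnx2x the client is first halted with an
 * ETH_HALT ramrod, then the connection is released with a CFC_DEL
 * ramrod once the halt completes.
 */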
static void cnic_shutdown_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_shutdown_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		union l5cm_specific_data l5_data;
		int i;
		u32 type;

		cnic_ring_ctl(dev, cid, cli, 0);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
				    cid, ETH_CONNECTION_TYPE, &l5_data);
		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				   "iSCSI CLIENT_HALT did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);

		memset(&l5_data, 0, sizeof(l5_data));
		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
		type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
			 SPE_HDR_FUNCTION_ID);
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				    cid, type, &l5_data);
		msleep(10);
	}
	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
}

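/* Register this cnic instance with the underlying ethernet driver,
 * unless it has already been registered.
 */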
static int cnic_register_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (!ethdev)
		return -ENODEV;

	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
		return 0;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err)
		netdev_err(dev->netdev, "register_cnic failed\n");

	return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!ethdev)
		return;

	ethdev->drv_unregister_cnic(dev->netdev);
}

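/* Common bring-up path for both chip families: grab the status block
 * from the ethernet driver, allocate resources, run the chip-specific
 * start routine, open the connection manager, and enable interrupts.
 */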
static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		netdev_err(dev->netdev, "allocate resource failure\n");
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}

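/* bnx2-specific teardown: quiesce the interrupt, zero the CP and COM
 * scratchpad words, reinitialize the KWQ/KCQ contexts, and release the
 * IRQ and resources.
 */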
static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	cnic_free_resc(dev);
}

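/* bnx2x-specific teardown: free the IRQ and clear the iSCSI event
 * queue producer/consumer state programmed by cnic_start_bnx2x_hw().
 */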
static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_free_irq(dev);
	*cp->kcq1.hw_prod_idx_ptr = 0;
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
	cnic_free_resc(dev);
}

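/* Stop a running device.  Wait up to ~1.5 s for the userspace UIO
 * client to close before shutting down the rings, then unhook the L4
 * ULP under RCU and run the chip-specific stop routine.
 */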
static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		int i = 0;

		/* Need to wait for the ring shutdown event to complete
		 * before clearing the CNIC_UP flag.
		 */
		while (cp->udev->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		cnic_shutdown_rings(dev);
		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}

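/* Free the device once its reference count drops to zero, waiting up
 * to ~1 s for outstanding references before complaining.
 */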
static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}

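/* Allocate the cnic_dev together with its cnic_local private area in a
 * single allocation and fill in the default L2 buffer and ring sizes.
 */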
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL) {
		netdev_err(dev, "allocate dev struct failure\n");
		return NULL;
	}

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}

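/* Probe a bnx2 netdev for cnic support.  bnx2_cnic_probe is resolved
 * via symbol_get() so cnic has no hard module dependency on bnx2;
 * early 5709/5709S revisions (< 0x10) are rejected.
 */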
static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	    pdev->device == PCI_DEVICE_ID_NX2_5709S) {
		u8 rev;

		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
		if (rev < 0x10) {
			pci_dev_put(pdev);
			goto cnic_err;
		}
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	cp->next_idx = cnic_bnx2_next_idx;
	cp->hw_idx = cnic_bnx2_hw_idx;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}

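/* bnx2x counterpart of init_bnx2_cnic(); E2 chips additionally get
 * their own MSI-X interrupt ack handler.
 */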
static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2x_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2x_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	if (BNX2X_CHIP_IS_E2(cp->chip_id))
		cp->ack_int = cnic_ack_bnx2x_e2_msix;
	else
		cp->ack_int = cnic_ack_bnx2x_msix;
	cp->close_conn = cnic_close_bnx2x_conn;
	cp->next_idx = cnic_bnx2x_next_idx;
	cp->hw_idx = cnic_bnx2x_hw_idx;
	return cdev;
}

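/* Identify a cnic-capable netdev by its ethtool driver name and, on a
 * match, allocate the cnic device and add it to the global list.
 */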
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

/* netdev event handler: creates a cnic device when a supported netdev
 * registers or comes up, starts/stops the hardware on UP/GOING_DOWN,
 * forwards the event to registered ULPs, and frees the device on
 * UNREGISTER.
 */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *netdev = ptr;
	struct cnic_dev *dev;
	int if_type;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		rcu_read_lock();
		for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
			struct cnic_ulp_ops *ulp_ops;
			void *ctx;

			ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
			if (!ulp_ops || !ulp_ops->indicate_netevent)
				continue;

			ctx = cp->ulp_handle[if_type];

			ulp_ops->indicate_netevent(ctx, event);
		}
		rcu_read_unlock();

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

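/* Module unload helper: stop, unregister, and free every cnic device,
 * then release any remaining UIO devices.
 */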
static void cnic_release(void)
{
	struct cnic_dev *dev;
	struct cnic_uio_dev *udev;

	while (!list_empty(&cnic_dev_list)) {
		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
		}

		cnic_ulp_exit(dev);
		cnic_unregister_netdev(dev);
		list_del_init(&dev->list);
		cnic_free_dev(dev);
	}
	while (!list_empty(&cnic_udev_list)) {
		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
				  list);
		cnic_free_uio(udev);
	}
}

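/* Module init: register the netdev notifier, then create the
 * single-threaded workqueue used for deferred cnic work, unwinding
 * whatever has been set up if either step fails.
 */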
static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	cnic_wq = create_singlethread_workqueue("cnic_wq");
	if (!cnic_wq) {
		cnic_release();
		unregister_netdevice_notifier(&cnic_netdev_notifier);
		return -ENOMEM;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);