/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

static LIST_HEAD(cnic_dev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

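/* UIO open handler, invoked when userspace (typically the iSCSI
 * userspace tools that pair with bnx2i) opens the cnic UIO device.
 * Requires CAP_NET_ADMIN, allows only one opener at a time, and
 * initializes the L2 rings under rtnl_lock to avoid racing with
 * netdev events.
 */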
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (cp->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	cp->uio_dev = iminor(inode);

	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	cnic_shutdown_rings(dev);

	cp->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

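/* Find the cnic device bound to @netdev.  On success the device is
 * returned with its reference count incremented; the caller must
 * balance with cnic_put().
 */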
static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

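/* The helpers below proxy all register and context accesses through the
 * ethernet driver's drv_ctl() callback; the bnx2/bnx2x driver owns the
 * hardware, so cnic never touches it directly.
 */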
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = DRV_CTL_COMPLETION_CMD;
	info.data.comp.comp_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

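/* Send an iSCSI netlink event to userspace: a PATH_REQ describing
 * @csk's destination when a socket is given, or IF_DOWN when @csk is
 * NULL.  Fails with -ENODEV unless the UIO device is open.
 */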
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;

	if (cp->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	rcu_read_lock();
	ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
	if (ulp_ops)
		ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
	rcu_read_unlock();
	return 0;
}

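/* Handle an iSCSI uevent from userspace.  A PATH_UPDATE reply supplies
 * the next-hop MAC address and source IP for the matching cnic_sock;
 * if the MAC is valid, cnic_cm_set_pg() continues the connection setup.
 */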
static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk)) {
			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));
			if (is_valid_ether_addr(csk->ha))
				cnic_cm_set_pg(csk);
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

static void cnic_uio_stop(void)
{
	struct cnic_dev *dev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (cp->cnic_uinfo)
			cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	}
	read_unlock(&cnic_dev_lock);
}

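/* Called by a ULP driver (e.g. bnx2i) to register its cnic_ulp_ops for
 * @ulp_type.  The ops table is published via RCU and ->cnic_init() is
 * invoked on every cnic device already present.
 */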
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type]) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	read_unlock(&cnic_dev_lock);
	rtnl_unlock();

	return 0;
}

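/* Reverse of cnic_register_driver().  Refuses to unregister while any
 * device is still bound to this ULP type, then waits up to two seconds
 * for outstanding references to drain after the RCU grace period.
 */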
int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl[ulp_type];
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_uio_stop();

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

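/* Per-device ULP registration: binds @ulp_ctx to @dev for @ulp_type
 * and, if the device is already up, immediately issues the
 * ->cnic_start() upcall.  The driver-wide table entry must exist first.
 */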
static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type] == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl[ulp_type];
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;
}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

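/* Simple bitmap-based ID allocator used for connection IDs: the table
 * holds @size bits beginning at @start_id, and allocations scan for a
 * free bit under a spinlock.
 */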
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

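/* Fill in the page table describing a DMA area.  The bnx2 chips expect
 * each 64-bit entry in big-endian order (high 32 bits first); the _le
 * variant below writes the little-endian layout used by the bnx2x
 * firmware.
 */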
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = (u32) dma->pg_map_arr[i];
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = dma->pg_map_arr[i] & 0xffffffff;
		page_table++;
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

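/* Allocate @pages coherent DMA pages for @dma and, when @use_pg_tbl is
 * set, build the chip-visible page table pointing at them.  On failure
 * everything is undone via cnic_free_dma().
 */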
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (cp->cnic_uinfo) {
		while (cp->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		uio_unregister_device(cp->cnic_uinfo);
		kfree(cp->cnic_uinfo);
		cp->cnic_uinfo = NULL;
	}

	if (cp->l2_buf) {
		dma_free_coherent(&dev->pcidev->dev, cp->l2_buf_size,
				  cp->l2_buf, cp->l2_buf_map);
		cp->l2_buf = NULL;
	}

	if (cp->l2_ring) {
		dma_free_coherent(&dev->pcidev->dev, cp->l2_ring_size,
				  cp->l2_ring, cp->l2_ring_map);
		cp->l2_ring = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq_info);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

static int cnic_alloc_l2_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;

	cp->l2_ring_size = pages * BCM_PAGE_SIZE;
	cp->l2_ring = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_ring_size,
					 &cp->l2_ring_map,
					 GFP_KERNEL | __GFP_COMP);
	if (!cp->l2_ring)
		return -ENOMEM;

	cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
	cp->l2_buf = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_buf_size,
					&cp->l2_buf_map,
					GFP_KERNEL | __GFP_COMP);
	if (!cp->l2_buf)
		return -ENOMEM;

	return 0;
}

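/* Export the device's register window, status block, and L2 ring and
 * buffer areas to userspace through UIO: mem[0] is the physical BAR,
 * mem[1..3] are logical (kernel-virtual) mappings.
 */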
static int cnic_alloc_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct uio_info *uinfo;
	int ret;

	uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
	if (!uinfo)
		return -ENOMEM;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;
		uinfo->mem[1].size = sizeof(struct host_def_status_block);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
	uinfo->mem[2].size = cp->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
	uinfo->mem[3].size = cp->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	uinfo->priv = dev;

	ret = uio_register_device(&dev->pcidev->dev, uinfo);
	if (ret) {
		kfree(uinfo);
		return ret;
	}

	cp->cnic_uinfo = uinfo;
	return 0;
}

static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_l2_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_alloc_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

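/* Carve out bnx2x context memory for the iSCSI CID range.  The block
 * count is derived from the CID space; on chips that are not E1H the
 * blocks must be naturally aligned, so a misaligned allocation is
 * retried with extra padding.
 */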
static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i, cid_space;

	if (BNX2X_ISCSI_START_CID < ethdev->starting_cid)
		return -EINVAL;

	cid_space = MAX_ISCSI_TBL_SZ +
		    (BNX2X_ISCSI_START_CID - ethdev->starting_cid);

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kzalloc(blks * sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (BNX2X_CHIP_IS_E1H(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      MAX_CNIC_L5_CONTEXT, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	pages = PAGE_ALIGN(MAX_CNIC_L5_CONTEXT * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 0);
	if (ret)
		goto error;
	cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *)
			&cp->kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) cp->kcq_info.pg_map_arr[j] >> 32;
		next->addr_lo = cp->kcq_info.pg_map_arr[j] & 0xffffffff;
	}

	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_status_blk = cp->status_blk;
	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	memset(cp->bnx2x_status_blk, 0, sizeof(struct host_status_block));

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_l2_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_alloc_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

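/* bnx2 kernel work queue (KWQ) accounting: requests are posted at
 * kwq_prod_idx and consumed by the firmware at kwq_con_idx, so the
 * available space is the ring size minus the outstanding entries.
 */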
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}

static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT;

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

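/* Submit a single 16-byte KWQE (slow-path element) to the bnx2x
 * firmware through the ethernet driver.  The payload lives in the
 * per-connection kwqe_data buffer set up by cnic_get_kwqe_16_data()
 * and is passed by physical address.
 */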
static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cid, cp->func)));
	kwqe.hdr.type = cpu_to_le16(type);
	kwqe.hdr.reserved = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return -EBUSY;
}

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}

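/* ISCSI_KWQE_INIT1 handler: sizes the task array, R2T queue, and HQ
 * from the per-connection parameters, then programs the per-function
 * iSCSI variables in the T/U/X/C storm RAMs.
 */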
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int func = cp->func, pages;
	int hq_bds;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(func),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(func),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(func),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(func),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(func),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(func),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(func),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(func),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(func),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(func),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(func),
		  hq_bds);

	return 0;
}

static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	int func = cp->func;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
	}
	cnic_free_id(&cp->cid_tbl, ctx->cid);
	ctx->cid = 0;
}

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}

static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}

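/* Build the iSCSI connection context from the offload1/2/3 KWQEs:
 * the X/T/U/C storm sections are programmed with the SQ/RQ/CQ page
 * tables supplied by the ULP, the HQ/R2TQ/task-array PBLs allocated
 * earlier, and the CDU tags for this HW CID.
 */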
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cid, cp->func);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of the RQ DB, not the PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;

	ictx->timers_context.flags |= ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization differ: CSTORM requires the
	 * CQ DB base, not the PTE address */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;
}

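/* ISCSI_KWQE_OFFLOAD_CONN1 handler: consumes the two fixed WQEs plus
 * the additional WQEs announced in offload2, allocates per-connection
 * resources, sets up the context, and always completes with an
 * OFFLOAD_CONN KCQE reporting success or failure.
 */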
static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp->ctx_tbl[l5_cid].cid,
						  cp->func);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return ret;
}

static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
				  req->context_id, ISCSI_CONNECTION_TYPE,
				  &l5_data);
	return ret;
}

static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	union l5cm_specific_data l5_data;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!(ctx->ctx_flags & CTX_FL_OFFLD_START))
		goto skip_cfc_delete;

	while (!time_after(jiffies, ctx->timestamp + (2 * HZ)))
		msleep(250);

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL,
				  req->context_id,
				  ETH_CONNECTION_TYPE |
				  (1 << SPE_HDR_COMMON_RAMROD_SHIFT),
				  &l5_data);
	if (ret == 0)
		wait_event(ctx->waitq, ctx->wait_cond);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}

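/* Fill the Xstorm/Tstorm connection buffers for a TCP connect request:
 * context address, receive/send buffer sizes, keep-alive parameters,
 * and a pseudo-header checksum precomputed over the 4-word address
 * layout (IPv4 addresses travel in the same layout as IPv6).
 */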
1735static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
1736 struct l4_kwq_connect_req1 *kwqe1,
1737 struct l4_kwq_connect_req3 *kwqe3,
1738 struct l5cm_active_conn_buffer *conn_buf)
1739{
1740 struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
1741 struct l5cm_xstorm_conn_buffer *xstorm_buf =
1742 &conn_buf->xstorm_conn_buffer;
1743 struct l5cm_tstorm_conn_buffer *tstorm_buf =
1744 &conn_buf->tstorm_conn_buffer;
1745 struct regpair context_addr;
1746 u32 cid = BNX2X_SW_CID(kwqe1->cid);
1747 struct in6_addr src_ip, dst_ip;
1748 int i;
1749 u32 *addrp;
1750
1751 addrp = (u32 *) &conn_addr->local_ip_addr;
1752 for (i = 0; i < 4; i++, addrp++)
1753 src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
1754
1755 addrp = (u32 *) &conn_addr->remote_ip_addr;
1756 for (i = 0; i < 4; i++, addrp++)
1757 dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
1758
1759 cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
1760
1761 xstorm_buf->context_addr.hi = context_addr.hi;
1762 xstorm_buf->context_addr.lo = context_addr.lo;
1763 xstorm_buf->mss = 0xffff;
1764 xstorm_buf->rcv_buf = kwqe3->rcv_buf;
1765 if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
1766 xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
1767 xstorm_buf->pseudo_header_checksum =
1768 swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
1769
1770 if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
1771 tstorm_buf->params |=
1772 L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
1773 if (kwqe3->ka_timeout) {
1774 tstorm_buf->ka_enable = 1;
1775 tstorm_buf->ka_timeout = kwqe3->ka_timeout;
1776 tstorm_buf->ka_interval = kwqe3->ka_interval;
1777 tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
1778 }
1779 tstorm_buf->rcv_buf = kwqe3->rcv_buf;
1780 tstorm_buf->snd_buf = kwqe3->snd_buf;
1781 tstorm_buf->max_rt_time = 0xffffffff;
1782}
1783
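/* Program the iSCSI source MAC address into XSTORM and TSTORM internal
 * memory; the TSTORM copy is stored in reverse byte order.
 */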
1784static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
1785{
1786 struct cnic_local *cp = dev->cnic_priv;
1787 int func = CNIC_FUNC(cp);
1788 u8 *mac = dev->mac_addr;
1789
1790 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1791 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(func), mac[0]);
1792 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1793 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(func), mac[1]);
1794 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1795 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(func), mac[2]);
1796 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1797 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(func), mac[3]);
1798 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1799 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(func), mac[4]);
1800 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1801 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(func), mac[5]);
1802
1803 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1804 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func), mac[5]);
1805 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1806 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func) + 1,
1807 mac[4]);
1808 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1809 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func), mac[3]);
1810 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1811 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 1,
1812 mac[2]);
1813 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1814 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 2,
1815 mac[1]);
1816 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1817 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 3,
1818 mac[0]);
1819}
1820
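/* Set the per-function TCP option flags: window scaling is always
 * enabled, TCP timestamps only when tcp_ts is set.
 */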
1821static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
1822{
1823 struct cnic_local *cp = dev->cnic_priv;
1824 u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
1825 u16 tstorm_flags = 0;
1826
1827 if (tcp_ts) {
1828 xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
1829 tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
1830 }
1831
1832 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1833 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), xstorm_flags);
1834
1835 CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1836 TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), tstorm_flags);
1837}
1838
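/* Handle an L4 CONNECT request.  The request spans two KWQEs for IPv4
 * and three for IPv6; *work reports how many were consumed so that the
 * caller can advance past them.
 */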
1839static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
1840 u32 num, int *work)
1841{
1842 struct cnic_local *cp = dev->cnic_priv;
1843 struct l4_kwq_connect_req1 *kwqe1 =
1844 (struct l4_kwq_connect_req1 *) wqes[0];
1845 struct l4_kwq_connect_req3 *kwqe3;
1846 struct l5cm_active_conn_buffer *conn_buf;
1847 struct l5cm_conn_addr_params *conn_addr;
1848 union l5cm_specific_data l5_data;
1849 u32 l5_cid = kwqe1->pg_cid;
1850 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
1851 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1852 int ret;
1853
1854 if (num < 2) {
1855 *work = num;
1856 return -EINVAL;
1857 }
1858
1859 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
1860 *work = 3;
1861 else
1862 *work = 2;
1863
1864 if (num < *work) {
1865 *work = num;
1866 return -EINVAL;
1867 }
1868
1869 if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
1870 netdev_err(dev->netdev, "conn_buf size too big\n");
1871 return -ENOMEM;
1872 }
1873 conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
1874 if (!conn_buf)
1875 return -ENOMEM;
1876
1877 memset(conn_buf, 0, sizeof(*conn_buf));
1878
1879 conn_addr = &conn_buf->conn_addr_buf;
1880 conn_addr->remote_addr_0 = csk->ha[0];
1881 conn_addr->remote_addr_1 = csk->ha[1];
1882 conn_addr->remote_addr_2 = csk->ha[2];
1883 conn_addr->remote_addr_3 = csk->ha[3];
1884 conn_addr->remote_addr_4 = csk->ha[4];
1885 conn_addr->remote_addr_5 = csk->ha[5];
1886
1887 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
1888 struct l4_kwq_connect_req2 *kwqe2 =
1889 (struct l4_kwq_connect_req2 *) wqes[1];
1890
1891 conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
1892 conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
1893 conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;
1894
1895 conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
1896 conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
1897 conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
1898 conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
1899 }
1900 kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
1901
1902 conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
1903 conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
1904 conn_addr->local_tcp_port = kwqe1->src_port;
1905 conn_addr->remote_tcp_port = kwqe1->dst_port;
1906
1907 conn_addr->pmtu = kwqe3->pmtu;
1908 cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
1909
1910 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
1911 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->func), csk->vlan_id);
1912
1913 cnic_bnx2x_set_tcp_timestamp(dev,
1914 kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);
1915
1916 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
1917 kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
1918 if (!ret)
1919 ctx->ctx_flags |= CTX_FL_OFFLD_START;
1920
1921 return ret;
1922}
1923
1924static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
1925{
1926 struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
1927 union l5cm_specific_data l5_data;
1928 int ret;
1929
1930 memset(&l5_data, 0, sizeof(l5_data));
1931 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
1932 req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
1933 return ret;
1934}
1935
1936static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
1937{
1938 struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
1939 union l5cm_specific_data l5_data;
1940 int ret;
1941
1942 memset(&l5_data, 0, sizeof(l5_data));
1943 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
1944 req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
1945 return ret;
1946}
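
/* PG offload requires no hardware work here; the request is completed
 * immediately with a synthesized OFFLOAD_PG KCQE.  cnic_bnx2x_update_pg()
 * below is analogous.
 */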
1947static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
1948{
1949 struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
1950 struct l4_kcq kcqe;
1951 struct kcqe *cqes[1];
1952
1953 memset(&kcqe, 0, sizeof(kcqe));
1954 kcqe.pg_host_opaque = req->host_opaque;
1955 kcqe.pg_cid = req->host_opaque;
1956 kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
1957 cqes[0] = (struct kcqe *) &kcqe;
1958 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
1959 return 0;
1960}
1961
1962static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
1963{
1964 struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
1965 struct l4_kcq kcqe;
1966 struct kcqe *cqes[1];
1967
1968 memset(&kcqe, 0, sizeof(kcqe));
1969 kcqe.pg_host_opaque = req->pg_host_opaque;
1970 kcqe.pg_cid = req->pg_cid;
1971 kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
1972 cqes[0] = (struct kcqe *) &kcqe;
1973 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
1974 return 0;
1975}
1976
1977static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
1978 u32 num_wqes)
1979{
1980 int i, work, ret;
1981 u32 opcode;
1982 struct kwqe *kwqe;
1983
1984 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
1985 return -EAGAIN; /* bnx2x is down */
1986
1987 for (i = 0; i < num_wqes; ) {
1988 kwqe = wqes[i];
1989 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
1990 work = 1;
1991
1992 switch (opcode) {
1993 case ISCSI_KWQE_OPCODE_INIT1:
1994 ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
1995 break;
1996 case ISCSI_KWQE_OPCODE_INIT2:
1997 ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
1998 break;
1999 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2000 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2001 num_wqes - i, &work);
2002 break;
2003 case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2004 ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2005 break;
2006 case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2007 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2008 break;
2009 case L4_KWQE_OPCODE_VALUE_CONNECT1:
2010 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2011 &work);
2012 break;
2013 case L4_KWQE_OPCODE_VALUE_CLOSE:
2014 ret = cnic_bnx2x_close(dev, kwqe);
2015 break;
2016 case L4_KWQE_OPCODE_VALUE_RESET:
2017 ret = cnic_bnx2x_reset(dev, kwqe);
2018 break;
2019 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2020 ret = cnic_bnx2x_offload_pg(dev, kwqe);
2021 break;
2022 case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2023 ret = cnic_bnx2x_update_pg(dev, kwqe);
2024 break;
2025 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2026 ret = 0;
2027 break;
2028 default:
2029 ret = 0;
2030 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2031 opcode);
2032 break;
2033 }
2034 if (ret < 0)
2035 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2036 opcode);
2037 i += work;
2038 }
2039 return 0;
2040}
2041
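/* Deliver completed KCQEs to the bound ULP drivers.  Consecutive KCQEs
 * belonging to the same protocol layer are batched into a single
 * indicate_kcqes() upcall; ramrod completions also credit the KWQ.
 */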
2042static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2043{
2044 struct cnic_local *cp = dev->cnic_priv;
2045 int i, j;
2046
2047 i = 0;
2048 j = 1;
2049 while (num_cqes) {
2050 struct cnic_ulp_ops *ulp_ops;
2051 int ulp_type;
2052 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
2053 u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
2054
2055 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2056 cnic_kwq_completion(dev, 1);
2057
2058 while (j < num_cqes) {
2059 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2060
2061 if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
2062 break;
2063
2064 if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
2065 cnic_kwq_completion(dev, 1);
2066 j++;
2067 }
2068
2069 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2070 ulp_type = CNIC_ULP_RDMA;
2071 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2072 ulp_type = CNIC_ULP_ISCSI;
2073 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2074 ulp_type = CNIC_ULP_L4;
2075 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2076 goto end;
2077 else {
2078 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2079 kcqe_op_flag);
2080 goto end;
2081 }
2082
2083 rcu_read_lock();
2084 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2085 if (likely(ulp_ops)) {
2086 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2087 cp->completed_kcq + i, j);
2088 }
2089 rcu_read_unlock();
2090end:
2091 num_cqes -= j;
2092 i += j;
2093 j = 1;
2094 }
2095 return;
2096}
2097
2098static u16 cnic_bnx2_next_idx(u16 idx)
2099{
2100 return idx + 1;
2101}
2102
2103static u16 cnic_bnx2_hw_idx(u16 idx)
2104{
2105 return idx;
2106}
2107
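/* The KCQ indices on 57710+ skip over the last entry of each page,
 * which is presumably reserved for page chaining.
 */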
2108static u16 cnic_bnx2x_next_idx(u16 idx)
2109{
2110 idx++;
2111 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
2112 idx++;
2113
2114 return idx;
2115}
2116
2117static u16 cnic_bnx2x_hw_idx(u16 idx)
2118{
2119 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
2120 idx++;
2121 return idx;
2122}
2123
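/* Gather completed KCQEs between *sw_prod and hw_prod into
 * cp->completed_kcq[].  An entry flagged KCQE_FLAGS_NEXT continues the
 * previous one, so only fully assembled sequences are counted.
 */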
2124static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
2125{
2126 struct cnic_local *cp = dev->cnic_priv;
2127 u16 i, ri, last;
2128 struct kcqe *kcqe;
2129 int kcqe_cnt = 0, last_cnt = 0;
2130
2131 i = ri = last = *sw_prod;
2132 ri &= MAX_KCQ_IDX;
2133
2134 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2135 kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2136 cp->completed_kcq[kcqe_cnt++] = kcqe;
2137 i = cp->next_idx(i);
2138 ri = i & MAX_KCQ_IDX;
2139 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2140 last_cnt = kcqe_cnt;
2141 last = i;
2142 }
2143 }
2144
2145 *sw_prod = last;
2146 return last_cnt;
2147}
2148
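/* Notify the userspace uio client whenever the L2 tx/rx consumer
 * indices have moved.
 */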
2149static void cnic_chk_pkt_rings(struct cnic_local *cp)
2150{
2151 u16 rx_cons = *cp->rx_cons_ptr;
2152 u16 tx_cons = *cp->tx_cons_ptr;
2153
2154 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2155 cp->tx_cons = tx_cons;
2156 cp->rx_cons = rx_cons;
2157
2158 uio_event_notify(cp->cnic_uinfo);
2159 }
2160}
2161
2162static int cnic_service_bnx2(void *data, void *status_blk)
2163{
2164 struct cnic_dev *dev = data;
2165 struct status_block *sblk = status_blk;
2166 struct cnic_local *cp = dev->cnic_priv;
2167 u32 status_idx = sblk->status_idx;
2168 u16 hw_prod, sw_prod;
2169 int kcqe_cnt;
2170
2171 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2172 return status_idx;
2173
2174 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2175
2176 hw_prod = sblk->status_completion_producer_index;
2177 sw_prod = cp->kcq_prod_idx;
2178 while (sw_prod != hw_prod) {
2179 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
2180 if (kcqe_cnt == 0)
2181 goto done;
2182
2183 service_kcqes(dev, kcqe_cnt);
2184
2185 /* Tell compiler that status_blk fields can change. */
2186 barrier();
2187 if (status_idx != sblk->status_idx) {
2188 status_idx = sblk->status_idx;
2189 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2190 hw_prod = sblk->status_completion_producer_index;
2191 } else
2192 break;
2193 }
2194
2195done:
2196 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
2197
2198 cp->kcq_prod_idx = sw_prod;
2199
2200 cnic_chk_pkt_rings(cp);
2201 return status_idx;
2202}
2203
2204static void cnic_service_bnx2_msix(unsigned long data)
2205{
2206 struct cnic_dev *dev = (struct cnic_dev *) data;
2207 struct cnic_local *cp = dev->cnic_priv;
2208 struct status_block_msix *status_blk = cp->bnx2_status_blk;
2209 u32 status_idx = status_blk->status_idx;
2210 u16 hw_prod, sw_prod;
2211 int kcqe_cnt;
2212
2213 cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
2214
2215 hw_prod = status_blk->status_completion_producer_index;
2216 sw_prod = cp->kcq_prod_idx;
2217 while (sw_prod != hw_prod) {
2218 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
2219 if (kcqe_cnt == 0)
2220 goto done;
2221
2222 service_kcqes(dev, kcqe_cnt);
2223
2224 /* Tell compiler that status_blk fields can change. */
2225 barrier();
2226 if (status_idx != status_blk->status_idx) {
2227 status_idx = status_blk->status_idx;
2228 cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
2229 hw_prod = status_blk->status_completion_producer_index;
2230 } else
2231 break;
2232 }
2233
2234done:
2235 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
2236 cp->kcq_prod_idx = sw_prod;
2237
2238 cnic_chk_pkt_rings(cp);
2239
2240 cp->last_status_idx = status_idx;
2241 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2242 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
2243}
2244
2245static irqreturn_t cnic_irq(int irq, void *dev_instance)
2246{
2247 struct cnic_dev *dev = dev_instance;
2248 struct cnic_local *cp = dev->cnic_priv;
2249 u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
2250
2251 if (cp->ack_int)
2252 cp->ack_int(dev);
2253
2254 prefetch(cp->status_blk);
2255 prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2256
2257 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2258 tasklet_schedule(&cp->cnic_irq_task);
2259
2260 return IRQ_HANDLED;
2261}
2262
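/* Acknowledge a status block through the IGU: a single register write
 * updates the index and sets the interrupt enable/disable mode.
 */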
2263static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
2264 u16 index, u8 op, u8 update)
2265{
2266 struct cnic_local *cp = dev->cnic_priv;
2267 u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
2268 COMMAND_REG_INT_ACK);
2269 struct igu_ack_register igu_ack;
2270
2271 igu_ack.status_block_index = index;
2272 igu_ack.sb_id_and_flags =
2273 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
2274 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
2275 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
2276 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
2277
2278 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
2279}
2280
2281static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
2282{
2283 struct cnic_local *cp = dev->cnic_priv;
2284
2285 cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 0,
2286 IGU_INT_DISABLE, 0);
2287}
2288
2289static void cnic_service_bnx2x_bh(unsigned long data)
2290{
2291 struct cnic_dev *dev = (struct cnic_dev *) data;
2292 struct cnic_local *cp = dev->cnic_priv;
2293 u16 hw_prod, sw_prod;
2294 struct cstorm_status_block_c *sblk =
2295 &cp->bnx2x_status_blk->c_status_block;
2296 u32 status_idx = sblk->status_block_index;
2297 int kcqe_cnt;
2298
2299 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2300 return;
2301
2302 hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
2303 hw_prod = cp->hw_idx(hw_prod);
2304 sw_prod = cp->kcq_prod_idx;
2305 while (sw_prod != hw_prod) {
2306 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
2307 if (kcqe_cnt == 0)
2308 goto done;
2309
2310 service_kcqes(dev, kcqe_cnt);
2311
2312 /* Tell compiler that sblk fields can change. */
2313 barrier();
2314 if (status_idx == sblk->status_block_index)
2315 break;
2316
2317 status_idx = sblk->status_block_index;
2318 hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
2319 hw_prod = cp->hw_idx(hw_prod);
2320 }
2321
2322done:
2323 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod + MAX_KCQ_IDX);
2324 cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID,
2325 status_idx, IGU_INT_ENABLE, 1);
2326
2327 cp->kcq_prod_idx = sw_prod;
2328 return;
2329}
2330
2331static int cnic_service_bnx2x(void *data, void *status_blk)
2332{
2333 struct cnic_dev *dev = data;
2334 struct cnic_local *cp = dev->cnic_priv;
2335 u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
2336
2337 prefetch(cp->status_blk);
2338 prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2339
2340 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2341 tasklet_schedule(&cp->cnic_irq_task);
2342
2343 cnic_chk_pkt_rings(cp);
2344
2345 return 0;
2346}
2347
2348static void cnic_ulp_stop(struct cnic_dev *dev)
2349{
2350 struct cnic_local *cp = dev->cnic_priv;
2351 int if_type;
2352
2353 if (cp->cnic_uinfo)
2354 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
2355
2356 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
2357 struct cnic_ulp_ops *ulp_ops;
2358
2359 mutex_lock(&cnic_lock);
2360 ulp_ops = cp->ulp_ops[if_type];
2361 if (!ulp_ops) {
2362 mutex_unlock(&cnic_lock);
2363 continue;
2364 }
2365 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2366 mutex_unlock(&cnic_lock);
2367
2368 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
2369 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
2370
2371 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2372 }
2373}
2374
2375static void cnic_ulp_start(struct cnic_dev *dev)
2376{
2377 struct cnic_local *cp = dev->cnic_priv;
2378 int if_type;
2379
2380 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
2381 struct cnic_ulp_ops *ulp_ops;
2382
2383 mutex_lock(&cnic_lock);
2384 ulp_ops = cp->ulp_ops[if_type];
2385 if (!ulp_ops || !ulp_ops->cnic_start) {
2386 mutex_unlock(&cnic_lock);
2387 continue;
2388 }
2389 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2390 mutex_unlock(&cnic_lock);
2391
2392 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
2393 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
2394
2395 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2396 }
2397}
2398
2399static int cnic_ctl(void *data, struct cnic_ctl_info *info)
2400{
2401 struct cnic_dev *dev = data;
2402
2403 switch (info->cmd) {
2404 case CNIC_CTL_STOP_CMD:
2405 cnic_hold(dev);
2406
2407 cnic_ulp_stop(dev);
2408 cnic_stop_hw(dev);
2409
2410 cnic_put(dev);
2411 break;
2412 case CNIC_CTL_START_CMD:
2413 cnic_hold(dev);
2414
2415 if (!cnic_start_hw(dev))
2416 cnic_ulp_start(dev);
2417
2418 cnic_put(dev);
2419 break;
2420 case CNIC_CTL_COMPLETION_CMD: {
2421 u32 cid = BNX2X_SW_CID(info->data.comp.cid);
2422 u32 l5_cid;
2423 struct cnic_local *cp = dev->cnic_priv;
2424
2425 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
2426 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
2427
2428 ctx->wait_cond = 1;
2429 wake_up(&ctx->waitq);
2430 }
2431 break;
2432 }
2433 default:
2434 return -EINVAL;
2435 }
2436 return 0;
2437}
2438
2439static void cnic_ulp_init(struct cnic_dev *dev)
2440{
2441 int i;
2442 struct cnic_local *cp = dev->cnic_priv;
2443
2444 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
2445 struct cnic_ulp_ops *ulp_ops;
2446
2447 mutex_lock(&cnic_lock);
2448 ulp_ops = cnic_ulp_tbl[i];
2449 if (!ulp_ops || !ulp_ops->cnic_init) {
2450 mutex_unlock(&cnic_lock);
2451 continue;
2452 }
2453 ulp_get(ulp_ops);
2454 mutex_unlock(&cnic_lock);
2455
2456 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
2457 ulp_ops->cnic_init(dev);
2458
2459 ulp_put(ulp_ops);
2460 }
2461}
2462
2463static void cnic_ulp_exit(struct cnic_dev *dev)
2464{
2465 int i;
2466 struct cnic_local *cp = dev->cnic_priv;
2467
2468 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
2469 struct cnic_ulp_ops *ulp_ops;
2470
2471 mutex_lock(&cnic_lock);
2472 ulp_ops = cnic_ulp_tbl[i];
2473 if (!ulp_ops || !ulp_ops->cnic_exit) {
2474 mutex_unlock(&cnic_lock);
2475 continue;
2476 }
2477 ulp_get(ulp_ops);
2478 mutex_unlock(&cnic_lock);
2479
2480 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
2481 ulp_ops->cnic_exit(dev);
2482
2483 ulp_put(ulp_ops);
2484 }
2485}
2486
2487static int cnic_cm_offload_pg(struct cnic_sock *csk)
2488{
2489 struct cnic_dev *dev = csk->dev;
2490 struct l4_kwq_offload_pg *l4kwqe;
2491 struct kwqe *wqes[1];
2492
2493 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
2494 memset(l4kwqe, 0, sizeof(*l4kwqe));
2495 wqes[0] = (struct kwqe *) l4kwqe;
2496
2497 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
2498 l4kwqe->flags =
2499 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
2500 l4kwqe->l2hdr_nbytes = ETH_HLEN;
2501
2502 l4kwqe->da0 = csk->ha[0];
2503 l4kwqe->da1 = csk->ha[1];
2504 l4kwqe->da2 = csk->ha[2];
2505 l4kwqe->da3 = csk->ha[3];
2506 l4kwqe->da4 = csk->ha[4];
2507 l4kwqe->da5 = csk->ha[5];
2508
2509 l4kwqe->sa0 = dev->mac_addr[0];
2510 l4kwqe->sa1 = dev->mac_addr[1];
2511 l4kwqe->sa2 = dev->mac_addr[2];
2512 l4kwqe->sa3 = dev->mac_addr[3];
2513 l4kwqe->sa4 = dev->mac_addr[4];
2514 l4kwqe->sa5 = dev->mac_addr[5];
2515
2516 l4kwqe->etype = ETH_P_IP;
2517 l4kwqe->ipid_start = DEF_IPID_START;
2518 l4kwqe->host_opaque = csk->l5_cid;
2519
2520 if (csk->vlan_id) {
2521 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
2522 l4kwqe->vlan_tag = csk->vlan_id;
2523 l4kwqe->l2hdr_nbytes += 4;
2524 }
2525
2526 return dev->submit_kwqes(dev, wqes, 1);
2527}
2528
2529static int cnic_cm_update_pg(struct cnic_sock *csk)
2530{
2531 struct cnic_dev *dev = csk->dev;
2532 struct l4_kwq_update_pg *l4kwqe;
2533 struct kwqe *wqes[1];
2534
2535 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
2536 memset(l4kwqe, 0, sizeof(*l4kwqe));
2537 wqes[0] = (struct kwqe *) l4kwqe;
2538
2539 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
2540 l4kwqe->flags =
2541 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
2542 l4kwqe->pg_cid = csk->pg_cid;
2543
2544 l4kwqe->da0 = csk->ha[0];
2545 l4kwqe->da1 = csk->ha[1];
2546 l4kwqe->da2 = csk->ha[2];
2547 l4kwqe->da3 = csk->ha[3];
2548 l4kwqe->da4 = csk->ha[4];
2549 l4kwqe->da5 = csk->ha[5];
2550
2551 l4kwqe->pg_host_opaque = csk->l5_cid;
2552 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
2553
2554 return dev->submit_kwqes(dev, wqes, 1);
2555}
2556
2557static int cnic_cm_upload_pg(struct cnic_sock *csk)
2558{
2559 struct cnic_dev *dev = csk->dev;
2560 struct l4_kwq_upload *l4kwqe;
2561 struct kwqe *wqes[1];
2562
2563 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
2564 memset(l4kwqe, 0, sizeof(*l4kwqe));
2565 wqes[0] = (struct kwqe *) l4kwqe;
2566
2567 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
2568 l4kwqe->flags =
2569 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
2570 l4kwqe->cid = csk->pg_cid;
2571
2572 return dev->submit_kwqes(dev, wqes, 1);
2573}
2574
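/* Build and submit the CONNECT KWQE sequence: req1 (plus req2 for
 * IPv6) followed by req3.  The advertised MSS is derived from the path
 * MTU minus the IP and TCP header sizes.
 */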
2575static int cnic_cm_conn_req(struct cnic_sock *csk)
2576{
2577 struct cnic_dev *dev = csk->dev;
2578 struct l4_kwq_connect_req1 *l4kwqe1;
2579 struct l4_kwq_connect_req2 *l4kwqe2;
2580 struct l4_kwq_connect_req3 *l4kwqe3;
2581 struct kwqe *wqes[3];
2582 u8 tcp_flags = 0;
2583 int num_wqes = 2;
2584
2585 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
2586 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
2587 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
2588 memset(l4kwqe1, 0, sizeof(*l4kwqe1));
2589 memset(l4kwqe2, 0, sizeof(*l4kwqe2));
2590 memset(l4kwqe3, 0, sizeof(*l4kwqe3));
2591
2592 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
2593 l4kwqe3->flags =
2594 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
2595 l4kwqe3->ka_timeout = csk->ka_timeout;
2596 l4kwqe3->ka_interval = csk->ka_interval;
2597 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
2598 l4kwqe3->tos = csk->tos;
2599 l4kwqe3->ttl = csk->ttl;
2600 l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
2601 l4kwqe3->pmtu = csk->mtu;
2602 l4kwqe3->rcv_buf = csk->rcv_buf;
2603 l4kwqe3->snd_buf = csk->snd_buf;
2604 l4kwqe3->seed = csk->seed;
2605
2606 wqes[0] = (struct kwqe *) l4kwqe1;
2607 if (test_bit(SK_F_IPV6, &csk->flags)) {
2608 wqes[1] = (struct kwqe *) l4kwqe2;
2609 wqes[2] = (struct kwqe *) l4kwqe3;
2610 num_wqes = 3;
2611
2612 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
2613 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
2614 l4kwqe2->flags =
2615 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
2616 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
2617 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
2618 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
2619 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
2620 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
2621 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
2622 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
2623 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
2624 sizeof(struct tcphdr);
2625 } else {
2626 wqes[1] = (struct kwqe *) l4kwqe3;
2627 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
2628 sizeof(struct tcphdr);
2629 }
2630
2631 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
2632 l4kwqe1->flags =
2633 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
2634 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
2635 l4kwqe1->cid = csk->cid;
2636 l4kwqe1->pg_cid = csk->pg_cid;
2637 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
2638 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
2639 l4kwqe1->src_port = be16_to_cpu(csk->src_port);
2640 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
2641 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
2642 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
2643 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
2644 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
2645 if (csk->tcp_flags & SK_TCP_NAGLE)
2646 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
2647 if (csk->tcp_flags & SK_TCP_TIMESTAMP)
2648 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
2649 if (csk->tcp_flags & SK_TCP_SACK)
2650 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
2651 if (csk->tcp_flags & SK_TCP_SEG_SCALING)
2652 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
2653
2654 l4kwqe1->tcp_flags = tcp_flags;
2655
2656 return dev->submit_kwqes(dev, wqes, num_wqes);
2657}
2658
2659static int cnic_cm_close_req(struct cnic_sock *csk)
2660{
2661 struct cnic_dev *dev = csk->dev;
2662 struct l4_kwq_close_req *l4kwqe;
2663 struct kwqe *wqes[1];
2664
2665 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
2666 memset(l4kwqe, 0, sizeof(*l4kwqe));
2667 wqes[0] = (struct kwqe *) l4kwqe;
2668
2669 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
2670 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
2671 l4kwqe->cid = csk->cid;
2672
2673 return dev->submit_kwqes(dev, wqes, 1);
2674}
2675
2676static int cnic_cm_abort_req(struct cnic_sock *csk)
2677{
2678 struct cnic_dev *dev = csk->dev;
2679 struct l4_kwq_reset_req *l4kwqe;
2680 struct kwqe *wqes[1];
2681
2682 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
2683 memset(l4kwqe, 0, sizeof(*l4kwqe));
2684 wqes[0] = (struct kwqe *) l4kwqe;
2685
2686 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
2687 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
2688 l4kwqe->cid = csk->cid;
2689
2690 return dev->submit_kwqes(dev, wqes, 1);
2691}
2692
2693static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
2694 u32 l5_cid, struct cnic_sock **csk, void *context)
2695{
2696 struct cnic_local *cp = dev->cnic_priv;
2697 struct cnic_sock *csk1;
2698
2699 if (l5_cid >= MAX_CM_SK_TBL_SZ)
2700 return -EINVAL;
2701
2702 csk1 = &cp->csk_tbl[l5_cid];
2703 if (atomic_read(&csk1->ref_count))
2704 return -EAGAIN;
2705
2706 if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
2707 return -EBUSY;
2708
2709 csk1->dev = dev;
2710 csk1->cid = cid;
2711 csk1->l5_cid = l5_cid;
2712 csk1->ulp_type = ulp_type;
2713 csk1->context = context;
2714
2715 csk1->ka_timeout = DEF_KA_TIMEOUT;
2716 csk1->ka_interval = DEF_KA_INTERVAL;
2717 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
2718 csk1->tos = DEF_TOS;
2719 csk1->ttl = DEF_TTL;
2720 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
2721 csk1->rcv_buf = DEF_RCV_BUF;
2722 csk1->snd_buf = DEF_SND_BUF;
2723 csk1->seed = DEF_SEED;
2724
2725 *csk = csk1;
2726 return 0;
2727}
2728
2729static void cnic_cm_cleanup(struct cnic_sock *csk)
2730{
2731 if (csk->src_port) {
2732 struct cnic_dev *dev = csk->dev;
2733 struct cnic_local *cp = dev->cnic_priv;
2734
2735 cnic_free_id(&cp->csk_port_tbl, csk->src_port);
2736 csk->src_port = 0;
2737 }
2738}
2739
2740static void cnic_close_conn(struct cnic_sock *csk)
2741{
2742 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
2743 cnic_cm_upload_pg(csk);
2744 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
2745 }
2746 cnic_cm_cleanup(csk);
2747}
2748
2749static int cnic_cm_destroy(struct cnic_sock *csk)
2750{
2751 if (!cnic_in_use(csk))
2752 return -EINVAL;
2753
2754 csk_hold(csk);
2755 clear_bit(SK_F_INUSE, &csk->flags);
2756 smp_mb__after_clear_bit();
2757 while (atomic_read(&csk->ref_count) != 1)
2758 msleep(1);
2759 cnic_cm_cleanup(csk);
2760
2761 csk->flags = 0;
2762 csk_put(csk);
2763 return 0;
2764}
2765
2766static inline u16 cnic_get_vlan(struct net_device *dev,
2767 struct net_device **vlan_dev)
2768{
2769 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2770 *vlan_dev = vlan_dev_real_dev(dev);
2771 return vlan_dev_vlan_id(dev);
2772 }
2773 *vlan_dev = dev;
2774 return 0;
2775}
2776
2777static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
2778 struct dst_entry **dst)
2779{
2780#if defined(CONFIG_INET)
2781 struct flowi fl;
2782 int err;
2783 struct rtable *rt;
2784
2785 memset(&fl, 0, sizeof(fl));
2786 fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
2787
2788 err = ip_route_output_key(&init_net, &rt, &fl);
2789 if (!err)
2790 *dst = &rt->u.dst;
2791 return err;
2792#else
2793 return -ENETUNREACH;
2794#endif
2795}
2796
2797static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
2798 struct dst_entry **dst)
2799{
2800#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
2801 struct flowi fl;
2802
2803 memset(&fl, 0, sizeof(fl));
2804 ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
2805 if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
2806 fl.oif = dst_addr->sin6_scope_id;
2807
2808 *dst = ip6_route_output(&init_net, NULL, &fl);
2809 if (*dst)
2810 return 0;
2811#endif
2812
2813 return -ENETUNREACH;
2814}
2815
2816static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
2817 int ulp_type)
2818{
2819 struct cnic_dev *dev = NULL;
2820 struct dst_entry *dst;
2821 struct net_device *netdev = NULL;
2822 int err = -ENETUNREACH;
2823
2824 if (dst_addr->sin_family == AF_INET)
2825 err = cnic_get_v4_route(dst_addr, &dst);
2826 else if (dst_addr->sin_family == AF_INET6) {
2827 struct sockaddr_in6 *dst_addr6 =
2828 (struct sockaddr_in6 *) dst_addr;
2829
2830 err = cnic_get_v6_route(dst_addr6, &dst);
2831 } else
2832 return NULL;
2833
2834 if (err)
2835 return NULL;
2836
2837 if (!dst->dev)
2838 goto done;
2839
2840 cnic_get_vlan(dst->dev, &netdev);
2841
2842 dev = cnic_from_netdev(netdev);
2843
2844done:
2845 dst_release(dst);
2846 if (dev)
2847 cnic_put(dev);
2848 return dev;
2849}
2850
2851static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
2852{
2853 struct cnic_dev *dev = csk->dev;
2854 struct cnic_local *cp = dev->cnic_priv;
2855
2856 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
2857}
2858
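/* Resolve routing for a connect request: look up the dst entry, record
 * the destination address and port, allocate a local port from the
 * CNIC range and take the MTU from the route.  Fails if the route does
 * not go through this device's netdev.
 */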
2859static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
2860{
2861 struct cnic_dev *dev = csk->dev;
2862 struct cnic_local *cp = dev->cnic_priv;
2863 int is_v6, err, rc = -ENETUNREACH;
2864 struct dst_entry *dst;
2865 struct net_device *realdev;
2866 u32 local_port;
2867
2868 if (saddr->local.v6.sin6_family == AF_INET6 &&
2869 saddr->remote.v6.sin6_family == AF_INET6)
2870 is_v6 = 1;
2871 else if (saddr->local.v4.sin_family == AF_INET &&
2872 saddr->remote.v4.sin_family == AF_INET)
2873 is_v6 = 0;
2874 else
2875 return -EINVAL;
2876
2877 clear_bit(SK_F_IPV6, &csk->flags);
2878
2879 if (is_v6) {
2880#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
2881 set_bit(SK_F_IPV6, &csk->flags);
2882 err = cnic_get_v6_route(&saddr->remote.v6, &dst);
2883 if (err)
2884 return err;
2885
2886 if (!dst || dst->error || !dst->dev)
2887 goto err_out;
2888
2889 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
2890 sizeof(struct in6_addr));
2891 csk->dst_port = saddr->remote.v6.sin6_port;
2892 local_port = saddr->local.v6.sin6_port;
2893#else
2894 return rc;
2895#endif
2896
2897 } else {
2898 err = cnic_get_v4_route(&saddr->remote.v4, &dst);
2899 if (err)
2900 return err;
2901
2902 if (!dst || dst->error || !dst->dev)
2903 goto err_out;
2904
2905 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
2906 csk->dst_port = saddr->remote.v4.sin_port;
2907 local_port = saddr->local.v4.sin_port;
2908 }
2909
2910 csk->vlan_id = cnic_get_vlan(dst->dev, &realdev);
2911 if (realdev != dev->netdev)
2912 goto err_out;
2913
2914 if (local_port >= CNIC_LOCAL_PORT_MIN &&
2915 local_port < CNIC_LOCAL_PORT_MAX) {
2916 if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
2917 local_port = 0;
2918 } else
2919 local_port = 0;
2920
2921 if (!local_port) {
2922 local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
2923 if (local_port == -1) {
2924 rc = -ENOMEM;
2925 goto err_out;
2926 }
2927 }
2928 csk->src_port = local_port;
2929
2930 csk->mtu = dst_mtu(dst);
2931 rc = 0;
2932
2933err_out:
2934 dst_release(dst);
2935 return rc;
2936}
2937
2938static void cnic_init_csk_state(struct cnic_sock *csk)
2939{
2940 csk->state = 0;
2941 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
2942 clear_bit(SK_F_CLOSING, &csk->flags);
2943}
2944
2945static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
2946{
2947 int err = 0;
2948
2949 if (!cnic_in_use(csk))
2950 return -EINVAL;
2951
2952 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
2953 return -EINVAL;
2954
2955 cnic_init_csk_state(csk);
2956
2957 err = cnic_get_route(csk, saddr);
2958 if (err)
2959 goto err_out;
2960
2961 err = cnic_resolve_addr(csk, saddr);
2962 if (!err)
2963 return 0;
2964
2965err_out:
2966 clear_bit(SK_F_CONNECT_START, &csk->flags);
2967 return err;
2968}
2969
2970static int cnic_cm_abort(struct cnic_sock *csk)
2971{
2972 struct cnic_local *cp = csk->dev->cnic_priv;
2973 u32 opcode;
2974
2975 if (!cnic_in_use(csk))
2976 return -EINVAL;
2977
2978 if (cnic_abort_prep(csk))
2979 return cnic_cm_abort_req(csk);
2980
2981 /* Getting here means that we haven't started connect, or
2982 * connect was not successful.
2983 */
2984
2985 csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
2986 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
2987 opcode = csk->state;
2988 else
2989 opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
2990 cp->close_conn(csk, opcode);
2991
2992 return 0;
2993}
2994
2995static int cnic_cm_close(struct cnic_sock *csk)
2996{
2997 if (!cnic_in_use(csk))
2998 return -EINVAL;
2999
3000 if (cnic_close_prep(csk)) {
3001 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3002 return cnic_cm_close_req(csk);
3003 }
3004 return 0;
3005}
3006
3007static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3008 u8 opcode)
3009{
3010 struct cnic_ulp_ops *ulp_ops;
3011 int ulp_type = csk->ulp_type;
3012
3013 rcu_read_lock();
3014 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3015 if (ulp_ops) {
3016 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3017 ulp_ops->cm_connect_complete(csk);
3018 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3019 ulp_ops->cm_close_complete(csk);
3020 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3021 ulp_ops->cm_remote_abort(csk);
3022 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3023 ulp_ops->cm_abort_complete(csk);
3024 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3025 ulp_ops->cm_remote_close(csk);
3026 }
3027 rcu_read_unlock();
3028}
3029
3030static int cnic_cm_set_pg(struct cnic_sock *csk)
3031{
3032 if (cnic_offld_prep(csk)) {
3033 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3034 cnic_cm_update_pg(csk);
3035 else
3036 cnic_cm_offload_pg(csk);
3037 }
3038 return 0;
3039}
3040
3041static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3042{
3043 struct cnic_local *cp = dev->cnic_priv;
3044 u32 l5_cid = kcqe->pg_host_opaque;
3045 u8 opcode = kcqe->op_code;
3046 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3047
3048 csk_hold(csk);
3049 if (!cnic_in_use(csk))
3050 goto done;
3051
3052 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3053 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3054 goto done;
3055 }
3056 /* Possible PG kcqe status:  SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3057 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3058 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3059 cnic_cm_upcall(cp, csk,
3060 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3061 goto done;
3062 }
3063
3064 csk->pg_cid = kcqe->pg_cid;
3065 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3066 cnic_cm_conn_req(csk);
3067
3068done:
3069 csk_put(csk);
3070}
3071
3072static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3073{
3074 struct cnic_local *cp = dev->cnic_priv;
3075 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3076 u8 opcode = l4kcqe->op_code;
3077 u32 l5_cid;
3078 struct cnic_sock *csk;
3079
3080 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3081 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3082 cnic_cm_process_offld_pg(dev, l4kcqe);
3083 return;
3084 }
3085
3086 l5_cid = l4kcqe->conn_id;
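	/* Opcodes with the high bit set appear to be ramrod completions;
	 * they carry the CID in the cid field rather than in conn_id.
	 */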
3087 if (opcode & 0x80)
3088 l5_cid = l4kcqe->cid;
3089 if (l5_cid >= MAX_CM_SK_TBL_SZ)
3090 return;
3091
3092 csk = &cp->csk_tbl[l5_cid];
3093 csk_hold(csk);
3094
3095 if (!cnic_in_use(csk)) {
3096 csk_put(csk);
3097 return;
3098 }
3099
3100 switch (opcode) {
3101 case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
3102 if (l4kcqe->status != 0) {
3103 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3104 cnic_cm_upcall(cp, csk,
3105 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3106 }
3107 break;
3108 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
3109 if (l4kcqe->status == 0)
3110 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
3111
3112 smp_mb__before_clear_bit();
3113 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3114 cnic_cm_upcall(cp, csk, opcode);
3115 break;
3116
3117 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3118 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
3119 cnic_cm_upcall(cp, csk, opcode);
3120 break;
3121 } else if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
3122 csk->state = opcode;
3123 /* fall through */
3124 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3125 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3126 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3127 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3128 cp->close_conn(csk, opcode);
3129 break;
3130
3131 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
3132 cnic_cm_upcall(cp, csk, opcode);
3133 break;
3134 }
3135 csk_put(csk);
3136}
3137
3138static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
3139{
3140 struct cnic_dev *dev = data;
3141 int i;
3142
3143 for (i = 0; i < num; i++)
3144 cnic_cm_process_kcqe(dev, kcqe[i]);
3145}
3146
3147static struct cnic_ulp_ops cm_ulp_ops = {
3148 .indicate_kcqes = cnic_cm_indicate_kcqe,
3149};
3150
3151static void cnic_cm_free_mem(struct cnic_dev *dev)
3152{
3153 struct cnic_local *cp = dev->cnic_priv;
3154
3155 kfree(cp->csk_tbl);
3156 cp->csk_tbl = NULL;
3157 cnic_free_id_tbl(&cp->csk_port_tbl);
3158}
3159
3160static int cnic_cm_alloc_mem(struct cnic_dev *dev)
3161{
3162 struct cnic_local *cp = dev->cnic_priv;
3163
3164 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
3165 GFP_KERNEL);
3166 if (!cp->csk_tbl)
3167 return -ENOMEM;
3168
3169 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
3170 CNIC_LOCAL_PORT_MIN)) {
3171 cnic_cm_free_mem(dev);
3172 return -ENOMEM;
3173 }
3174 return 0;
3175}
3176
3177static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
3178{
3179 if ((opcode == csk->state) ||
3180 (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED &&
3181 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) {
3182 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
3183 return 1;
3184 }
3185 /* 57710+ only workaround to handle unsolicited RESET_COMP
3186 * which will be treated like a RESET RCVD notification
3187 * which triggers the clean up procedure
3188 */
3189 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
3190 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
3191 csk->state = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
3192 return 1;
3193 }
3194 }
3195 return 0;
3196}
3197
3198static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
3199{
3200 struct cnic_dev *dev = csk->dev;
3201 struct cnic_local *cp = dev->cnic_priv;
3202
3203 clear_bit(SK_F_CONNECT_START, &csk->flags);
3204 cnic_close_conn(csk);
3205 cnic_cm_upcall(cp, csk, opcode);
3206}
3207
3208static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
3209{
3210}
3211
3212static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
3213{
3214 u32 seed;
3215
3216 get_random_bytes(&seed, 4);
3217 cnic_ctx_wr(dev, 45, 0, seed);
3218 return 0;
3219}
3220
3221static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
3222{
3223 struct cnic_dev *dev = csk->dev;
3224 struct cnic_local *cp = dev->cnic_priv;
3225 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
3226 union l5cm_specific_data l5_data;
3227 u32 cmd = 0;
3228 int close_complete = 0;
3229
3230 switch (opcode) {
3231 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3232 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3233 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3234 if (cnic_ready_to_close(csk, opcode))
3235 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
3236 break;
3237 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3238 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
3239 break;
3240 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3241 close_complete = 1;
3242 break;
3243 }
3244 if (cmd) {
3245 memset(&l5_data, 0, sizeof(l5_data));
3246
3247 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
3248 &l5_data);
3249 } else if (close_complete) {
3250 ctx->timestamp = jiffies;
3251 cnic_close_conn(csk);
3252 cnic_cm_upcall(cp, csk, csk->state);
3253 }
3254}
3255
3256static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
3257{
3258}
3259
3260static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
3261{
3262 struct cnic_local *cp = dev->cnic_priv;
3263 int func = CNIC_FUNC(cp);
3264
3265 cnic_init_bnx2x_mac(dev);
3266 cnic_bnx2x_set_tcp_timestamp(dev, 1);
3267
3268 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
3269 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(func), 0);
3270
3271 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3272 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(func), 1);
3273 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3274 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(func),
3275 DEF_MAX_DA_COUNT);
3276
3277 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3278 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(func), DEF_TTL);
3279 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3280 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(func), DEF_TOS);
3281 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3282 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(func), 2);
3283 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3284 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(func), DEF_SWS_TIMER);
3285
3286 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(func),
3287 DEF_MAX_CWND);
3288 return 0;
3289}
3290
3291static int cnic_cm_open(struct cnic_dev *dev)
3292{
3293 struct cnic_local *cp = dev->cnic_priv;
3294 int err;
3295
3296 err = cnic_cm_alloc_mem(dev);
3297 if (err)
3298 return err;
3299
3300 err = cp->start_cm(dev);
3301
3302 if (err)
3303 goto err_out;
3304
3305 dev->cm_create = cnic_cm_create;
3306 dev->cm_destroy = cnic_cm_destroy;
3307 dev->cm_connect = cnic_cm_connect;
3308 dev->cm_abort = cnic_cm_abort;
3309 dev->cm_close = cnic_cm_close;
3310 dev->cm_select_dev = cnic_cm_select_dev;
3311
3312 cp->ulp_handle[CNIC_ULP_L4] = dev;
3313 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
3314 return 0;
3315
3316err_out:
3317 cnic_cm_free_mem(dev);
3318 return err;
3319}
3320
3321static int cnic_cm_shutdown(struct cnic_dev *dev)
3322{
3323 struct cnic_local *cp = dev->cnic_priv;
3324 int i;
3325
3326 cp->stop_cm(dev);
3327
3328 if (!cp->csk_tbl)
3329 return 0;
3330
3331 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
3332 struct cnic_sock *csk = &cp->csk_tbl[i];
3333
3334 clear_bit(SK_F_INUSE, &csk->flags);
3335 cnic_cm_cleanup(csk);
3336 }
3337 cnic_cm_free_mem(dev);
3338
3339 return 0;
3340}
3341
3342static void cnic_init_context(struct cnic_dev *dev, u32 cid)
3343{
3344 struct cnic_local *cp = dev->cnic_priv;
3345 u32 cid_addr;
3346 int i;
3347
3348 if (CHIP_NUM(cp) == CHIP_NUM_5709)
3349 return;
3350
3351 cid_addr = GET_CID_ADDR(cid);
3352
3353 for (i = 0; i < CTX_SIZE; i += 4)
3354 cnic_ctx_wr(dev, cid_addr, i, 0);
3355}
3356
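/* The 5709 keeps connection context in host memory: point the chip's
 * context page table at each block and poll until the write request is
 * accepted.
 */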
3357static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
3358{
3359 struct cnic_local *cp = dev->cnic_priv;
3360 int ret = 0, i;
3361 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
3362
3363 if (CHIP_NUM(cp) != CHIP_NUM_5709)
3364 return 0;
3365
3366 for (i = 0; i < cp->ctx_blks; i++) {
3367 int j;
3368 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
3369 u32 val;
3370
3371 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
3372
3373 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
3374 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
3375 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
3376 (u64) cp->ctx_arr[i].mapping >> 32);
3377 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
3378 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
3379 for (j = 0; j < 10; j++) {
3380
3381 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
3382 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
3383 break;
3384 udelay(5);
3385 }
3386 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
3387 ret = -EBUSY;
3388 break;
3389 }
3390 }
3391 return ret;
3392}
3393
3394static void cnic_free_irq(struct cnic_dev *dev)
3395{
3396 struct cnic_local *cp = dev->cnic_priv;
3397 struct cnic_eth_dev *ethdev = cp->ethdev;
3398
3399 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3400 cp->disable_int_sync(dev);
3401 tasklet_disable(&cp->cnic_irq_task);
3402 free_irq(ethdev->irq_arr[0].vector, dev);
3403 }
3404}
3405
3406static int cnic_init_bnx2_irq(struct cnic_dev *dev)
3407{
3408 struct cnic_local *cp = dev->cnic_priv;
3409 struct cnic_eth_dev *ethdev = cp->ethdev;
3410
3411 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3412 int err, i = 0;
3413 int sblk_num = cp->status_blk_num;
3414 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
3415 BNX2_HC_SB_CONFIG_1;
3416
3417 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
3418
3419 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
3420 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
3421 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
3422
3423 cp->bnx2_status_blk = cp->status_blk;
3424 cp->last_status_idx = cp->bnx2_status_blk->status_idx;
3425 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
3426 (unsigned long) dev);
3427 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
3428 "cnic", dev);
3429 if (err) {
3430 tasklet_disable(&cp->cnic_irq_task);
3431 return err;
3432 }
3433 while (cp->bnx2_status_blk->status_completion_producer_index &&
3434 i < 10) {
3435 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
3436 1 << (11 + sblk_num));
3437 udelay(10);
3438 i++;
3439 barrier();
3440 }
3441 if (cp->bnx2_status_blk->status_completion_producer_index) {
3442 cnic_free_irq(dev);
3443 goto failed;
3444 }
3445
3446 } else {
3447 struct status_block *sblk = cp->status_blk;
3448 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
3449 int i = 0;
3450
3451 while (sblk->status_completion_producer_index && i < 10) {
3452 CNIC_WR(dev, BNX2_HC_COMMAND,
3453 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3454 udelay(10);
3455 i++;
3456 barrier();
3457 }
3458 if (sblk->status_completion_producer_index)
3459 goto failed;
3460
3461 }
3462 return 0;
3463
3464failed:
3465 netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
3466 return -EBUSY;
3467}
3468
3469static void cnic_enable_bnx2_int(struct cnic_dev *dev)
3470{
3471 struct cnic_local *cp = dev->cnic_priv;
3472 struct cnic_eth_dev *ethdev = cp->ethdev;
3473
3474 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3475 return;
3476
3477 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
3478 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
3479}
3480
3481static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
3482{
3483 struct cnic_local *cp = dev->cnic_priv;
3484 struct cnic_eth_dev *ethdev = cp->ethdev;
3485
3486 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3487 return;
3488
3489 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
3490 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3491 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
3492 synchronize_irq(ethdev->irq_arr[0].vector);
3493}
3494
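/* Set up the L2 TX ring used by the uio client: select the CID
 * (per-vector under MSI-X), write the ring context, and chain the last
 * BD back to the ring's base address.
 */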
3495static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
3496{
3497 struct cnic_local *cp = dev->cnic_priv;
3498 struct cnic_eth_dev *ethdev = cp->ethdev;
3499 u32 cid_addr, tx_cid, sb_id;
3500 u32 val, offset0, offset1, offset2, offset3;
3501 int i;
3502 struct tx_bd *txbd;
3503 dma_addr_t buf_map;
3504 struct status_block *s_blk = cp->status_blk;
3505
3506 sb_id = cp->status_blk_num;
3507 tx_cid = 20;
3508 cnic_init_context(dev, tx_cid);
3509 cnic_init_context(dev, tx_cid + 1);
3510 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
3511 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3512 struct status_block_msix *sblk = cp->status_blk;
3513
3514 tx_cid = TX_TSS_CID + sb_id - 1;
3515 cnic_init_context(dev, tx_cid);
3516 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
3517 (TX_TSS_CID << 7));
3518 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
3519 }
3520 cp->tx_cons = *cp->tx_cons_ptr;
3521
3522 cid_addr = GET_CID_ADDR(tx_cid);
3523 if (CHIP_NUM(cp) == CHIP_NUM_5709) {
3524 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
3525
3526 for (i = 0; i < PHY_CTX_SIZE; i += 4)
3527 cnic_ctx_wr(dev, cid_addr2, i, 0);
3528
3529 offset0 = BNX2_L2CTX_TYPE_XI;
3530 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3531 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3532 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3533 } else {
3534 offset0 = BNX2_L2CTX_TYPE;
3535 offset1 = BNX2_L2CTX_CMD_TYPE;
3536 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3537 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3538 }
3539 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3540 cnic_ctx_wr(dev, cid_addr, offset0, val);
3541
3542 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3543 cnic_ctx_wr(dev, cid_addr, offset1, val);
3544
3545 txbd = (struct tx_bd *) cp->l2_ring;
3546
3547 buf_map = cp->l2_buf_map;
3548 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
3549 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
3550 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
3551 }
3552 val = (u64) cp->l2_ring_map >> 32;
3553 cnic_ctx_wr(dev, cid_addr, offset2, val);
3554 txbd->tx_bd_haddr_hi = val;
3555
3556 val = (u64) cp->l2_ring_map & 0xffffffff;
3557 cnic_ctx_wr(dev, cid_addr, offset3, val);
3558 txbd->tx_bd_haddr_lo = val;
3559}
3560
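/* Set up the L2 RX ring: force a coalesce event until the consumer
 * index is published, write the ring context, then populate the RX BDs
 * and chain the last BD back to the ring's base address.
 */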
3561static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
3562{
3563 struct cnic_local *cp = dev->cnic_priv;
3564 struct cnic_eth_dev *ethdev = cp->ethdev;
3565 u32 cid_addr, sb_id, val, coal_reg, coal_val;
3566 int i;
3567 struct rx_bd *rxbd;
3568 struct status_block *s_blk = cp->status_blk;
3569
3570 sb_id = cp->status_blk_num;
3571 cnic_init_context(dev, 2);
3572 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
3573 coal_reg = BNX2_HC_COMMAND;
3574 coal_val = CNIC_RD(dev, coal_reg);
3575 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3576 struct status_block_msix *sblk = cp->status_blk;
3577
3578 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
3579 coal_reg = BNX2_HC_COALESCE_NOW;
3580 coal_val = 1 << (11 + sb_id);
3581 }
3582 i = 0;
3583 while (*cp->rx_cons_ptr == 0 && i < 10) {
3584 CNIC_WR(dev, coal_reg, coal_val);
3585 udelay(10);
3586 i++;
3587 barrier();
3588 }
3589 cp->rx_cons = *cp->rx_cons_ptr;
3590
3591 cid_addr = GET_CID_ADDR(2);
3592 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
3593 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
3594 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
3595
3596 if (sb_id == 0)
Michael Chand0549382009-10-28 03:41:59 -07003597 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
Michael Chana4636962009-06-08 18:14:43 -07003598 else
Michael Chand0549382009-10-28 03:41:59 -07003599 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
Michael Chana4636962009-06-08 18:14:43 -07003600 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
3601
3602 rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
3603 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
3604 dma_addr_t buf_map;
3605 int n = (i % cp->l2_rx_ring_size) + 1;
3606
3607 buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
3608 rxbd->rx_bd_len = cp->l2_single_buf_size;
3609 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3610 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
3611 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
3612 }
3613 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
3614 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
3615 rxbd->rx_bd_haddr_hi = val;
3616
3617 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
3618 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
3619 rxbd->rx_bd_haddr_lo = val;
3620
3621 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
3622 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
3623}
3624
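/* Tear down the L2 RX side by submitting an L2 FLUSH kernel work queue
 * entry to the firmware.
 */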
static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct kwqe *wqes[1], l2kwqe;

	memset(&l2kwqe, 0, sizeof(l2kwqe));
	wqes[0] = &l2kwqe;
	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
			       KWQE_OPCODE_SHIFT) | 2;
	dev->submit_kwqes(dev, wqes, 1);
}

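/* Read this function's iSCSI MAC address from bnx2 shared memory and
 * load it into the EMAC MAC_MATCH4/5 registers, then program the RPM
 * sort registers, presumably so frames to that address are classified
 * to the CNIC RX ring.
 */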
static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 val;

	val = cp->func << 2;

	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);

	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;

	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}

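/* bnx2 bring-up: program the iSCSI MAC, size the MQ kernel-bypass
 * block to the host page size, build the KWQ (work) and KCQ
 * (completion) queue contexts with their page tables, point their host
 * producer indexes at the right status block under MSI-X, then release
 * the CP and COM processors and initialize the L2 rings.
 */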
static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct status_block *sblk = cp->status_blk;
	u32 val;
	int err;

	cnic_set_bnx2_mac(dev);

	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	if (BCM_PAGE_BITS > 12)
		val |= (12 - 8) << 4;
	else
		val |= (BCM_PAGE_BITS - 8) << 4;

	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;

	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq_prod_idx = 0;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq_info.pgtbl_map;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->int_num = 0;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver posts an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non-zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		netdev_err(dev->netdev, "cnic_init_irq failed\n");
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	return 0;
}

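/* Publish the DMA address of each context block to the chip's context
 * table, rounding the mapping up to cp->ctx_align when the ethdev
 * requires aligned context memory.
 */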
static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_offset = ethdev->ctx_tbl_offset;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		struct cnic_ctx *ctx = &cp->ctx_arr[i];
		dma_addr_t map = ctx->mapping;

		if (cp->ctx_align) {
			unsigned long mask = cp->ctx_align - 1;

			map = (map + mask) & ~mask;
		}

		cnic_ctx_tbl_wr(dev, start_offset + i, map);
	}
}

static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
		     (unsigned long) dev);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
				  "cnic", dev);
		if (err)
			tasklet_disable(&cp->cnic_irq_task);
	}
	return err;
}

static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 sb_id = cp->status_blk_num;
	int port = CNIC_PORT(cp);

	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
					       HC_INDEX_C_ISCSI_EQ_CONS),
		 64 / 12);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						HC_INDEX_C_ISCSI_EQ_CONS), 0);
}

static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}

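/* Set up the bnx2x L2 TX ring.  Each packet slot spans three BDs (a
 * start BD plus a regular BD two entries later; nbd is set to 3), all
 * pointing into the shared DMA buffer.  The chain BD left at the end
 * of the page and the xstorm context both get the ring's base address.
 */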
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) cp->l2_ring;
	struct eth_context *context;
	struct regpair context_addr;
	dma_addr_t buf_map;
	int func = CNIC_FUNC(cp);
	int port = CNIC_PORT(cp);
	int i;
	int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
	u32 val;

	memset(txbd, 0, BCM_PAGE_SIZE);

	buf_map = cp->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
		reg_bd->addr_hi = start_bd->addr_hi;
		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
		start_bd->nbytes = cpu_to_le16(0x10);
		start_bd->nbd = cpu_to_le16(3);
		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
		start_bd->general_data = (UNICAST_ADDRESS <<
					  ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
	}
	context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 1, &context_addr);

	val = (u64) cp->l2_ring_map >> 32;
	txbd->next_bd.addr_hi = cpu_to_le32(val);

	context->xstorm_st_context.tx_bd_page_base_hi = val;

	val = (u64) cp->l2_ring_map & 0xffffffff;
	txbd->next_bd.addr_lo = cpu_to_le32(val);

	context->xstorm_st_context.tx_bd_page_base_lo = val;

	context->cstorm_st_context.sb_index_number =
		HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS;
	context->cstorm_st_context.status_block_id = BNX2X_DEF_SB_ID;

	context->xstorm_st_context.statistics_data = (cli |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

	context->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func),
				       CDU_REGION_NUMBER_XCM_AG,
				       ETH_CONNECTION_TYPE);

	/* reset xstorm per client statistics */
	val = BAR_XSTRORM_INTMEM +
	      XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
		CNIC_WR(dev, val + i * 4, 0);

	cp->tx_cons_ptr =
		&cp->bnx2x_def_status_blk->c_def_status_block.index_values[
			HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS];
}

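/* Set up the bnx2x L2 RX BD ring and its receive completion queue
 * (RCQ), which live at page offsets 1 and 2 of cp->l2_ring.  The
 * ustorm/tstorm client contexts are programmed with the client id,
 * buffer size and statistics settings, and the per-client statistics
 * areas in the storm memories are zeroed.
 */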
static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (cp->l2_ring +
				BCM_PAGE_SIZE);
	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
				(cp->l2_ring + (2 * BCM_PAGE_SIZE));
	struct eth_context *context;
	struct regpair context_addr;
	int i;
	int port = CNIC_PORT(cp);
	int func = CNIC_FUNC(cp);
	int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
	u32 val;
	struct tstorm_eth_client_config tstorm_client = {0};

	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
	}
	context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 0, &context_addr);

	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
	rxbd->addr_hi = cpu_to_le32(val);

	context->ustorm_st_context.common.bd_page_base_hi = val;

	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	rxbd->addr_lo = cpu_to_le32(val);

	context->ustorm_st_context.common.bd_page_base_lo = val;

	context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_ISCSI_RX_SB_INDEX_NUM;
	context->ustorm_st_context.common.clientId = cli;
	context->ustorm_st_context.common.status_block_id = BNX2X_DEF_SB_ID;
	context->ustorm_st_context.common.flags =
		USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS;
	context->ustorm_st_context.common.statistics_counter_id = cli;
	context->ustorm_st_context.common.mc_alignment_log_size = 0;
	context->ustorm_st_context.common.bd_buff_size =
		cp->l2_single_buf_size;

	context->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func),
				       CDU_REGION_NUMBER_UCM_AG,
				       ETH_CONNECTION_TYPE);

	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
	val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
	rxcqe->addr_hi = cpu_to_le32(val);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_CQE_PAGE_BASE_OFFSET(port, cli) + 4, val);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_CQE_PAGE_NEXT_OFFSET(port, cli) + 4, val);

	val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
	rxcqe->addr_lo = cpu_to_le32(val);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_CQE_PAGE_BASE_OFFSET(port, cli), val);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_CQE_PAGE_NEXT_OFFSET(port, cli), val);

	/* client tstorm info */
	tstorm_client.mtu = cp->l2_single_buf_size - 14;
	tstorm_client.config_flags =
			(TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE |
			 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE);
	tstorm_client.statistics_counter_id = cli;

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_CLIENT_CONFIG_OFFSET(port, cli),
		((u32 *)&tstorm_client)[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_CLIENT_CONFIG_OFFSET(port, cli) + 4,
		((u32 *)&tstorm_client)[1]);

	/* reset tstorm per client statistics */
	val = BAR_TSTRORM_INTMEM +
	      TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
		CNIC_WR(dev, val + i * 4, 0);

	/* reset ustorm per client statistics */
	val = BAR_USTRORM_INTMEM +
	      USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
	for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
		CNIC_WR(dev, val + i * 4, 0);

	cp->rx_cons_ptr =
		&cp->bnx2x_def_status_blk->u_def_status_block.index_values[
			HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS];
}

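/* Pull the iSCSI personality of this port out of bnx2x shared memory:
 * the iSCSI MAC address and the licensed connection limit (the XOR
 * with 0x1e1e below appears to de-obfuscate the stored license value).
 * On E1H parts in multi-function mode, iSCSI is disabled unless the
 * function's protocol is configured as iSCSI.
 */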
static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 base, addr, val;
	int port = CNIC_PORT(cp);

	dev->max_iscsi_conn = 0;
	base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
	if (base < 0xa0000 || base >= 0xc0000)
		return;

	addr = BNX2X_SHMEM_ADDR(base,
		dev_info.port_hw_config[port].iscsi_mac_upper);

	val = CNIC_RD(dev, addr);

	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	addr = BNX2X_SHMEM_ADDR(base,
		dev_info.port_hw_config[port].iscsi_mac_lower);

	val = CNIC_RD(dev, addr);

	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
	val = CNIC_RD(dev, addr);

	if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
		u16 val16;

		addr = BNX2X_SHMEM_ADDR(base,
				drv_lic_key[port].max_iscsi_init_conn);
		val16 = CNIC_RD16(dev, addr);

		if (val16)
			val16 ^= 0x1e1e;
		dev->max_iscsi_conn = val16;
	}
	if (BNX2X_CHIP_IS_E1H(cp->chip_id)) {
		int func = CNIC_FUNC(cp);

		addr = BNX2X_SHMEM_ADDR(base,
				mf_cfg.func_mf_config[func].e1hov_tag);
		val = CNIC_RD(dev, addr);
		val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
			addr = BNX2X_SHMEM_ADDR(base,
				mf_cfg.func_mf_config[func].config);
			val = CNIC_RD(dev, addr);
			val &= FUNC_MF_CFG_PROTOCOL_MASK;
			if (val != FUNC_MF_CFG_PROTOCOL_ISCSI)
				dev->max_iscsi_conn = 0;
		}
	}
}

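/* bnx2x bring-up: allocate the iSCSI CID table, then program the
 * single iSCSI event queue (producer, consumer, next-page and next-EQE
 * addresses, status block binding), the connection-buffer and
 * global-buffer page tables, and the context table.  A non-zero EQ
 * consumer in the status block indicates stale firmware state, so the
 * function bails out with -EBUSY in that case.
 */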
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int func = CNIC_FUNC(cp), ret, i;
	int port = CNIC_PORT(cp);
	u16 eq_idx;
	u8 sb_id = cp->status_blk_num;

	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
			       BNX2X_ISCSI_START_CID);

	if (ret)
		return -ENOMEM;

	cp->kcq_io_addr = BAR_CSTRORM_INTMEM +
			  CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0);
	cp->kcq_prod_idx = 0;

	cnic_get_bnx2x_iscsi_info(dev);

	/* Only 1 EQ */
	CNIC_WR16(dev, cp->kcq_io_addr, MAX_KCQ_IDX);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0),
		cp->kcq_info.pg_map_arr[1] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4,
		(u64) cp->kcq_info.pg_map_arr[1] >> 32);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0),
		cp->kcq_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4,
		(u64) cp->kcq_info.pg_map_arr[0] >> 32);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_EQ_SB_NUM_OFFSET(func, 0), cp->status_blk_num);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(func, 0),
		 HC_INDEX_C_ISCSI_EQ_CONS);

	for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i),
			cp->conn_buf_info.pgtbl[2 * i]);
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i) + 4,
			cp->conn_buf_info.pgtbl[(2 * i) + 1]);
	}

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func),
		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func) + 4,
		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

	cnic_setup_bnx2x_context(dev);

	eq_idx = CNIC_RD16(dev, BAR_CSTRORM_INTMEM +
			   CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
			   offsetof(struct cstorm_status_block_c,
				    index_values[HC_INDEX_C_ISCSI_EQ_CONS]));
	if (eq_idx != 0) {
		netdev_err(dev->netdev, "EQ cons index %x != 0\n", eq_idx);
		return -EBUSY;
	}
	ret = cnic_init_bnx2x_irq(dev);
	if (ret)
		return ret;

	cnic_init_bnx2x_tx_ring(dev);
	cnic_init_bnx2x_rx_ring(dev);

	return 0;
}

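/* Ring bring-up dispatch: bnx2 devices just initialize their two L2
 * rings; bnx2x devices also publish the initial RX producers to ustorm
 * memory and issue a CLIENT_SETUP ramrod to attach the iSCSI L2
 * client.
 */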
static void cnic_init_rings(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
		union l5cm_specific_data l5_data;
		struct ustorm_eth_rx_producers rx_prods = {0};
		u32 off, i;

		rx_prods.bd_prod = 0;
		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
		barrier();

		off = BAR_USTRORM_INTMEM +
			USTORM_RX_PRODS_OFFSET(CNIC_PORT(cp), cli);

		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

		cnic_init_bnx2x_tx_ring(dev);
		cnic_init_bnx2x_rx_ring(dev);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
			BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
		cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1);
	}
}

static void cnic_shutdown_rings(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_shutdown_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
		union l5cm_specific_data l5_data;

		cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
			BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
		msleep(10);

		memset(&l5_data, 0, sizeof(l5_data));
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL,
			BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE |
			(1 << SPE_HDR_COMMON_RAMROD_SHIFT), &l5_data);
		msleep(10);
	}
}

static int cnic_register_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (!ethdev)
		return -ENODEV;

	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
		return 0;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err)
		netdev_err(dev->netdev, "register_cnic failed\n");

	return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!ethdev)
		return;

	ethdev->drv_unregister_cnic(dev->netdev);
}

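/* Common bring-up: capture the ethdev's register view, chip id and
 * status block, allocate per-chip resources, start the hardware and
 * the connection manager, and only then mark the device up and enable
 * interrupts.  Failures unwind the allocation and the PCI reference.
 */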
static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	cp->chip_id = ethdev->chip_id;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		netdev_err(dev->netdev, "allocate resource failure\n");
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}

static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	cnic_free_resc(dev);
}

static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 sb_id = cp->status_blk_num;
	int port = CNIC_PORT(cp);

	cnic_free_irq(dev);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
		  offsetof(struct cstorm_status_block_c,
			   index_values[HC_INDEX_C_ISCSI_EQ_CONS]),
		  0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->func, 0), 0);
	CNIC_WR16(dev, cp->kcq_io_addr, 0);
	cnic_free_resc(dev);
}

static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}

static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}

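/* Allocate the cnic_dev and its cnic_local private area in one block;
 * cnic_priv points just past the cnic_dev header.  Defaults: no uio
 * user (-1), 1 KB L2 buffers, and a 3-buffer RX ring.
 */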
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL) {
		netdev_err(dev, "allocate dev struct failure\n");
		return NULL;
	}

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->uio_dev = -1;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}

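/* Probe helper for bnx2 NICs.  bnx2_cnic_probe is resolved via
 * symbol_get() so cnic binds only when the bnx2 module is loaded;
 * 5709/5709S chips below revision 0x10 are rejected.  On success the
 * cnic_dev is filled in with the bnx2-specific ops table.
 */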
static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	    pdev->device == PCI_DEVICE_ID_NX2_5709S) {
		u8 rev;

		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
		if (rev < 0x10) {
			pci_dev_put(pdev);
			goto cnic_err;
		}
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	cp->next_idx = cnic_bnx2_next_idx;
	cp->hw_idx = cnic_bnx2_hw_idx;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}

static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2x_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2x_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	cp->ack_int = cnic_ack_bnx2x_msix;
	cp->close_conn = cnic_close_bnx2x_conn;
	cp->next_idx = cnic_bnx2x_next_idx;
	cp->hw_idx = cnic_bnx2x_hw_idx;
	return cdev;
}

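/* Decide whether a netdev belongs to a CNIC-capable driver by matching
 * its ethtool driver name against "bnx2" and "bnx2x", and add any new
 * cnic_dev to the global list under the write lock.
 */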
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

/**
 * netdev event handler
 */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *netdev = ptr;
	struct cnic_dev *dev;
	int if_type;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		rcu_read_lock();
		for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
			struct cnic_ulp_ops *ulp_ops;
			void *ctx;

			ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
			if (!ulp_ops || !ulp_ops->indicate_netevent)
				continue;

			ctx = cp->ulp_handle[if_type];

			ulp_ops->indicate_netevent(ctx, event);
		}
		rcu_read_unlock();

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

static void cnic_release(void)
{
	struct cnic_dev *dev;

	while (!list_empty(&cnic_dev_list)) {
		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
		}

		cnic_ulp_exit(dev);
		cnic_unregister_netdev(dev);
		list_del_init(&dev->list);
		cnic_free_dev(dev);
	}
}

static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
}

module_init(cnic_init);
module_exit(cnic_exit);