/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
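/* ENIC_DESC_MAX_SPLITS is the worst-case descriptor count for a single
 * send: a MAX_TSO (64 KB) frame carved into WQ_ENET_MAX_DESC_LEN-sized
 * descriptors, plus one more for any remainder.
 */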

#define PCI_DEVICE_ID_CISCO_VIC_ENET		0x0043	/* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */

/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ 0, }	/* end of table */
};
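
/* The 0x0044 "dynamic" device is the vNIC variant used with port
 * profiles; see enic_is_dynamic() and the enic_set_vf_port()/
 * enic_get_vf_port() handlers below.
 */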

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int offset;
};

#define ENIC_TX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
#define ENIC_RX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
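/* The device stats structs are arrays of u64 counters, so the byte
 * offset from offsetof() is divided by 8 to get an index usable with a
 * (u64 *) cast; e.g. ENIC_TX_STAT(tx_frames_ok) expands to
 *   { .name = "tx_frames_ok",
 *     .offset = offsetof(struct vnic_tx_stats, tx_frames_ok) / 8 }
 */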

static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
	ENIC_TX_STAT(tx_tso),
};

static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);

static int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

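/* Completion queue and interrupt resources are laid out with the RQ
 * entries first, followed by the WQ entries; in MSI-X mode the error
 * and notify interrupts take the last two vectors.
 */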
static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
{
	return rq;
}

static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
{
	return enic->rq_count + wq;
}

static inline unsigned int enic_legacy_io_intr(void)
{
	return 0;
}

static inline unsigned int enic_legacy_err_intr(void)
{
	return 1;
}

static inline unsigned int enic_legacy_notify_intr(void)
{
	return 2;
}

static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq)
{
	return rq;
}

static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq)
{
	return enic->rq_count + wq;
}

static inline unsigned int enic_msix_err_intr(struct enic *enic)
{
	return enic->rq_count + enic->wq_count;
}

static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
	return enic->rq_count + enic->wq_count + 1;
}

static int enic_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
	ecmd->port = PORT_FIBRE;
	ecmd->transceiver = XCVR_EXTERNAL;

	if (netif_carrier_ok(netdev)) {
		ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, -1);
		ecmd->duplex = -1;
	}

	ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}

static void enic_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;

	enic_dev_fw_info(enic, &fw_info);

	strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strncpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strncpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}

static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < enic_n_tx_stats; i++) {
			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_rx_stats; i++) {
			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int enic_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return enic_n_tx_stats + enic_n_rx_stats;
	default:
		return -EOPNOTSUPP;
	}
}

static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;

	enic_dev_stats_dump(enic, &vstats);

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
}

static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->msg_enable;
}

static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);
	enic->msg_enable = value;
}

static int enic_get_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;

	return 0;
}

static int enic_set_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
	unsigned int i, intr;

	tx_coalesce_usecs = min_t(u32,
		INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
		ecmd->tx_coalesce_usecs);
	rx_coalesce_usecs = min_t(u32,
		INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
		ecmd->rx_coalesce_usecs);

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		intr = enic_legacy_io_intr();
		vnic_intr_coalescing_timer_set(&enic->intr[intr],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		vnic_intr_coalescing_timer_set(&enic->intr[0],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
				INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		}

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
				INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs));
		}

		break;
	default:
		break;
	}

	enic->tx_coalesce_usecs = tx_coalesce_usecs;
	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}
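
/* Typical usage from userspace (interface name assumed):
 *   ethtool -C eth0 rx-usecs 30 tx-usecs 30
 * In INTx and MSI modes a single interrupt services both directions,
 * so the rx and tx values must match or -EINVAL is returned.
 */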

static const struct ethtool_ops enic_ethtool_ops = {
	.get_settings = enic_get_settings,
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
};

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_queue_stopped(enic->netdev) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_queue(enic->netdev);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}

static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				i, error_status);
	}
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (mtu < netdev->mtu)
			netdev_warn(netdev,
				"interface MTU (%d) set higher "
				"than switch port MTU (%d)\n",
				netdev->mtu, mtu);
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))
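/* pba is the legacy INTx pending-bit array read from the device;
 * bit i set means interrupt source i is asserted.
 */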

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr)) {
		if (napi_schedule_prep(&enic->napi[0]))
			__napi_schedule(&enic->napi[0]);
	} else {
		vnic_intr_unmask(&enic->intr[io_intr]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it.  The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated.  Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule(&enic->napi[0]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct napi_struct *napi = data;

	/* schedule NAPI polling for RQ cleanup */
	napi_schedule(napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int cq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_msix_wq_intr(enic, 0);
	unsigned int wq_work_to_do = -1;	/* no limit */
	unsigned int wq_work_done;

	wq_work_done = vnic_cq_service(&enic->cq[cq],
		wq_work_to_do, enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[intr],
		wq_work_done,
		1 /* unmask intr */,
		1 /* reset intr timer */);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}

static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left, int loopback)
{
	skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		enic_queue_wq_desc_cont(wq, skb,
			pci_map_page(enic->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE),
			frag->size,
			(len_left == 0),	/* EOP? */
			loopback);
	}
}

static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero.  HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
				len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		frag_len_left = frag->size;
		offset = frag->page_offset;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = pci_map_page(enic->pdev, frag->page,
				offset, len,
				PCI_DMA_TODEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				(len == frag_len_left),	/* EOP? */
				loopback);
			frag_len_left -= len;
			offset += len;
		}
	}
}

static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;

	if (vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag, loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
}

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static struct net_device_stats *enic_get_stats(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct net_device_stats *net_stats = &netdev->stats;
	struct vnic_stats *stats;

	enic_dev_stats_dump(enic, &stats);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}

void enic_reset_addr_lists(struct enic *enic)
{
	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}

static void enic_update_multicast_addr_list(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	struct netdev_hw_addr *ha;
	unsigned int mc_count = netdev_mc_count(netdev);
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
		netdev_warn(netdev, "Registering only %d out of %d "
			"multicast addresses\n",
			ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
	}

	/* Is there an easier way?  Trying to minimize the
	 * calls to add/del multicast addrs.  We keep the
	 * addrs from the last call in enic->mc_addr and
	 * look for changes to add/del.
	 */

908
Jiri Pirko48e2f182010-02-22 09:22:26 +0000909 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +0000910 netdev_for_each_mc_addr(ha, netdev) {
Jiri Pirko48e2f182010-02-22 09:22:26 +0000911 if (i == mc_count)
912 break;
Jiri Pirko22bedad32010-04-01 21:22:57 +0000913 memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
Scott Feldman01f2e4e2008-09-15 09:17:11 -0700914 }
915
916 for (i = 0; i < enic->mc_count; i++) {
917 for (j = 0; j < mc_count; j++)
918 if (compare_ether_addr(enic->mc_addr[i],
919 mc_addr[j]) == 0)
920 break;
921 if (j == mc_count)
Roopa Prabhu319d7e82010-12-08 13:19:58 +0000922 enic_dev_del_addr(enic, enic->mc_addr[i]);
Scott Feldman01f2e4e2008-09-15 09:17:11 -0700923 }
924
925 for (i = 0; i < mc_count; i++) {
926 for (j = 0; j < enic->mc_count; j++)
927 if (compare_ether_addr(mc_addr[i],
928 enic->mc_addr[j]) == 0)
929 break;
930 if (j == enic->mc_count)
Roopa Prabhu319d7e82010-12-08 13:19:58 +0000931 enic_dev_add_addr(enic, mc_addr[i]);
Scott Feldman01f2e4e2008-09-15 09:17:11 -0700932 }
933
934 /* Save the list to compare against next time
935 */
936
937 for (i = 0; i < mc_count; i++)
938 memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);
939
940 enic->mc_count = mc_count;
Scott Feldman01f2e4e2008-09-15 09:17:11 -0700941}
942
Vasanthy Kollurie0afe532011-02-17 08:53:12 +0000943static void enic_update_unicast_addr_list(struct enic *enic)
Roopa Prabhu319d7e82010-12-08 13:19:58 +0000944{
945 struct net_device *netdev = enic->netdev;
946 struct netdev_hw_addr *ha;
947 unsigned int uc_count = netdev_uc_count(netdev);
948 u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
949 unsigned int i, j;
950
951 if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
952 netdev_warn(netdev, "Registering only %d out of %d "
953 "unicast addresses\n",
954 ENIC_UNICAST_PERFECT_FILTERS, uc_count);
955 uc_count = ENIC_UNICAST_PERFECT_FILTERS;
956 }
957
	/* Is there an easier way?  Trying to minimize the
	 * calls to add/del unicast addrs.  We keep the
	 * addrs from the last call in enic->uc_addr and
	 * look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_uc_addr(ha, netdev) {
		if (i == uc_count)
			break;
		memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
	}

	for (i = 0; i < enic->uc_count; i++) {
		for (j = 0; j < uc_count; j++)
			if (compare_ether_addr(enic->uc_addr[i],
				uc_addr[j]) == 0)
				break;
		if (j == uc_count)
			enic_dev_del_addr(enic, enic->uc_addr[i]);
	}

	for (i = 0; i < uc_count; i++) {
		for (j = 0; j < enic->uc_count; j++)
			if (compare_ether_addr(uc_addr[i],
				enic->uc_addr[j]) == 0)
				break;
		if (j == enic->uc_count)
			enic_dev_add_addr(enic, uc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < uc_count; i++)
		memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);

	enic->uc_count = uc_count;
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		enic_update_unicast_addr_list(enic);
		if (!allmulti)
			enic_update_multicast_addr_list(enic);
	}
}

/* rtnl lock is held */
static void enic_vlan_rx_register(struct net_device *netdev,
	struct vlan_group *vlan_group)
{
	struct enic *enic = netdev_priv(netdev);
	enic->vlan_group = vlan_group;
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);

	if (vf != PORT_SELF_VF)
		return -EOPNOTSUPP;

	/* Ignore the vf argument for now. We can assume the request
	 * is coming on a vf.
	 */
	if (is_valid_ether_addr(mac)) {
		memcpy(enic->pp.vf_mac, mac, ETH_ALEN);
		return 0;
	} else
		return -EINVAL;
}

static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	int err = 0, restore_pp = 1;

	/* don't support VFs, yet */
	if (vf != PORT_SELF_VF)
		return -EOPNOTSUPP;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, &enic->pp, sizeof(enic->pp));
	memset(&enic->pp, 0, sizeof(enic->pp));

	enic->pp.set |= ENIC_SET_REQUEST;
	enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		enic->pp.set |= ENIC_SET_NAME;
		memcpy(enic->pp.name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		enic->pp.set |= ENIC_SET_INSTANCE;
		memcpy(enic->pp.instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		enic->pp.set |= ENIC_SET_HOST;
		memcpy(enic->pp.host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	/* Special case handling: mac came from IFLA_VF_MAC */
	if (!is_zero_ether_addr(prev_pp.vf_mac))
		memcpy(enic->pp.mac_addr, prev_pp.vf_mac, ETH_ALEN);

	if (is_zero_ether_addr(netdev->dev_addr))
		random_ether_addr(netdev->dev_addr);

	err = enic_process_set_pp_request(enic, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(&enic->pp, &prev_pp, sizeof(enic->pp));
		} else {
			memset(&enic->pp, 0, sizeof(enic->pp));
			memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
		if (enic->pp.request == PORT_REQUEST_DISASSOCIATE) {
			memset(enic->pp.mac_addr, 0, ETH_ALEN);
			memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	}

	memset(enic->pp.vf_mac, 0, ETH_ALEN);

	return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	int err;

	if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, enic->pp.request, &response);
	if (err)
		return err;

	NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request);
	NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
	if (enic->pp.set & ENIC_SET_NAME)
		NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
			enic->pp.name);
	if (enic->pp.set & ENIC_SET_INSTANCE)
		NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
			enic->pp.instance_uuid);
	if (enic->pp.set & ENIC_SET_HOST)
		NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
			enic->pp.host_uuid);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
}

static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}

static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);

		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		skb->dev = netdev;

		if (enic->vlan_group && vlan_stripped &&
		    (vlan_tci & CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK)) {

			if (netdev->features & NETIF_F_GRO)
				vlan_gro_receive(&enic->napi[q_number],
					enic->vlan_group, vlan_tci, skb);
			else
				vlan_hwaccel_receive_skb(skb,
					enic->vlan_group, vlan_tci);

		} else {

			if (netdev->features & NETIF_F_GRO)
				napi_gro_receive(&enic->napi[q_number], skb);
			else
				netif_receive_skb(skb);

		}
	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}

static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_legacy_io_intr();
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1;	/* no limit */
	unsigned int work_done, rq_work_done, wq_work_done;
	int err;

	/* Service RQ (first) and WQ
	 */

	rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return rq_work_done;
}

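/* In MSI-X mode each RQ has its own NAPI context and interrupt vector;
 * the pointer arithmetic below recovers the RQ index from the napi
 * pointer handed in by the core.
 */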
1356static int enic_poll_msix(struct napi_struct *napi, int budget)
1357{
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001358 struct net_device *netdev = napi->dev;
1359 struct enic *enic = netdev_priv(netdev);
1360 unsigned int rq = (napi - &enic->napi[0]);
1361 unsigned int cq = enic_cq_rq(enic, rq);
1362 unsigned int intr = enic_msix_rq_intr(enic, rq);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001363 unsigned int work_to_do = budget;
1364 unsigned int work_done;
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001365 int err;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001366
1367 /* Service RQ
1368 */
1369
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001370 work_done = vnic_cq_service(&enic->cq[cq],
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001371 work_to_do, enic_rq_service, NULL);
1372
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001373 /* Return intr event credits for this polling
1374 * cycle. An intr event is the completion of a
1375 * RQ packet.
1376 */
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001377
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001378 if (work_done > 0)
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001379 vnic_intr_return_credits(&enic->intr[intr],
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001380 work_done,
1381 0 /* don't unmask intr */,
1382 0 /* don't reset intr timer */);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001383
Vasanthy Kolluri0eb26022011-02-04 16:17:21 +00001384 err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001385
1386 /* Buffer allocation failed. Stay in polling mode
1387 * so we can try to fill the ring again.
1388 */
1389
1390 if (err)
1391 work_done = work_to_do;
1392
1393 if (work_done < work_to_do) {
1394
1395 /* Some work done, but not enough to stay in polling,
Vasanthy Kolluri88132f52010-06-24 10:49:25 +00001396 * exit polling
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001397 */
1398
Ben Hutchings288379f2009-01-19 16:43:59 -08001399 napi_complete(napi);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001400 vnic_intr_unmask(&enic->intr[intr]);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001401 }
1402
1403 return work_done;
1404}
1405
1406static void enic_notify_timer(unsigned long data)
1407{
1408 struct enic *enic = (struct enic *)data;
1409
1410 enic_notify_check(enic);
1411
Scott Feldman25f0a062008-09-24 11:23:32 -07001412 mod_timer(&enic->notify_timer,
1413 round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001414}
1415
static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}

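/* MSI-X vector layout used below: one vector per RQ first, then one
 * per WQ, then an error vector, then a notify vector (the mapping
 * comes from the enic_msix_*_intr() helpers). The IRQ names truncate
 * the interface name to 11 characters ("%.11s") so the generated
 * names, e.g. "eth0-rx-0" or "eth0-notify", stay short.
 */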
static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i, intr;
	int err = 0;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			sprintf(enic->msix[intr].devname,
				"%.11s-rx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix_rq;
			enic->msix[intr].devid = &enic->napi[i];
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			sprintf(enic->msix[intr].devname,
				"%.11s-tx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix_wq;
			enic->msix[intr].devid = enic;
		}

		intr = enic_msix_err_intr(enic);
		sprintf(enic->msix[intr].devname,
			"%.11s-err", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_err;
		enic->msix[intr].devid = enic;

		intr = enic_msix_notify_intr(enic);
		sprintf(enic->msix[intr].devname,
			"%.11s-notify", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_notify;
		enic->msix[intr].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			enic->msix[i].requested = 0;

		for (i = 0; i < enic->intr_count; i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}

static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}

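/* Tell firmware which interrupt to use for notify events. The devcmd
 * channel is shared, so the call is serialized with enic->devcmd_lock
 * like the other devcmds issued from process context. INTx uses its
 * dedicated notify vector, MSI-X uses the last vector, and MSI passes
 * -1 so notifications are picked up by the notify timer instead.
 */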
static int enic_dev_notify_set(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_legacy_notify_intr());
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_msix_notify_intr(enic));
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock(&enic->devcmd_lock);

	return err;
}

static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}

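/* Bring-up order in enic_open(): request IRQs and set up the notify
 * buffer first, then fill and enable the rings, program the MAC
 * address and rx mode, enable NAPI, and only then unmask interrupts,
 * so every completion that fires has a consumer ready for it.
 */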
/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		netdev_err(netdev, "Unable to request irq.\n");
		return err;
	}

	err = enic_dev_notify_set(enic);
	if (err) {
		netdev_err(netdev,
			"Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			netdev_err(netdev, "Unable to alloc receive buffers\n");
			err = -ENOMEM;
			goto err_out_notify_unset;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
		enic_dev_add_addr(enic, enic->pp.mac_addr);
	else
		enic_dev_add_station_addr(enic);
	enic_set_rx_mode(netdev);

	netif_wake_queue(netdev);

	for (i = 0; i < enic->rq_count; i++)
		napi_enable(&enic->napi[i]);

	enic_dev_enable(enic);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);

	return 0;

err_out_notify_unset:
	enic_dev_notify_unset(enic);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}

/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
	}

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);

	enic_dev_disable(enic);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
		enic_dev_del_addr(enic, enic->pp.mac_addr);
	else
		enic_dev_del_station_addr(enic);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	enic_dev_notify_unset(enic);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}

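/* Changing the MTU bounces the interface (stop + open) when it is
 * running: receive buffers are sized from netdev->mtu, so the rings
 * must be drained and refilled. Setting the interface MTU above the
 * port MTU is allowed but warned about, since the fabric side is
 * presumably limited to the port MTU.
 */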
static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		netdev_warn(netdev,
			"interface MTU (%d) set higher than port MTU (%d)\n",
			netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;
	unsigned int i, intr;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			enic_isr_msix_rq(enic->msix_entry[intr].vector,
				&enic->napi[i]);
		}
		/* Walk each WQ vector explicitly rather than reusing
		 * the loop index left over from the RQ loop above,
		 * which would index one past the last WQ vector.
		 */
		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
		}
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif

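/* Generic helper for two-phase firmware commands: kick off the
 * operation with start(), then poll finished() every 100ms for up
 * to 2 seconds before giving up with -ETIMEDOUT. Must be called
 * from process context (it sleeps), which the BUG_ON enforces.
 */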
static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
			err);

	return err;
}

static int enic_dev_hang_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
		vnic_dev_hang_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
			err);

	return err;
}

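/* Program the NIC's RSS hash key (4 x 10 key bytes). The key is
 * handed to firmware by physical address, so it is staged in a
 * DMA-coherent buffer for the devcmd. The byte values below are
 * just the fixed default key; read as ASCII they spell
 * "UCSawesome" / "PALOunique" / "LINUXrocks" / "ENICiscool".
 */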
static int enic_set_rsskey(struct enic *enic)
{
	dma_addr_t rss_key_buf_pa;
	union vnic_rss_key *rss_key_buf_va = NULL;
	union vnic_rss_key rss_key = {
		.key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
		.key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
		.key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
		.key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
	};
	int err;

	rss_key_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_key), &rss_key_buf_pa);
	if (!rss_key_buf_va)
		return -ENOMEM;

	memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));

	spin_lock(&enic->devcmd_lock);
	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));
	spin_unlock(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
		rss_key_buf_va, rss_key_buf_pa);

	return err;
}

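/* Program the RSS indirection table: (1 << rss_hash_bits) entries,
 * four u8 entries packed per word (cpu[i/4].b[i%4]), each mapping a
 * hash result to an RQ in simple round-robin fashion (i % rq_count).
 * Like the key, the table is staged in a coherent buffer and passed
 * to firmware by DMA address.
 */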
static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	unsigned int i;
	int err;

	rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	for (i = 0; i < (1 << rss_hash_bits); i++)
		(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;

	spin_lock(&enic->devcmd_lock);
	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));
	spin_unlock(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
		rss_cpu_buf_va, rss_cpu_buf_pa);

	return err;
}

static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;
	int err;

	/* Enable VLAN tag stripping.
	 */

	spin_lock(&enic->devcmd_lock);
	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	spin_unlock(&enic->devcmd_lock);

	return err;
}

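/* Top-level RSS setup. RSS is only enabled when the adapter
 * advertises the feature and more than one RQ was provisioned;
 * hashing covers IPv4/IPv6 and TCP over both, with a 7-bit hash
 * (128 indirection entries). If programming the key or the
 * indirection table fails, RSS is turned off rather than failing
 * the whole configuration.
 */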
static int enic_set_rss_nic_cfg(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_IPV6 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
	const u8 rss_hash_bits = 7;
	const u8 rss_base_cpu = 0;
	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

	if (rss_enable) {
		if (!enic_set_rsskey(enic)) {
			if (enic_set_rsscpu(enic, rss_hash_bits)) {
				rss_enable = 0;
				dev_warn(dev, "RSS disabled, "
					"failed to set RSS cpu indirection table.\n");
			}
		} else {
			rss_enable = 0;
			dev_warn(dev, "RSS disabled, failed to set RSS key.\n");
		}
	}

	return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu, rss_enable);
}

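/* HW error recovery, scheduled via enic->reset from the error
 * handling elsewhere in this file. Runs in process context under
 * rtnl_lock so it can safely call enic_stop()/enic_open(), then
 * performs a hang reset and reprograms everything the reset wiped:
 * address lists, vNIC resources, RSS, and the ingress VLAN rewrite
 * mode.
 */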
static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	enic_dev_hang_notify(enic);
	enic_stop(enic->netdev);
	enic_dev_hang_reset(enic);
	enic_reset_addr_lists(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);

	rtnl_unlock();
}

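/* Interrupt mode selection, in decreasing order of preference:
 *
 *   1. MSI-X, multi-RQ:  n RQs, m WQs, n+m CQs, n+m+2 vectors
 *   2. MSI-X, single RQ: 1 RQ,  m WQs, 1+m CQs, 1+m+2 vectors
 *   3. MSI:              1 RQ,  1 WQ,  2 CQs,   1 vector
 *   4. INTx:             1 RQ,  1 WQ,  2 CQs,   3 INTR resources
 *
 * enic->config.intr_mode (0 allows MSI-X, 1 allows MSI, 2 allows
 * INTx) acts as an upper bound pushed down from the adapter
 * configuration; each step is also gated on the provisioned
 * resource counts.
 */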
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * on system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	/* Use multiple RQs if RSS is enabled
	 */

	if (ENIC_SETTING(enic, RSS) &&
	    enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2) {

		if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {

			enic->rq_count = n;
			enic->wq_count = m;
			enic->cq_count = n + m;
			enic->intr_count = n + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= m &&
	    enic->cq_count >= 1 + m &&
	    enic->intr_count >= 1 + m + 2) {
		if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) {

			enic->rq_count = 1;
			enic->wq_count = m;
			enic->cq_count = 1 + m;
			enic->intr_count = 1 + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}

static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}

static const struct net_device_ops enic_netdev_dynamic_ops = {
	.ndo_open = enic_open,
	.ndo_stop = enic_stop,
	.ndo_start_xmit = enic_hard_start_xmit,
	.ndo_get_stats = enic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = enic_set_rx_mode,
	.ndo_set_multicast_list = enic_set_rx_mode,
	.ndo_set_mac_address = enic_set_mac_address_dynamic,
	.ndo_change_mtu = enic_change_mtu,
	.ndo_vlan_rx_register = enic_vlan_rx_register,
	.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
	.ndo_tx_timeout = enic_tx_timeout,
	.ndo_set_vf_port = enic_set_vf_port,
	.ndo_get_vf_port = enic_get_vf_port,
	.ndo_set_vf_mac = enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = enic_poll_controller,
#endif
};

static const struct net_device_ops enic_netdev_ops = {
	.ndo_open = enic_open,
	.ndo_stop = enic_stop,
	.ndo_start_xmit = enic_hard_start_xmit,
	.ndo_get_stats = enic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = enic_set_mac_address,
	.ndo_set_rx_mode = enic_set_rx_mode,
	.ndo_set_multicast_list = enic_set_rx_mode,
	.ndo_change_mtu = enic_change_mtu,
	.ndo_vlan_rx_register = enic_vlan_rx_register,
	.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
	.ndo_tx_timeout = enic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = enic_poll_controller,
#endif
};

static void enic_dev_deinit(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < enic->rq_count; i++)
		netif_napi_del(&enic->napi[i]);

	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
}

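/* One-time vNIC setup shared with probe: pull the configuration
 * from firmware, size the resources, pick an interrupt mode,
 * allocate and init the WQ/RQ/CQ/INTR resources, program RSS, and
 * register one NAPI context per RQ (MSI-X) or a single one
 * (MSI/INTx).
 */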
static int enic_dev_init(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err;

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		dev_err(dev, "Failed to set intr mode based on resource "
			"counts and system capabilities, aborting\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rss_nic_cfg(enic);
	if (err) {
		dev_err(dev, "Failed to config nic, aborting\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++)
			netif_napi_add(netdev, &enic->napi[i],
				enic_poll_msix, 64);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}

static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}

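/* PCI probe: map the BARs and register the vNIC before any device
 * communication, open the device to reach a known state, then build
 * up the netdev. Note the ordering dependencies: the devcmd lock and
 * the ingress VLAN rewrite mode are set up right after open, before
 * vnic_dev_init()/enic_dev_init(), and the error unwind labels at
 * the bottom tear down in reverse order of setup.
 */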
static int __devinit enic_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;

	/* Allocate net device structure and initialize. Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev(sizeof(struct enic));
	if (!netdev) {
		pr_err("Etherdev alloc failed, aborting\n");
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "Cannot request PCI regions, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device. Try 40-bit first, and
	 * fall back to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "No usable DMA configuration, aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 32);
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 40);
			goto err_out_release_regions;
		}
		using_dac = 1;
	}

	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		dev_err(dev, "vNIC registration failed, aborting\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		dev_err(dev, "vNIC dev open failed, aborting\n");
		goto err_out_vnic_unregister;
	}

	/* Setup devcmd lock
	 */

	spin_lock_init(&enic->devcmd_lock);

	/*
	 * Set ingress vlan rewrite mode before vnic initialization
	 */

	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
	if (err) {
		dev_err(dev,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier. We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	/* Do not call dev_init for a dynamic vnic.
	 * For a dynamic vnic, init_prov_info will be
	 * called later by an upper layer.
	 */

	if (!enic_is_dynamic(enic)) {
		err = vnic_dev_init(enic->vdev, 0);
		if (err) {
			dev_err(dev, "vNIC dev init failed, aborting\n");
			goto err_out_dev_close;
		}
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(dev, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	/* Setup notification timer, HW reset task, and wq locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	INIT_WORK(&enic->reset, enic_reset);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		dev_err(dev, "Invalid MAC address, aborting\n");
		goto err_out_dev_deinit;
	}

	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

	if (enic_is_dynamic(enic))
		netdev->netdev_ops = &enic_netdev_dynamic_ops;
	else
		netdev->netdev_ops = &enic_netdev_ops;

	netdev->watchdog_timeo = 2 * HZ;
	netdev->ethtool_ops = &enic_ethtool_ops;

	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	if (ENIC_SETTING(enic, LOOP)) {
		netdev->features &= ~NETIF_F_HW_VLAN_TX;
		enic->loop_enable = 1;
		enic->loop_tag = enic->config.loop_tag;
		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
	}
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->hw_features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, RXCSUM))
		netdev->hw_features |= NETIF_F_RXCSUM;

	netdev->features |= netdev->hw_features;

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Cannot register net device, aborting\n");
		goto err_out_dev_deinit;
	}

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	return err;
}

static void __devexit enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		cancel_work_sync(&enic->reset);
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(netdev);
	}
}

static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = __devexit_p(enic_remove),
};

static int __init enic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);