/*
 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
#include <linux/ktime.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"
#include "enic_clsf.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
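/* Worst-case number of WQ descriptors a single send may be split across:
 * a TSO of up to MAX_TSO (64 KB) is chopped into pieces of at most
 * WQ_ENET_MAX_DESC_LEN bytes each (e.g. 64K / 16K + 1 = 5 descriptors,
 * assuming the usual WQ_ENET_LEN_BITS value of 14 from the VIC headers).
 */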
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)

#define PCI_DEVICE_ID_CISCO_VIC_ENET		0x0043	/* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF		0x0071	/* enet SRIOV VF */

/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

#define ENIC_LARGE_PKT_THRESHOLD		1000
#define ENIC_MAX_COALESCE_TIMERS		10
/* Interrupt moderation table, which will be used to decide the
 * coalescing timer values
 * {rx_rate in Mbps, mapping percentage of the range}
 */
struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
	{4000,  0},
	{4400, 10},
	{5060, 20},
	{5230, 30},
	{5540, 40},
	{5820, 50},
	{6120, 60},
	{6435, 70},
	{6745, 80},
	{7000, 90},
	{0xFFFFFFFF, 100}
};

/* This table helps the driver to pick different ranges for rx coalescing
 * timer depending on the link speed.
 */
struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
	{0,  0}, /* 0  - 4  Gbps */
	{0,  3}, /* 4  - 10 Gbps */
	{3,  6}, /* 10 - 40 Gbps */
};

int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
	return vf >= 0 && vf < enic->num_vfs;
#else
	return 0;
#endif
}

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

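/* Per-completion callback for a send (work) queue: free the skbs for
 * completed descriptors and wake the corresponding TX queue once enough
 * descriptors are available again.
 */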
static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_subqueue(enic->netdev, q_number);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}

static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				i, error_status);
	}
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					"interface MTU (%d) set higher "
					"than switch port MTU (%d)\n",
					netdev->mtu, mtu);
		}
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))

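/* Legacy INTx handler: one shared interrupt covers I/O (WQ/RQ), error and
 * notify events, so read the pending bits (PBA) to see which resources are
 * asserted and dispatch accordingly.
 */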
static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr)) {
		if (napi_schedule_prep(&enic->napi[0]))
			__napi_schedule(&enic->napi[0]);
	} else {
		vnic_intr_unmask(&enic->intr[io_intr]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it. The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated. Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule(&enic->napi[0]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct napi_struct *napi = data;

	/* schedule NAPI polling for RQ cleanup */
	napi_schedule(napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int cq;
	unsigned int intr;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int wq_work_done;
	unsigned int wq_irq;

	wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector;
	cq = enic_cq_wq(enic, wq_irq);
	intr = enic_msix_wq_intr(enic, wq_irq);

	wq_work_done = vnic_cq_service(&enic->cq[cq],
		wq_work_to_do, enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[intr],
		wq_work_done,
		1 /* unmask intr */,
		1 /* reset intr timer */);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}

static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left, int loopback)
{
	const skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		enic_queue_wq_desc_cont(wq, skb,
			skb_frag_dma_map(&enic->pdev->dev,
					 frag, 0, skb_frag_size(frag),
					 DMA_TO_DEVICE),
			skb_frag_size(frag),
			(len_left == 0),	/* EOP? */
			loopback);
	}
}

static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero. HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
				len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		frag_len_left = skb_frag_size(frag);
		offset = 0;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
						    offset, len,
						    DMA_TO_DEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				(len == frag_len_left),	/* EOP? */
				loopback);
			frag_len_left -= len;
			offset += len;
		}
	}
}

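/* Pick the right descriptor format for this skb: TSO if a gso_size is set,
 * offloaded L4 checksum if the stack requested CHECKSUM_PARTIAL, plain
 * (optionally VLAN-tagged) descriptors otherwise.
 */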
static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;

	if (vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag, loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
}

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq;
	unsigned long flags;
	unsigned int txq_map;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
	wq = &enic->wq[txq_map];

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely. In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[txq_map], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));

	spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);

	return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
	struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;

	enic_dev_stats_dump(enic, &stats);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}

static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
		unsigned int mc_count = netdev_mc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
			    ENIC_MULTICAST_PERFECT_FILTERS, mc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, mc_addr);
	enic->mc_count++;

	return 0;
}

static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, mc_addr);
	enic->mc_count--;

	return 0;
}

static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
		unsigned int uc_count = netdev_uc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
			    ENIC_UNICAST_PERFECT_FILTERS, uc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, uc_addr);
	enic->uc_count++;

	return 0;
}

static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, uc_addr);
	enic->uc_count--;

	return 0;
}

void enic_reset_addr_lists(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;

	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);

	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		__dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
		if (!allmulti)
			__dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
	}
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
		if (vf == PORT_SELF_VF) {
			memcpy(pp->vf_mac, mac, ETH_ALEN);
			return 0;
		} else {
			/*
			 * For sriov vf's set the mac in hw
			 */
			ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
				vnic_dev_set_mac_addr, mac);
			return enic_dev_status_to_errno(err);
		}
	} else
		return -EINVAL;
}

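/* Apply a port-profile request (associate/disassociate) carried in the
 * IFLA_PORT netlink attributes, restoring the previous profile if the
 * provisioning request fails.
 */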
static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	struct enic_port_profile *pp;
	int err = 0, restore_pp = 1;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, pp, sizeof(*enic->pp));
	memset(pp, 0, sizeof(*enic->pp));

	pp->set |= ENIC_SET_REQUEST;
	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		pp->set |= ENIC_SET_NAME;
		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		pp->set |= ENIC_SET_INSTANCE;
		memcpy(pp->instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		pp->set |= ENIC_SET_HOST;
		memcpy(pp->host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	if (vf == PORT_SELF_VF) {
		/* Special case handling: mac came from IFLA_VF_MAC */
		if (!is_zero_ether_addr(prev_pp.vf_mac))
			memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

		if (is_zero_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	} else {
		/* SR-IOV VF: get mac from adapter */
		ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
			vnic_dev_get_mac_addr, pp->mac_addr);
		if (err) {
			netdev_err(netdev, "Error getting mac for vf %d\n", vf);
			memcpy(pp, &prev_pp, sizeof(*pp));
			return enic_dev_status_to_errno(err);
		}
	}

	err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(pp, &prev_pp, sizeof(*pp));
		} else {
			memset(pp, 0, sizeof(*pp));
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		pp->set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved MAC addresses */
		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
			memset(pp->mac_addr, 0, ETH_ALEN);
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	}

	if (vf == PORT_SELF_VF)
		memset(pp->vf_mac, 0, ETH_ALEN);

	return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, vf, pp->request, &response);
	if (err)
		return err;

	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
	    ((pp->set & ENIC_SET_NAME) &&
	     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
	    ((pp->set & ENIC_SET_INSTANCE) &&
	     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
		     pp->instance_uuid)) ||
	    ((pp->set & ENIC_SET_HOST) &&
	     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
}

static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}

static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
				      u32 pkt_len)
{
	if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
		pkt_size->large_pkt_bytes_cnt += pkt_len;
	else
		pkt_size->small_pkt_bytes_cnt += pkt_len;
}

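/* RX completion handler for one received buffer: unmap the DMA buffer,
 * decode the completion descriptor (errors, checksum, VLAN, RSS hash) and
 * hand the skb to the stack via GRO, plain receive or busy-poll as
 * appropriate.
 */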
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, q_number);
		if (netdev->features & NETIF_F_RXHASH) {
			skb_set_hash(skb, rss_hash,
				     (rss_type &
				      (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) ?
				     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
		}

		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

		skb_mark_napi_id(skb, &enic->napi[rq->index]);
		if (enic_poll_busy_polling(rq) ||
		    !(netdev->features & NETIF_F_GRO))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&enic->napi[q_number], skb);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_intr_update_pkt_size(&cq->pkt_size_counter,
						  bytes_written);
	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}

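/* NAPI poll used in legacy INTx and MSI modes: a single vector covers both
 * the WQ and RQ completion queues, so service TX completions first and then
 * up to 'budget' RX completions.
 */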
static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_legacy_io_intr();
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int work_done, rq_work_done = 0, wq_work_done;
	int err;

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
				       enic_wq_service, NULL);

	if (!enic_poll_lock_napi(&enic->rq[cq_rq])) {
		if (wq_work_done > 0)
			vnic_intr_return_credits(&enic->intr[intr],
						 wq_work_done,
						 0 /* dont unmask intr */,
						 0 /* dont reset intr timer */);
		return rq_work_done;
	}

	if (budget > 0)
		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
			rq_work_to_do, enic_rq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle. An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}
	enic_poll_unlock_napi(&enic->rq[cq_rq]);

	return rq_work_done;
}

static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	unsigned int intr = enic_msix_rq_intr(enic, rq->index);
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	u32 timer = cq->tobe_rx_coal_timeval;

	if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
		cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
	}
}

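/* Adaptive RX coalescing: sample the byte counters accumulated in the RX
 * path, convert them to an approximate rate in Mbps, map that rate through
 * mod_table to a percentage of the configured timer range, and damp the
 * result toward the current coalescing timer value.
 */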
static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
	int index;
	u32 timer;
	u32 range_start;
	u32 traffic;
	u64 delta;
	ktime_t now = ktime_get();

	delta = ktime_us_delta(now, cq->prev_ts);
	if (delta < ENIC_AIC_TS_BREAK)
		return;
	cq->prev_ts = now;

	traffic = pkt_size_counter->large_pkt_bytes_cnt +
		  pkt_size_counter->small_pkt_bytes_cnt;
	/* The table takes Mbps
	 * traffic *= 8    => bits
	 * traffic *= (10^6 / delta)    => bps
	 * traffic /= 10^6     => Mbps
	 *
	 * Combining, traffic *= (8 / delta)
	 */
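	/* Worked example: 1,250,000 bytes observed over a 10,000 us window
	 * gives (1,250,000 * 8) / 10,000 = 1000 Mbps, which falls below the
	 * first mod_table entry (4000 Mbps) and so selects index 0.
	 */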

	traffic <<= 3;
	traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;

	for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
		if (traffic < mod_table[index].rx_rate)
			break;
	range_start = (pkt_size_counter->small_pkt_bytes_cnt >
		       pkt_size_counter->large_pkt_bytes_cnt << 1) ?
		      rx_coal->small_pkt_range_start :
		      rx_coal->large_pkt_range_start;
	timer = range_start + ((rx_coal->range_end - range_start) *
			       mod_table[index].range_percent / 100);
	/* Damping */
	cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;

	pkt_size_counter->large_pkt_bytes_cnt = 0;
	pkt_size_counter->small_pkt_bytes_cnt = 0;
}

#ifdef CONFIG_RFS_ACCEL
static void enic_free_rx_cpu_rmap(struct enic *enic)
{
	free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
	enic->netdev->rx_cpu_rmap = NULL;
}

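/* Build the IRQ-to-CPU reverse map used by accelerated RFS so flows can be
 * steered to the RQ whose MSI-X vector is affine to the consuming CPU.
 */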
static void enic_set_rx_cpu_rmap(struct enic *enic)
{
	int i, res;

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
		if (unlikely(!enic->netdev->rx_cpu_rmap))
			return;
		for (i = 0; i < enic->rq_count; i++) {
			res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
					       enic->msix_entry[i].vector);
			if (unlikely(res)) {
				enic_free_rx_cpu_rmap(enic);
				return;
			}
		}
	}
}

#else

static void enic_free_rx_cpu_rmap(struct enic *enic)
{
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
}

#endif /* CONFIG_RFS_ACCEL */

#ifdef CONFIG_NET_RX_BUSY_POLL
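/* Low-latency socket busy-poll handler: called from the socket layer to
 * service the RX completion queue directly, bypassing NAPI scheduling.
 */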
int enic_busy_poll(struct napi_struct *napi)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = -1; /* clean all pkts possible */
	unsigned int work_done;

	if (!enic_poll_lock_poll(&enic->rq[rq]))
		return LL_FLUSH_BUSY;
	work_done = vnic_cq_service(&enic->cq[cq], work_to_do,
				    enic_rq_service, NULL);

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
					 work_done, 0, 0);
	vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		enic_calc_int_moderation(enic, &enic->rq[rq]);
	enic_poll_unlock_poll(&enic->rq[rq]);

	return work_done;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

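/* Per-RQ NAPI poll used in MSI-X mode: each RQ has its own completion queue
 * and interrupt vector, so only RX work is serviced here (TX completions are
 * handled in enic_isr_msix_wq).
 */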
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001277static int enic_poll_msix(struct napi_struct *napi, int budget)
1278{
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001279 struct net_device *netdev = napi->dev;
1280 struct enic *enic = netdev_priv(netdev);
1281 unsigned int rq = (napi - &enic->napi[0]);
1282 unsigned int cq = enic_cq_rq(enic, rq);
1283 unsigned int intr = enic_msix_rq_intr(enic, rq);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001284 unsigned int work_to_do = budget;
Eric W. Biederman4c502542014-03-14 18:02:08 -07001285 unsigned int work_done = 0;
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001286 int err;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001287
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05301288 if (!enic_poll_lock_napi(&enic->rq[rq]))
1289 return work_done;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001290 /* Service RQ
1291 */
1292
Eric W. Biederman4c502542014-03-14 18:02:08 -07001293 if (budget > 0)
1294 work_done = vnic_cq_service(&enic->cq[cq],
1295 work_to_do, enic_rq_service, NULL);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001296
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001297 /* Return intr event credits for this polling
1298 * cycle. An intr event is the completion of a
1299 * RQ packet.
1300 */
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001301
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001302 if (work_done > 0)
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001303 vnic_intr_return_credits(&enic->intr[intr],
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001304 work_done,
1305 0 /* don't unmask intr */,
1306 0 /* don't reset intr timer */);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001307
Vasanthy Kolluri0eb26022011-02-04 16:17:21 +00001308 err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001309
1310 /* Buffer allocation failed. Stay in polling mode
1311 * so we can try to fill the ring again.
1312 */
1313
1314 if (err)
1315 work_done = work_to_do;
Sujith Sankar7c2ce6e2014-05-20 03:14:05 +05301316 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
1317 /* Call the function which refreshes
1318 * the intr coalescing timer value based on
1319 * the traffic. This is supported only in
1320 * the case of MSI-x mode
1321 */
1322 enic_calc_int_moderation(enic, &enic->rq[rq]);
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001323
1324 if (work_done < work_to_do) {
1325
1326 /* Some work done, but not enough to stay in polling,
Vasanthy Kolluri88132f52010-06-24 10:49:25 +00001327 * exit polling
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001328 */
1329
Ben Hutchings288379f2009-01-19 16:43:59 -08001330 napi_complete(napi);
Sujith Sankar7c2ce6e2014-05-20 03:14:05 +05301331 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
1332 enic_set_int_moderation(enic, &enic->rq[rq]);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001333 vnic_intr_unmask(&enic->intr[intr]);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001334 }
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05301335 enic_poll_unlock_napi(&enic->rq[rq]);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001336
1337 return work_done;
1338}
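
/* Editorial sketch, not part of the original driver: the NAPI contract
 * that enic_poll_msix() follows, assuming the default weight of 64
 * passed to netif_napi_add() later in this file.
 *
 *   work_done == budget : stay in polled mode; the per-RQ MSI-X vector
 *                         stays masked and the core reschedules the
 *                         handler on the next softirq pass.
 *   work_done <  budget : napi_complete() and unmask the vector so the
 *                         next received packet raises an interrupt.
 *
 * A buffer allocation failure is deliberately reported as
 * work_done = work_to_do, which keeps the queue in polled mode until
 * the RQ ring can be refilled.
 */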
1339
1340static void enic_notify_timer(unsigned long data)
1341{
1342 struct enic *enic = (struct enic *)data;
1343
1344 enic_notify_check(enic);
1345
Scott Feldman25f0a062008-09-24 11:23:32 -07001346 mod_timer(&enic->notify_timer,
1347 round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001348}
1349
1350static void enic_free_intr(struct enic *enic)
1351{
1352 struct net_device *netdev = enic->netdev;
1353 unsigned int i;
1354
Govindarajulu Varadarajanb6e97c12014-06-23 16:08:01 +05301355 enic_free_rx_cpu_rmap(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001356 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1357 case VNIC_DEV_INTR_MODE_INTX:
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001358 free_irq(enic->pdev->irq, netdev);
1359 break;
Scott Feldman8f4d2482008-09-24 11:23:42 -07001360 case VNIC_DEV_INTR_MODE_MSI:
1361 free_irq(enic->pdev->irq, enic);
1362 break;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001363 case VNIC_DEV_INTR_MODE_MSIX:
1364 for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
1365 if (enic->msix[i].requested)
1366 free_irq(enic->msix_entry[i].vector,
1367 enic->msix[i].devid);
1368 break;
1369 default:
1370 break;
1371 }
1372}
1373
1374static int enic_request_intr(struct enic *enic)
1375{
1376 struct net_device *netdev = enic->netdev;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001377 unsigned int i, intr;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001378 int err = 0;
1379
Govindarajulu Varadarajanb6e97c12014-06-23 16:08:01 +05301380 enic_set_rx_cpu_rmap(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001381 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1382
1383 case VNIC_DEV_INTR_MODE_INTX:
1384
1385 err = request_irq(enic->pdev->irq, enic_isr_legacy,
1386 IRQF_SHARED, netdev->name, netdev);
1387 break;
1388
1389 case VNIC_DEV_INTR_MODE_MSI:
1390
1391 err = request_irq(enic->pdev->irq, enic_isr_msi,
1392 0, netdev->name, enic);
1393 break;
1394
1395 case VNIC_DEV_INTR_MODE_MSIX:
1396
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001397 for (i = 0; i < enic->rq_count; i++) {
1398 intr = enic_msix_rq_intr(enic, i);
Dan Carpenter4505f402013-01-17 21:46:18 +00001399 snprintf(enic->msix[intr].devname,
1400 sizeof(enic->msix[intr].devname),
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001401 "%.11s-rx-%d", netdev->name, i);
1402 enic->msix[intr].isr = enic_isr_msix_rq;
1403 enic->msix[intr].devid = &enic->napi[i];
1404 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001405
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001406 for (i = 0; i < enic->wq_count; i++) {
1407 intr = enic_msix_wq_intr(enic, i);
Dan Carpenter4505f402013-01-17 21:46:18 +00001408 snprintf(enic->msix[intr].devname,
1409 sizeof(enic->msix[intr].devname),
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001410 "%.11s-tx-%d", netdev->name, i);
1411 enic->msix[intr].isr = enic_isr_msix_wq;
1412 enic->msix[intr].devid = enic;
1413 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001414
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001415 intr = enic_msix_err_intr(enic);
Dan Carpenter4505f402013-01-17 21:46:18 +00001416 snprintf(enic->msix[intr].devname,
1417 sizeof(enic->msix[intr].devname),
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001418 "%.11s-err", netdev->name);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001419 enic->msix[intr].isr = enic_isr_msix_err;
1420 enic->msix[intr].devid = enic;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001421
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001422 intr = enic_msix_notify_intr(enic);
Dan Carpenter4505f402013-01-17 21:46:18 +00001423 snprintf(enic->msix[intr].devname,
1424 sizeof(enic->msix[intr].devname),
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001425 "%.11s-notify", netdev->name);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001426 enic->msix[intr].isr = enic_isr_msix_notify;
1427 enic->msix[intr].devid = enic;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001428
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001429 for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
1430 enic->msix[i].requested = 0;
1431
1432 for (i = 0; i < enic->intr_count; i++) {
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001433 err = request_irq(enic->msix_entry[i].vector,
1434 enic->msix[i].isr, 0,
1435 enic->msix[i].devname,
1436 enic->msix[i].devid);
1437 if (err) {
1438 enic_free_intr(enic);
1439 break;
1440 }
1441 enic->msix[i].requested = 1;
1442 }
1443
1444 break;
1445
1446 default:
1447 break;
1448 }
1449
1450 return err;
1451}
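
/* Editorial sketch, not part of the original driver: the MSI-X naming
 * and ownership that enic_request_intr() builds, shown for the
 * hypothetical counts rq_count = 2 and wq_count = 2 ("eth0" stands in
 * for netdev->name); the indices assume the usual enic_msix_*_intr()
 * layout of RQ vectors first, then WQ, error and notify vectors.
 *
 *   vector 0: "eth0-rx-0"   -> enic_isr_msix_rq,     devid &enic->napi[0]
 *   vector 1: "eth0-rx-1"   -> enic_isr_msix_rq,     devid &enic->napi[1]
 *   vector 2: "eth0-tx-0"   -> enic_isr_msix_wq,     devid enic
 *   vector 3: "eth0-tx-1"   -> enic_isr_msix_wq,     devid enic
 *   vector 4: "eth0-err"    -> enic_isr_msix_err,    devid enic
 *   vector 5: "eth0-notify" -> enic_isr_msix_notify, devid enic
 */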
1452
Scott Feldmanb3d18d12009-12-23 13:27:30 +00001453static void enic_synchronize_irqs(struct enic *enic)
1454{
1455 unsigned int i;
1456
1457 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1458 case VNIC_DEV_INTR_MODE_INTX:
1459 case VNIC_DEV_INTR_MODE_MSI:
1460 synchronize_irq(enic->pdev->irq);
1461 break;
1462 case VNIC_DEV_INTR_MODE_MSIX:
1463 for (i = 0; i < enic->intr_count; i++)
1464 synchronize_irq(enic->msix_entry[i].vector);
1465 break;
1466 default:
1467 break;
1468 }
1469}
1470
Sujith Sankar7c2ce6e2014-05-20 03:14:05 +05301471static void enic_set_rx_coal_setting(struct enic *enic)
1472{
1473 unsigned int speed;
1474 int index = -1;
1475 struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
1476
1477 /* If intr mode is not MSIX, do not do adaptive coalescing */
1478 if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
 1479		netdev_info(enic->netdev, "INTR mode is not MSI-X, not initializing adaptive coalescing\n");
1480 return;
1481 }
1482
1483 /* 1. Read the link speed from fw
1484 * 2. Pick the default range for the speed
1485 * 3. Update it in enic->rx_coalesce_setting
1486 */
1487 speed = vnic_dev_port_speed(enic->vdev);
1488 if (ENIC_LINK_SPEED_10G < speed)
1489 index = ENIC_LINK_40G_INDEX;
1490 else if (ENIC_LINK_SPEED_4G < speed)
1491 index = ENIC_LINK_10G_INDEX;
1492 else
1493 index = ENIC_LINK_4G_INDEX;
1494
1495 rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
1496 rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
1497 rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
1498
1499 /* Start with the value provided by UCSM */
1500 for (index = 0; index < enic->rq_count; index++)
1501 enic->cq[index].cur_rx_coal_timeval =
1502 enic->config.intr_timer_usec;
1503
1504 rx_coal->use_adaptive_rx_coalesce = 1;
1505}
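
/* Editorial sketch, not part of the original driver: how the link speed
 * reported by the firmware picks an adaptive-coalescing range above,
 * assuming the usual ENIC_LINK_SPEED_* thresholds.
 *
 *   speed > 10G        -> ENIC_LINK_40G_INDEX
 *   4G < speed <= 10G  -> ENIC_LINK_10G_INDEX
 *   speed <= 4G        -> ENIC_LINK_4G_INDEX
 *
 * The index selects a {small,large}_pkt_range_start pair from
 * mod_range[], and every RQ starts from the static intr_timer_usec
 * provided by UCSM until adaptive coalescing adjusts it.
 */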
1506
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001507static int enic_dev_notify_set(struct enic *enic)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001508{
1509 int err;
1510
Tony Camuso8e091342014-06-23 16:08:03 +05301511 spin_lock_bh(&enic->devcmd_lock);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001512 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1513 case VNIC_DEV_INTR_MODE_INTX:
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001514 err = vnic_dev_notify_set(enic->vdev,
1515 enic_legacy_notify_intr());
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001516 break;
1517 case VNIC_DEV_INTR_MODE_MSIX:
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001518 err = vnic_dev_notify_set(enic->vdev,
1519 enic_msix_notify_intr(enic));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001520 break;
1521 default:
1522 err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
1523 break;
1524 }
Tony Camuso8e091342014-06-23 16:08:03 +05301525 spin_unlock_bh(&enic->devcmd_lock);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001526
1527 return err;
1528}
1529
1530static void enic_notify_timer_start(struct enic *enic)
1531{
1532 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1533 case VNIC_DEV_INTR_MODE_MSI:
1534 mod_timer(&enic->notify_timer, jiffies);
1535 break;
1536 default:
 1537		/* Using intr for notification with INTx/MSI-X */
1538 break;
Joe Perches6403eab2011-06-03 11:51:20 +00001539 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001540}
1541
1542/* rtnl lock is held, process context */
1543static int enic_open(struct net_device *netdev)
1544{
1545 struct enic *enic = netdev_priv(netdev);
1546 unsigned int i;
1547 int err;
1548
Scott Feldman4b75a442008-09-24 11:23:53 -07001549 err = enic_request_intr(enic);
1550 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001551 netdev_err(netdev, "Unable to request irq.\n");
Scott Feldman4b75a442008-09-24 11:23:53 -07001552 return err;
1553 }
1554
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001555 err = enic_dev_notify_set(enic);
Scott Feldman4b75a442008-09-24 11:23:53 -07001556 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001557 netdev_err(netdev,
1558 "Failed to alloc notify buffer, aborting.\n");
Scott Feldman4b75a442008-09-24 11:23:53 -07001559 goto err_out_free_intr;
1560 }
1561
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001562 for (i = 0; i < enic->rq_count; i++) {
Vasanthy Kolluri0eb26022011-02-04 16:17:21 +00001563 vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001564 /* Need at least one buffer on ring to get going */
1565 if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001566 netdev_err(netdev, "Unable to alloc receive buffers\n");
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001567 err = -ENOMEM;
Scott Feldman4b75a442008-09-24 11:23:53 -07001568 goto err_out_notify_unset;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001569 }
1570 }
1571
1572 for (i = 0; i < enic->wq_count; i++)
1573 vnic_wq_enable(&enic->wq[i]);
1574 for (i = 0; i < enic->rq_count; i++)
1575 vnic_rq_enable(&enic->rq[i]);
1576
Roopa Prabhu73359032012-01-18 04:24:02 +00001577 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
Roopa Prabhu296390592010-12-08 13:54:03 +00001578 enic_dev_add_station_addr(enic);
Roopa Prabhu3f192792011-09-22 03:44:43 +00001579
Roopa Prabhu319d7e82010-12-08 13:19:58 +00001580 enic_set_rx_mode(netdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001581
govindarajulu.v822473b2013-09-04 11:17:14 +05301582 netif_tx_wake_all_queues(netdev);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001583
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05301584 for (i = 0; i < enic->rq_count; i++) {
1585 enic_busy_poll_init_lock(&enic->rq[i]);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001586 napi_enable(&enic->napi[i]);
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05301587 }
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001588
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001589 enic_dev_enable(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001590
1591 for (i = 0; i < enic->intr_count; i++)
1592 vnic_intr_unmask(&enic->intr[i]);
1593
1594 enic_notify_timer_start(enic);
Govindarajulu Varadarajana145df22014-06-23 16:08:02 +05301595 enic_rfs_flw_tbl_init(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001596
1597 return 0;
Scott Feldman4b75a442008-09-24 11:23:53 -07001598
1599err_out_notify_unset:
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001600 enic_dev_notify_unset(enic);
Scott Feldman4b75a442008-09-24 11:23:53 -07001601err_out_free_intr:
1602 enic_free_intr(enic);
1603
1604 return err;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001605}
1606
1607/* rtnl lock is held, process context */
1608static int enic_stop(struct net_device *netdev)
1609{
1610 struct enic *enic = netdev_priv(netdev);
1611 unsigned int i;
1612 int err;
1613
Vasanthy Kolluri29046f92010-06-24 10:52:26 +00001614 for (i = 0; i < enic->intr_count; i++) {
Scott Feldmanb3d18d12009-12-23 13:27:30 +00001615 vnic_intr_mask(&enic->intr[i]);
Vasanthy Kolluri29046f92010-06-24 10:52:26 +00001616 (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
1617 }
Scott Feldmanb3d18d12009-12-23 13:27:30 +00001618
1619 enic_synchronize_irqs(enic);
1620
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001621 del_timer_sync(&enic->notify_timer);
Govindarajulu Varadarajana145df22014-06-23 16:08:02 +05301622 enic_rfs_flw_tbl_free(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001623
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001624 enic_dev_disable(enic);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001625
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05301626 local_bh_disable();
1627 for (i = 0; i < enic->rq_count; i++) {
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001628 napi_disable(&enic->napi[i]);
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05301629 while (!enic_poll_lock_napi(&enic->rq[i]))
1630 mdelay(1);
1631 }
1632 local_bh_enable();
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001633
Scott Feldmanb3d18d12009-12-23 13:27:30 +00001634 netif_carrier_off(netdev);
1635 netif_tx_disable(netdev);
Roopa Prabhu3f192792011-09-22 03:44:43 +00001636
Roopa Prabhu73359032012-01-18 04:24:02 +00001637 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
Roopa Prabhu296390592010-12-08 13:54:03 +00001638 enic_dev_del_station_addr(enic);
Scott Feldmanf8bd9092010-05-17 22:50:19 -07001639
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001640 for (i = 0; i < enic->wq_count; i++) {
1641 err = vnic_wq_disable(&enic->wq[i]);
1642 if (err)
1643 return err;
1644 }
1645 for (i = 0; i < enic->rq_count; i++) {
1646 err = vnic_rq_disable(&enic->rq[i]);
1647 if (err)
1648 return err;
1649 }
1650
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001651 enic_dev_notify_unset(enic);
Scott Feldman4b75a442008-09-24 11:23:53 -07001652 enic_free_intr(enic);
1653
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001654 for (i = 0; i < enic->wq_count; i++)
1655 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
1656 for (i = 0; i < enic->rq_count; i++)
1657 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
1658 for (i = 0; i < enic->cq_count; i++)
1659 vnic_cq_clean(&enic->cq[i]);
1660 for (i = 0; i < enic->intr_count; i++)
1661 vnic_intr_clean(&enic->intr[i]);
1662
1663 return 0;
1664}
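
/* Editorial sketch, not part of the original driver: the teardown order
 * enic_stop() depends on.
 *
 *   1. mask and flush every interrupt, then synchronize_irq()
 *   2. stop the notify timer and free the RFS flow table
 *   3. disable NAPI, taking the busy-poll lock so a concurrent
 *      sk_busy_loop() caller drains first
 *   4. disable WQs/RQs in hardware, unset notify, free the vectors,
 *      then clean the rings, CQs and intr resources
 *
 * Steps 1-3 ensure no completion processing is in flight before the
 * queues are drained in step 4.
 */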
1665
1666static int enic_change_mtu(struct net_device *netdev, int new_mtu)
1667{
1668 struct enic *enic = netdev_priv(netdev);
1669 int running = netif_running(netdev);
1670
Scott Feldman25f0a062008-09-24 11:23:32 -07001671 if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
1672 return -EINVAL;
1673
Roopa Prabhu73359032012-01-18 04:24:02 +00001674 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
Roopa Prabhuc97c8942011-06-03 14:35:17 +00001675 return -EOPNOTSUPP;
1676
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001677 if (running)
1678 enic_stop(netdev);
1679
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001680 netdev->mtu = new_mtu;
1681
1682 if (netdev->mtu > enic->port_mtu)
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001683 netdev_warn(netdev,
1684 "interface MTU (%d) set higher than port MTU (%d)\n",
1685 netdev->mtu, enic->port_mtu);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001686
1687 if (running)
1688 enic_open(netdev);
1689
1690 return 0;
1691}
1692
Roopa Prabhuc97c8942011-06-03 14:35:17 +00001693static void enic_change_mtu_work(struct work_struct *work)
1694{
1695 struct enic *enic = container_of(work, struct enic, change_mtu_work);
1696 struct net_device *netdev = enic->netdev;
1697 int new_mtu = vnic_dev_mtu(enic->vdev);
1698 int err;
1699 unsigned int i;
1700
1701 new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
1702
1703 rtnl_lock();
1704
1705 /* Stop RQ */
1706 del_timer_sync(&enic->notify_timer);
1707
1708 for (i = 0; i < enic->rq_count; i++)
1709 napi_disable(&enic->napi[i]);
1710
1711 vnic_intr_mask(&enic->intr[0]);
1712 enic_synchronize_irqs(enic);
1713 err = vnic_rq_disable(&enic->rq[0]);
1714 if (err) {
Konstantin Khlebnikove0575902013-07-08 11:22:51 +04001715 rtnl_unlock();
Roopa Prabhuc97c8942011-06-03 14:35:17 +00001716 netdev_err(netdev, "Unable to disable RQ.\n");
1717 return;
1718 }
1719 vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
1720 vnic_cq_clean(&enic->cq[0]);
1721 vnic_intr_clean(&enic->intr[0]);
1722
1723 /* Fill RQ with new_mtu-sized buffers */
1724 netdev->mtu = new_mtu;
1725 vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
1726 /* Need at least one buffer on ring to get going */
1727 if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
Konstantin Khlebnikove0575902013-07-08 11:22:51 +04001728 rtnl_unlock();
Roopa Prabhuc97c8942011-06-03 14:35:17 +00001729 netdev_err(netdev, "Unable to alloc receive buffers.\n");
1730 return;
1731 }
1732
1733 /* Start RQ */
1734 vnic_rq_enable(&enic->rq[0]);
1735 napi_enable(&enic->napi[0]);
1736 vnic_intr_unmask(&enic->intr[0]);
1737 enic_notify_timer_start(enic);
1738
1739 rtnl_unlock();
1740
 1741	netdev_info(netdev, "interface MTU set to %d\n", netdev->mtu);
1742}
1743
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001744#ifdef CONFIG_NET_POLL_CONTROLLER
1745static void enic_poll_controller(struct net_device *netdev)
1746{
1747 struct enic *enic = netdev_priv(netdev);
1748 struct vnic_dev *vdev = enic->vdev;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001749 unsigned int i, intr;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001750
1751 switch (vnic_dev_get_intr_mode(vdev)) {
1752 case VNIC_DEV_INTR_MODE_MSIX:
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001753 for (i = 0; i < enic->rq_count; i++) {
1754 intr = enic_msix_rq_intr(enic, i);
Vasanthy Kolluri79aeec52010-12-08 13:05:45 +00001755 enic_isr_msix_rq(enic->msix_entry[intr].vector,
1756 &enic->napi[i]);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001757 }
Vasanthy Kollurib880a952011-06-09 10:37:07 +00001758
1759 for (i = 0; i < enic->wq_count; i++) {
1760 intr = enic_msix_wq_intr(enic, i);
1761 enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
1762 }
1763
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001764 break;
1765 case VNIC_DEV_INTR_MODE_MSI:
1766 enic_isr_msi(enic->pdev->irq, enic);
1767 break;
1768 case VNIC_DEV_INTR_MODE_INTX:
1769 enic_isr_legacy(enic->pdev->irq, netdev);
1770 break;
1771 default:
1772 break;
1773 }
1774}
1775#endif
1776
1777static int enic_dev_wait(struct vnic_dev *vdev,
1778 int (*start)(struct vnic_dev *, int),
1779 int (*finished)(struct vnic_dev *, int *),
1780 int arg)
1781{
1782 unsigned long time;
1783 int done;
1784 int err;
1785
1786 BUG_ON(in_interrupt());
1787
1788 err = start(vdev, arg);
1789 if (err)
1790 return err;
1791
1792 /* Wait for func to complete...2 seconds max
1793 */
1794
1795 time = jiffies + (HZ * 2);
1796 do {
1797
1798 err = finished(vdev, &done);
1799 if (err)
1800 return err;
1801
1802 if (done)
1803 return 0;
1804
1805 schedule_timeout_uninterruptible(HZ / 10);
1806
1807 } while (time_after(time, jiffies));
1808
1809 return -ETIMEDOUT;
1810}
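
/* Editorial sketch, not part of the original driver: enic_dev_wait()
 * turns the firmware's asynchronous start/finished devcmd pairs into a
 * blocking call with a 2 second timeout, polling every HZ/10. A new
 * command would be wrapped the same way enic_dev_open() and
 * enic_dev_hang_reset() are below; for example (the "foo" names are
 * invented for illustration):
 *
 *	static int enic_dev_foo(struct enic *enic)
 *	{
 *		int err;
 *
 *		err = enic_dev_wait(enic->vdev, vnic_dev_foo,
 *			vnic_dev_foo_done, 0);
 *		if (err)
 *			netdev_err(enic->netdev,
 *				"vNIC foo failed, err %d\n", err);
 *		return err;
 *	}
 */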
1811
1812static int enic_dev_open(struct enic *enic)
1813{
1814 int err;
1815
1816 err = enic_dev_wait(enic->vdev, vnic_dev_open,
1817 vnic_dev_open_done, 0);
1818 if (err)
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001819 dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
1820 err);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001821
1822 return err;
1823}
1824
Vasanthy Kolluri99ef5632010-06-24 10:50:00 +00001825static int enic_dev_hang_reset(struct enic *enic)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001826{
1827 int err;
1828
Vasanthy Kolluri99ef5632010-06-24 10:50:00 +00001829 err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
1830 vnic_dev_hang_reset_done, 0);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001831 if (err)
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001832 netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
1833 err);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001834
1835 return err;
1836}
1837
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001838static int enic_set_rsskey(struct enic *enic)
Scott Feldman68f71702009-02-09 23:24:24 -08001839{
Vasanthy Kolluri1f4f0672010-11-15 08:09:55 +00001840 dma_addr_t rss_key_buf_pa;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001841 union vnic_rss_key *rss_key_buf_va = NULL;
1842 union vnic_rss_key rss_key = {
1843 .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
1844 .key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
1845 .key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
1846 .key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
1847 };
1848 int err;
1849
1850 rss_key_buf_va = pci_alloc_consistent(enic->pdev,
1851 sizeof(union vnic_rss_key), &rss_key_buf_pa);
1852 if (!rss_key_buf_va)
1853 return -ENOMEM;
1854
1855 memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
1856
Tony Camuso8e091342014-06-23 16:08:03 +05301857 spin_lock_bh(&enic->devcmd_lock);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001858 err = enic_set_rss_key(enic,
1859 rss_key_buf_pa,
1860 sizeof(union vnic_rss_key));
Tony Camuso8e091342014-06-23 16:08:03 +05301861 spin_unlock_bh(&enic->devcmd_lock);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001862
1863 pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
1864 rss_key_buf_va, rss_key_buf_pa);
1865
1866 return err;
1867}
1868
1869static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
1870{
Vasanthy Kolluri1f4f0672010-11-15 08:09:55 +00001871 dma_addr_t rss_cpu_buf_pa;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001872 union vnic_rss_cpu *rss_cpu_buf_va = NULL;
1873 unsigned int i;
1874 int err;
1875
1876 rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
1877 sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
1878 if (!rss_cpu_buf_va)
1879 return -ENOMEM;
1880
1881 for (i = 0; i < (1 << rss_hash_bits); i++)
1882 (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
1883
Tony Camuso8e091342014-06-23 16:08:03 +05301884 spin_lock_bh(&enic->devcmd_lock);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001885 err = enic_set_rss_cpu(enic,
1886 rss_cpu_buf_pa,
1887 sizeof(union vnic_rss_cpu));
Tony Camuso8e091342014-06-23 16:08:03 +05301888 spin_unlock_bh(&enic->devcmd_lock);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001889
1890 pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
1891 rss_cpu_buf_va, rss_cpu_buf_pa);
1892
1893 return err;
1894}
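
/* Editorial sketch, not part of the original driver: with
 * rss_hash_bits = 7 the indirection table programmed above has
 * 1 << 7 = 128 entries, filled round-robin over the receive queues.
 * Assuming a hypothetical rq_count = 4:
 *
 *   entry 0 -> RQ 0, entry 1 -> RQ 1, entry 2 -> RQ 2, entry 3 -> RQ 3,
 *   entry 4 -> RQ 0, ... entry 127 -> RQ 3
 *
 * i.e. cpu[i / 4].b[i % 4] = i % rq_count, so hashed flows spread
 * evenly across however many RQs enic_set_intr_mode() granted.
 */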
1895
1896static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
1897 u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
1898{
Scott Feldman68f71702009-02-09 23:24:24 -08001899 const u8 tso_ipid_split_en = 0;
1900 const u8 ig_vlan_strip_en = 1;
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001901 int err;
Scott Feldman68f71702009-02-09 23:24:24 -08001902
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001903 /* Enable VLAN tag stripping.
1904 */
Scott Feldman68f71702009-02-09 23:24:24 -08001905
Tony Camuso8e091342014-06-23 16:08:03 +05301906 spin_lock_bh(&enic->devcmd_lock);
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001907 err = enic_set_nic_cfg(enic,
Scott Feldman68f71702009-02-09 23:24:24 -08001908 rss_default_cpu, rss_hash_type,
1909 rss_hash_bits, rss_base_cpu,
1910 rss_enable, tso_ipid_split_en,
1911 ig_vlan_strip_en);
Tony Camuso8e091342014-06-23 16:08:03 +05301912 spin_unlock_bh(&enic->devcmd_lock);
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001913
1914 return err;
1915}
1916
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001917static int enic_set_rss_nic_cfg(struct enic *enic)
1918{
1919 struct device *dev = enic_get_dev(enic);
1920 const u8 rss_default_cpu = 0;
1921 const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
1922 NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
1923 NIC_CFG_RSS_HASH_TYPE_IPV6 |
1924 NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
1925 const u8 rss_hash_bits = 7;
1926 const u8 rss_base_cpu = 0;
1927 u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
1928
1929 if (rss_enable) {
1930 if (!enic_set_rsskey(enic)) {
1931 if (enic_set_rsscpu(enic, rss_hash_bits)) {
1932 rss_enable = 0;
 1933				dev_warn(dev, "RSS disabled, "
 1934					"failed to set RSS CPU indirection table.\n");
1935 }
1936 } else {
1937 rss_enable = 0;
 1938			dev_warn(dev, "RSS disabled, failed to set RSS key.\n");
1939 }
1940 }
1941
1942 return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
1943 rss_hash_bits, rss_base_cpu, rss_enable);
1944}
1945
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001946static void enic_reset(struct work_struct *work)
1947{
1948 struct enic *enic = container_of(work, struct enic, reset);
1949
1950 if (!netif_running(enic->netdev))
1951 return;
1952
1953 rtnl_lock();
1954
Neel Patel0b038562013-08-16 15:47:40 -07001955 spin_lock(&enic->enic_api_lock);
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001956 enic_dev_hang_notify(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001957 enic_stop(enic->netdev);
Vasanthy Kolluri99ef5632010-06-24 10:50:00 +00001958 enic_dev_hang_reset(enic);
Vasanthy Kollurie0afe532011-02-17 08:53:12 +00001959 enic_reset_addr_lists(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001960 enic_init_vnic_resources(enic);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001961 enic_set_rss_nic_cfg(enic);
Vasanthy Kollurif8cac142010-06-24 10:49:51 +00001962 enic_dev_set_ig_vlan_rewrite_mode(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001963 enic_open(enic->netdev);
Neel Patel0b038562013-08-16 15:47:40 -07001964 spin_unlock(&enic->enic_api_lock);
Neel Pateld765bb42013-08-16 15:47:41 -07001965 call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001966
1967 rtnl_unlock();
1968}
1969
1970static int enic_set_intr_mode(struct enic *enic)
1971{
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001972 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
Vasanthy Kolluri1cbb1a62011-02-17 13:57:19 +00001973 unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001974 unsigned int i;
1975
1976 /* Set interrupt mode (INTx, MSI, MSI-X) depending
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001977 * on system capabilities.
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001978 *
1979 * Try MSI-X first
1980 *
1981 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
1982 * (the second to last INTR is used for WQ/RQ errors)
1983 * (the last INTR is used for notifications)
1984 */
1985
1986 BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
1987 for (i = 0; i < n + m + 2; i++)
1988 enic->msix_entry[i].entry = i;
1989
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001990 /* Use multiple RQs if RSS is enabled
1991 */
1992
1993 if (ENIC_SETTING(enic, RSS) &&
1994 enic->config.intr_mode < 1 &&
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001995 enic->rq_count >= n &&
1996 enic->wq_count >= m &&
1997 enic->cq_count >= n + m &&
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001998 enic->intr_count >= n + m + 2) {
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001999
Alexander Gordeevabbb6a32014-02-18 11:08:02 +01002000 if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
2001 n + m + 2, n + m + 2) > 0) {
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002002
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002003 enic->rq_count = n;
2004 enic->wq_count = m;
2005 enic->cq_count = n + m;
2006 enic->intr_count = n + m + 2;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002007
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002008 vnic_dev_set_intr_mode(enic->vdev,
2009 VNIC_DEV_INTR_MODE_MSIX);
2010
2011 return 0;
2012 }
2013 }
2014
2015 if (enic->config.intr_mode < 1 &&
2016 enic->rq_count >= 1 &&
2017 enic->wq_count >= m &&
2018 enic->cq_count >= 1 + m &&
2019 enic->intr_count >= 1 + m + 2) {
Alexander Gordeevabbb6a32014-02-18 11:08:02 +01002020 if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
2021 1 + m + 2, 1 + m + 2) > 0) {
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002022
2023 enic->rq_count = 1;
2024 enic->wq_count = m;
2025 enic->cq_count = 1 + m;
2026 enic->intr_count = 1 + m + 2;
2027
2028 vnic_dev_set_intr_mode(enic->vdev,
2029 VNIC_DEV_INTR_MODE_MSIX);
2030
2031 return 0;
2032 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002033 }
2034
2035 /* Next try MSI
2036 *
2037 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
2038 */
2039
2040 if (enic->config.intr_mode < 2 &&
2041 enic->rq_count >= 1 &&
2042 enic->wq_count >= 1 &&
2043 enic->cq_count >= 2 &&
2044 enic->intr_count >= 1 &&
2045 !pci_enable_msi(enic->pdev)) {
2046
2047 enic->rq_count = 1;
2048 enic->wq_count = 1;
2049 enic->cq_count = 2;
2050 enic->intr_count = 1;
2051
2052 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
2053
2054 return 0;
2055 }
2056
2057 /* Next try INTx
2058 *
2059 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
2060 * (the first INTR is used for WQ/RQ)
2061 * (the second INTR is used for WQ/RQ errors)
2062 * (the last INTR is used for notifications)
2063 */
2064
2065 if (enic->config.intr_mode < 3 &&
2066 enic->rq_count >= 1 &&
2067 enic->wq_count >= 1 &&
2068 enic->cq_count >= 2 &&
2069 enic->intr_count >= 3) {
2070
2071 enic->rq_count = 1;
2072 enic->wq_count = 1;
2073 enic->cq_count = 2;
2074 enic->intr_count = 3;
2075
2076 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
2077
2078 return 0;
2079 }
2080
2081 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
2082
2083 return -EINVAL;
2084}
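
/* Editorial sketch, not part of the original driver: the resource math
 * behind the fallback ladder above, for a hypothetical adapter
 * advertising 8 RQs, 8 WQs, 16 CQs and 18 interrupts.
 *
 *   MSI-X + RSS  : n RQs, m WQs, n + m CQs, n + m + 2 vectors
 *                  (8 + 8 + 2 = 18 vectors -> fits, RSS stays on)
 *   MSI-X, 1 RQ  : 1 + m CQs, 1 + m + 2 vectors (RSS effectively off)
 *   MSI          : 1 RQ, 1 WQ, 2 CQs, 1 vector
 *   INTx         : 1 RQ, 1 WQ, 2 CQs, 3 vectors (I/O, error, notify)
 *
 * The first rung that fits fixes the final rq/wq/cq/intr counts and the
 * vnic_dev interrupt mode; if none fits, the probe fails with -EINVAL.
 */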
2085
2086static void enic_clear_intr_mode(struct enic *enic)
2087{
2088 switch (vnic_dev_get_intr_mode(enic->vdev)) {
2089 case VNIC_DEV_INTR_MODE_MSIX:
2090 pci_disable_msix(enic->pdev);
2091 break;
2092 case VNIC_DEV_INTR_MODE_MSI:
2093 pci_disable_msi(enic->pdev);
2094 break;
2095 default:
2096 break;
2097 }
2098
2099 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
2100}
2101
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002102static const struct net_device_ops enic_netdev_dynamic_ops = {
2103 .ndo_open = enic_open,
2104 .ndo_stop = enic_stop,
2105 .ndo_start_xmit = enic_hard_start_xmit,
stephen hemmingerf20530b2011-06-08 14:54:02 +00002106 .ndo_get_stats64 = enic_get_stats,
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002107 .ndo_validate_addr = eth_validate_addr,
Roopa Prabhu319d7e82010-12-08 13:19:58 +00002108 .ndo_set_rx_mode = enic_set_rx_mode,
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002109 .ndo_set_mac_address = enic_set_mac_address_dynamic,
2110 .ndo_change_mtu = enic_change_mtu,
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002111 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
2112 .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
2113 .ndo_tx_timeout = enic_tx_timeout,
2114 .ndo_set_vf_port = enic_set_vf_port,
2115 .ndo_get_vf_port = enic_get_vf_port,
Roopa Prabhu0b1c00f2010-12-08 13:53:58 +00002116 .ndo_set_vf_mac = enic_set_vf_mac,
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002117#ifdef CONFIG_NET_POLL_CONTROLLER
2118 .ndo_poll_controller = enic_poll_controller,
2119#endif
Govindarajulu Varadarajana145df22014-06-23 16:08:02 +05302120#ifdef CONFIG_RFS_ACCEL
2121 .ndo_rx_flow_steer = enic_rx_flow_steer,
2122#endif
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05302123#ifdef CONFIG_NET_RX_BUSY_POLL
2124 .ndo_busy_poll = enic_busy_poll,
2125#endif
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002126};
2127
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08002128static const struct net_device_ops enic_netdev_ops = {
2129 .ndo_open = enic_open,
2130 .ndo_stop = enic_stop,
Stephen Hemminger00829822008-11-20 20:14:53 -08002131 .ndo_start_xmit = enic_hard_start_xmit,
stephen hemmingerf20530b2011-06-08 14:54:02 +00002132 .ndo_get_stats64 = enic_get_stats,
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08002133 .ndo_validate_addr = eth_validate_addr,
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002134 .ndo_set_mac_address = enic_set_mac_address,
Roopa Prabhu319d7e82010-12-08 13:19:58 +00002135 .ndo_set_rx_mode = enic_set_rx_mode,
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08002136 .ndo_change_mtu = enic_change_mtu,
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08002137 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
2138 .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
2139 .ndo_tx_timeout = enic_tx_timeout,
Roopa Prabhu3f192792011-09-22 03:44:43 +00002140 .ndo_set_vf_port = enic_set_vf_port,
2141 .ndo_get_vf_port = enic_get_vf_port,
2142 .ndo_set_vf_mac = enic_set_vf_mac,
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08002143#ifdef CONFIG_NET_POLL_CONTROLLER
2144 .ndo_poll_controller = enic_poll_controller,
2145#endif
Govindarajulu Varadarajana145df22014-06-23 16:08:02 +05302146#ifdef CONFIG_RFS_ACCEL
2147 .ndo_rx_flow_steer = enic_rx_flow_steer,
2148#endif
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05302149#ifdef CONFIG_NET_RX_BUSY_POLL
2150 .ndo_busy_poll = enic_busy_poll,
2151#endif
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08002152};
2153
Vasanthy Kolluri2fdba382010-09-30 13:35:45 +00002154static void enic_dev_deinit(struct enic *enic)
Scott Feldman6fdfa972009-09-03 17:02:45 +00002155{
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002156 unsigned int i;
2157
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05302158 for (i = 0; i < enic->rq_count; i++) {
2159 napi_hash_del(&enic->napi[i]);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002160 netif_napi_del(&enic->napi[i]);
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05302161 }
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002162
Scott Feldman6fdfa972009-09-03 17:02:45 +00002163 enic_free_vnic_resources(enic);
2164 enic_clear_intr_mode(enic);
2165}
2166
Vasanthy Kolluri2fdba382010-09-30 13:35:45 +00002167static int enic_dev_init(struct enic *enic)
Scott Feldman6fdfa972009-09-03 17:02:45 +00002168{
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002169 struct device *dev = enic_get_dev(enic);
Scott Feldman6fdfa972009-09-03 17:02:45 +00002170 struct net_device *netdev = enic->netdev;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002171 unsigned int i;
Scott Feldman6fdfa972009-09-03 17:02:45 +00002172 int err;
2173
Vasanthy Kolluriea7ea652011-06-17 07:56:48 +00002174 /* Get interrupt coalesce timer info */
2175 err = enic_dev_intr_coal_timer_info(enic);
2176 if (err) {
2177 dev_warn(dev, "Using default conversion factor for "
2178 "interrupt coalesce timer\n");
2179 vnic_dev_intr_coal_timer_info_default(enic->vdev);
2180 }
2181
Scott Feldman6fdfa972009-09-03 17:02:45 +00002182 /* Get vNIC configuration
2183 */
2184
2185 err = enic_get_vnic_config(enic);
2186 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002187 dev_err(dev, "Get vNIC configuration failed, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002188 return err;
2189 }
2190
2191 /* Get available resource counts
2192 */
2193
2194 enic_get_res_counts(enic);
2195
2196 /* Set interrupt mode based on resource counts and system
2197 * capabilities
2198 */
2199
2200 err = enic_set_intr_mode(enic);
2201 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002202 dev_err(dev, "Failed to set intr mode based on resource "
2203 "counts and system capabilities, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002204 return err;
2205 }
2206
2207 /* Allocate and configure vNIC resources
2208 */
2209
2210 err = enic_alloc_vnic_resources(enic);
2211 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002212 dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002213 goto err_out_free_vnic_resources;
2214 }
2215
2216 enic_init_vnic_resources(enic);
2217
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002218 err = enic_set_rss_nic_cfg(enic);
Scott Feldman6fdfa972009-09-03 17:02:45 +00002219 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002220 dev_err(dev, "Failed to config nic, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002221 goto err_out_free_vnic_resources;
2222 }
2223
2224 switch (vnic_dev_get_intr_mode(enic->vdev)) {
2225 default:
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002226 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05302227 napi_hash_add(&enic->napi[0]);
Scott Feldman6fdfa972009-09-03 17:02:45 +00002228 break;
2229 case VNIC_DEV_INTR_MODE_MSIX:
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05302230 for (i = 0; i < enic->rq_count; i++) {
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002231 netif_napi_add(netdev, &enic->napi[i],
2232 enic_poll_msix, 64);
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05302233 napi_hash_add(&enic->napi[i]);
2234 }
Scott Feldman6fdfa972009-09-03 17:02:45 +00002235 break;
2236 }
2237
2238 return 0;
2239
2240err_out_free_vnic_resources:
2241 enic_clear_intr_mode(enic);
2242 enic_free_vnic_resources(enic);
2243
2244 return err;
2245}
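
/* Editorial sketch, not part of the original driver: the bring-up order
 * enic_dev_init() follows, which enic_dev_deinit() and the error path
 * unwind.
 *
 *   1. fetch the intr coalesce timer conversion info (or fall back to
 *      the default)
 *   2. read the vNIC config and the available resource counts
 *   3. choose INTx/MSI/MSI-X via enic_set_intr_mode()
 *   4. allocate and initialize the WQ/RQ/CQ/INTR resources
 *   5. program RSS through enic_set_rss_nic_cfg()
 *   6. register NAPI contexts: one for INTx/MSI, one per RQ for MSI-X
 */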
2246
Scott Feldman27e6c7d2009-09-03 17:01:53 +00002247static void enic_iounmap(struct enic *enic)
2248{
2249 unsigned int i;
2250
2251 for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
2252 if (enic->bar[i].vaddr)
2253 iounmap(enic->bar[i].vaddr);
2254}
2255
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00002256static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002257{
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002258 struct device *dev = &pdev->dev;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002259 struct net_device *netdev;
2260 struct enic *enic;
2261 int using_dac = 0;
2262 unsigned int i;
2263 int err;
Roopa Prabhu8749b422011-09-22 03:44:33 +00002264#ifdef CONFIG_PCI_IOV
2265 int pos = 0;
2266#endif
Roopa Prabhub67f2312012-01-19 22:25:36 +00002267 int num_pps = 1;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002268
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002269 /* Allocate net device structure and initialize. Private
2270 * instance data is initialized to zero.
2271 */
2272
govindarajulu.v822473b2013-09-04 11:17:14 +05302273 netdev = alloc_etherdev_mqs(sizeof(struct enic),
2274 ENIC_RQ_MAX, ENIC_WQ_MAX);
Joe Perches41de8d42012-01-29 13:47:52 +00002275 if (!netdev)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002276 return -ENOMEM;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002277
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002278 pci_set_drvdata(pdev, netdev);
2279
2280 SET_NETDEV_DEV(netdev, &pdev->dev);
2281
2282 enic = netdev_priv(netdev);
2283 enic->netdev = netdev;
2284 enic->pdev = pdev;
2285
2286 /* Setup PCI resources
2287 */
2288
Vasanthy Kolluri29046f92010-06-24 10:52:26 +00002289 err = pci_enable_device_mem(pdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002290 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002291 dev_err(dev, "Cannot enable PCI device, aborting\n");
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002292 goto err_out_free_netdev;
2293 }
2294
2295 err = pci_request_regions(pdev, DRV_NAME);
2296 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002297 dev_err(dev, "Cannot request PCI regions, aborting\n");
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002298 goto err_out_disable_device;
2299 }
2300
2301 pci_set_master(pdev);
2302
2303 /* Query PCI controller on system for DMA addressing
govindarajulu.v624dbf52013-09-04 11:17:16 +05302304 * limitation for the device. Try 64-bit first, and
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002305	 * fall back to 32-bit.
2306 */
2307
govindarajulu.v624dbf52013-09-04 11:17:16 +05302308 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002309 if (err) {
Yang Hongyang284901a2009-04-06 19:01:15 -07002310 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002311 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002312 dev_err(dev, "No usable DMA configuration, aborting\n");
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002313 goto err_out_release_regions;
2314 }
Yang Hongyang284901a2009-04-06 19:01:15 -07002315 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002316 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002317 dev_err(dev, "Unable to obtain %u-bit DMA "
2318 "for consistent allocations, aborting\n", 32);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002319 goto err_out_release_regions;
2320 }
2321 } else {
govindarajulu.v624dbf52013-09-04 11:17:16 +05302322 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002323 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002324 dev_err(dev, "Unable to obtain %u-bit DMA "
govindarajulu.v624dbf52013-09-04 11:17:16 +05302325 "for consistent allocations, aborting\n", 64);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002326 goto err_out_release_regions;
2327 }
2328 using_dac = 1;
2329 }
2330
Scott Feldman27e6c7d2009-09-03 17:01:53 +00002331 /* Map vNIC resources from BAR0-5
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002332 */
2333
Scott Feldman27e6c7d2009-09-03 17:01:53 +00002334 for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
2335 if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
2336 continue;
2337 enic->bar[i].len = pci_resource_len(pdev, i);
2338 enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
2339 if (!enic->bar[i].vaddr) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002340 dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
Scott Feldman27e6c7d2009-09-03 17:01:53 +00002341 err = -ENODEV;
2342 goto err_out_iounmap;
2343 }
2344 enic->bar[i].bus_addr = pci_resource_start(pdev, i);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002345 }
2346
2347 /* Register vNIC device
2348 */
2349
Scott Feldman27e6c7d2009-09-03 17:01:53 +00002350 enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
2351 ARRAY_SIZE(enic->bar));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002352 if (!enic->vdev) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002353 dev_err(dev, "vNIC registration failed, aborting\n");
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002354 err = -ENODEV;
2355 goto err_out_iounmap;
2356 }
2357
Roopa Prabhu8749b422011-09-22 03:44:33 +00002358#ifdef CONFIG_PCI_IOV
2359 /* Get number of subvnics */
2360 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
2361 if (pos) {
2362 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
Dan Carpenter413708b2012-02-29 21:19:54 +00002363 &enic->num_vfs);
Roopa Prabhu8749b422011-09-22 03:44:33 +00002364 if (enic->num_vfs) {
2365 err = pci_enable_sriov(pdev, enic->num_vfs);
2366 if (err) {
2367 dev_err(dev, "SRIOV enable failed, aborting."
2368 " pci_enable_sriov() returned %d\n",
2369 err);
2370 goto err_out_vnic_unregister;
2371 }
2372 enic->priv_flags |= ENIC_SRIOV_ENABLED;
Roopa Prabhub67f2312012-01-19 22:25:36 +00002373 num_pps = enic->num_vfs;
Roopa Prabhu8749b422011-09-22 03:44:33 +00002374 }
2375 }
Roopa Prabhu8749b422011-09-22 03:44:33 +00002376#endif
Roopa Prabhuca2b7212012-01-18 04:24:07 +00002377
Roopa Prabhu3f192792011-09-22 03:44:43 +00002378 /* Allocate structure for port profiles */
Thomas Meyera1de2212011-11-29 11:08:00 +00002379 enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
Roopa Prabhu3f192792011-09-22 03:44:43 +00002380 if (!enic->pp) {
Roopa Prabhu3f192792011-09-22 03:44:43 +00002381 err = -ENOMEM;
Roopa Prabhuca2b7212012-01-18 04:24:07 +00002382 goto err_out_disable_sriov_pp;
Roopa Prabhu3f192792011-09-22 03:44:43 +00002383 }
2384
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002385 /* Issue device open to get device in known state
2386 */
2387
2388 err = enic_dev_open(enic);
2389 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002390 dev_err(dev, "vNIC dev open failed, aborting\n");
Roopa Prabhuca2b7212012-01-18 04:24:07 +00002391 goto err_out_disable_sriov;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002392 }
2393
Vasanthy Kolluri69161422011-02-04 16:17:16 +00002394 /* Setup devcmd lock
2395 */
2396
2397 spin_lock_init(&enic->devcmd_lock);
Neel Patel0b038562013-08-16 15:47:40 -07002398 spin_lock_init(&enic->enic_api_lock);
Vasanthy Kolluri69161422011-02-04 16:17:16 +00002399
2400 /*
2401 * Set ingress vlan rewrite mode before vnic initialization
2402 */
2403
2404 err = enic_dev_set_ig_vlan_rewrite_mode(enic);
2405 if (err) {
2406 dev_err(dev,
2407 "Failed to set ingress vlan rewrite mode, aborting.\n");
2408 goto err_out_dev_close;
2409 }
2410
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002411 /* Issue device init to initialize the vnic-to-switch link.
2412 * We'll start with carrier off and wait for link UP
2413 * notification later to turn on carrier. We don't need
2414 * to wait here for the vnic-to-switch link initialization
2415 * to complete; link UP notification is the indication that
2416 * the process is complete.
2417 */
2418
2419 netif_carrier_off(netdev);
2420
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002421 /* Do not call dev_init for a dynamic vnic.
2422 * For a dynamic vnic, init_prov_info will be
2423 * called later by an upper layer.
2424 */
2425
Roopa Prabhu2b68c182012-02-20 00:12:04 +00002426 if (!enic_is_dynamic(enic)) {
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002427 err = vnic_dev_init(enic->vdev, 0);
2428 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002429 dev_err(dev, "vNIC dev init failed, aborting\n");
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002430 goto err_out_dev_close;
2431 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002432 }
2433
Scott Feldman6fdfa972009-09-03 17:02:45 +00002434 err = enic_dev_init(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002435 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002436 dev_err(dev, "Device initialization failed, aborting\n");
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002437 goto err_out_dev_close;
2438 }
2439
govindarajulu.v822473b2013-09-04 11:17:14 +05302440 netif_set_real_num_tx_queues(netdev, enic->wq_count);
govindarajulu.vbf751ba2013-09-04 11:17:15 +05302441 netif_set_real_num_rx_queues(netdev, enic->rq_count);
govindarajulu.v822473b2013-09-04 11:17:14 +05302442
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00002443 /* Setup notification timer, HW reset task, and wq locks
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002444 */
2445
2446 init_timer(&enic->notify_timer);
2447 enic->notify_timer.function = enic_notify_timer;
2448 enic->notify_timer.data = (unsigned long)enic;
2449
Sujith Sankar7c2ce6e2014-05-20 03:14:05 +05302450 enic_set_rx_coal_setting(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002451 INIT_WORK(&enic->reset, enic_reset);
Roopa Prabhuc97c8942011-06-03 14:35:17 +00002452 INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002453
2454 for (i = 0; i < enic->wq_count; i++)
2455 spin_lock_init(&enic->wq_lock[i]);
2456
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002457 /* Register net device
2458 */
2459
2460 enic->port_mtu = enic->config.mtu;
2461 (void)enic_change_mtu(netdev, enic->port_mtu);
2462
2463 err = enic_set_mac_addr(netdev, enic->mac_addr);
2464 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002465 dev_err(dev, "Invalid MAC address, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002466 goto err_out_dev_deinit;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002467 }
2468
Scott Feldman7c844592009-12-23 13:27:54 +00002469 enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
Sujith Sankar7c2ce6e2014-05-20 03:14:05 +05302470	/* rx coalesce time was already initialized. This value is used
 2471	 * if adaptive coalescing is turned off
2472 */
Scott Feldman7c844592009-12-23 13:27:54 +00002473 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
2474
Roopa Prabhu73359032012-01-18 04:24:02 +00002475 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002476 netdev->netdev_ops = &enic_netdev_dynamic_ops;
2477 else
2478 netdev->netdev_ops = &enic_netdev_ops;
2479
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002480 netdev->watchdog_timeo = 2 * HZ;
Neel Patelf13bbc22013-07-22 09:59:18 -07002481 enic_set_ethtool_ops(netdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002482
Patrick McHardyf6469682013-04-19 02:04:27 +00002483 netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
Vasanthy Kolluri1825aca2010-06-24 10:51:59 +00002484 if (ENIC_SETTING(enic, LOOP)) {
Patrick McHardyf6469682013-04-19 02:04:27 +00002485 netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
Vasanthy Kolluri1825aca2010-06-24 10:51:59 +00002486 enic->loop_enable = 1;
2487 enic->loop_tag = enic->config.loop_tag;
2488 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
2489 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002490 if (ENIC_SETTING(enic, TXCSUM))
Michał Mirosław5ec8f9b2011-04-07 02:43:48 +00002491 netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002492 if (ENIC_SETTING(enic, TSO))
Michał Mirosław5ec8f9b2011-04-07 02:43:48 +00002493 netdev->hw_features |= NETIF_F_TSO |
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002494 NETIF_F_TSO6 | NETIF_F_TSO_ECN;
govindarajulu.vbf751ba2013-09-04 11:17:15 +05302495 if (ENIC_SETTING(enic, RSS))
2496 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław5ec8f9b2011-04-07 02:43:48 +00002497 if (ENIC_SETTING(enic, RXCSUM))
2498 netdev->hw_features |= NETIF_F_RXCSUM;
2499
2500 netdev->features |= netdev->hw_features;
2501
Govindarajulu Varadarajana145df22014-06-23 16:08:02 +05302502#ifdef CONFIG_RFS_ACCEL
2503 netdev->hw_features |= NETIF_F_NTUPLE;
2504#endif
2505
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002506 if (using_dac)
2507 netdev->features |= NETIF_F_HIGHDMA;
2508
Jiri Pirko01789342011-08-16 06:29:00 +00002509 netdev->priv_flags |= IFF_UNICAST_FLT;
2510
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002511 err = register_netdev(netdev);
2512 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002513 dev_err(dev, "Cannot register net device, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002514 goto err_out_dev_deinit;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002515 }
2516
2517 return 0;
2518
Scott Feldman6fdfa972009-09-03 17:02:45 +00002519err_out_dev_deinit:
2520 enic_dev_deinit(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002521err_out_dev_close:
2522 vnic_dev_close(enic->vdev);
Roopa Prabhu8749b422011-09-22 03:44:33 +00002523err_out_disable_sriov:
Roopa Prabhuca2b7212012-01-18 04:24:07 +00002524 kfree(enic->pp);
2525err_out_disable_sriov_pp:
Roopa Prabhu8749b422011-09-22 03:44:33 +00002526#ifdef CONFIG_PCI_IOV
2527 if (enic_sriov_enabled(enic)) {
2528 pci_disable_sriov(pdev);
2529 enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
2530 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002531err_out_vnic_unregister:
Roopa Prabhu8749b422011-09-22 03:44:33 +00002532#endif
Roopa Prabhu35d87e32012-01-18 04:24:12 +00002533 vnic_dev_unregister(enic->vdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002534err_out_iounmap:
2535 enic_iounmap(enic);
2536err_out_release_regions:
2537 pci_release_regions(pdev);
2538err_out_disable_device:
2539 pci_disable_device(pdev);
2540err_out_free_netdev:
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002541 free_netdev(netdev);
2542
2543 return err;
2544}
2545
Bill Pemberton854de922012-12-03 09:23:05 -05002546static void enic_remove(struct pci_dev *pdev)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002547{
2548 struct net_device *netdev = pci_get_drvdata(pdev);
2549
2550 if (netdev) {
2551 struct enic *enic = netdev_priv(netdev);
2552
Tejun Heo23f333a2010-12-12 16:45:14 +01002553 cancel_work_sync(&enic->reset);
Roopa Prabhuc97c8942011-06-03 14:35:17 +00002554 cancel_work_sync(&enic->change_mtu_work);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002555 unregister_netdev(netdev);
Scott Feldman6fdfa972009-09-03 17:02:45 +00002556 enic_dev_deinit(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002557 vnic_dev_close(enic->vdev);
Roopa Prabhu8749b422011-09-22 03:44:33 +00002558#ifdef CONFIG_PCI_IOV
2559 if (enic_sriov_enabled(enic)) {
2560 pci_disable_sriov(pdev);
2561 enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
2562 }
2563#endif
Roopa Prabhu3f192792011-09-22 03:44:43 +00002564 kfree(enic->pp);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002565 vnic_dev_unregister(enic->vdev);
2566 enic_iounmap(enic);
2567 pci_release_regions(pdev);
2568 pci_disable_device(pdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002569 free_netdev(netdev);
2570 }
2571}
2572
2573static struct pci_driver enic_driver = {
2574 .name = DRV_NAME,
2575 .id_table = enic_id_table,
2576 .probe = enic_probe,
Bill Pemberton854de922012-12-03 09:23:05 -05002577 .remove = enic_remove,
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002578};
2579
2580static int __init enic_init_module(void)
2581{
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002582 pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002583
2584 return pci_register_driver(&enic_driver);
2585}
2586
2587static void __exit enic_cleanup_module(void)
2588{
2589 pci_unregister_driver(&enic_driver);
2590}
2591
2592module_init(enic_init_module);
2593module_exit(enic_cleanup_module);