/*
 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
#include <linux/ktime.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"
#include "enic_clsf.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
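
/* Worked example (assuming WQ_ENET_LEN_BITS is 14, i.e. a 16384-byte
 * WQ_ENET_MAX_DESC_LEN): a maximal 65536-byte TSO send splits its
 * linear data into at most 65536 / 16384 + 1 = 5 descriptors, which is
 * the worst case ENIC_DESC_MAX_SPLITS bounds.
 */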

#define PCI_DEVICE_ID_CISCO_VIC_ENET         0x0043  /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN     0x0044  /* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF      0x0071  /* enet SRIOV VF */

#define RX_COPYBREAK_DEFAULT		256

/* Supported devices */
static const struct pci_device_id enic_id_table[] = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

#define ENIC_LARGE_PKT_THRESHOLD	1000
#define ENIC_MAX_COALESCE_TIMERS	10
/* Interrupt moderation table, which will be used to decide the
 * coalescing timer values
 * {rx_rate in Mbps, mapping percentage of the range}
 */
struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
	{4000, 0},
	{4400, 10},
	{5060, 20},
	{5230, 30},
	{5540, 40},
	{5820, 50},
	{6120, 60},
	{6435, 70},
	{6745, 80},
	{7000, 90},
	{0xFFFFFFFF, 100}
};

/* This table helps the driver to pick different ranges for rx coalescing
 * timer depending on the link speed.
 */
struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
	{0, 0}, /* 0 - 4 Gbps */
	{0, 3}, /* 4 - 10 Gbps */
	{3, 6}, /* 10 - 40 Gbps */
};
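
/* Lookup example: enic_calc_int_moderation() below scans mod_table[]
 * for the first entry whose rx_rate exceeds the measured rate, so a
 * measured 5000 Mbps stops at {5060, 20} and the coalescing timer is
 * placed 20% of the way into the timer range selected from mod_range[]
 * for the current link speed.
 */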

int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
	return vf >= 0 && vf < enic->num_vfs;
#else
	return 0;
#endif
}

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_subqueue(enic->netdev, q_number);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}
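
/* The wake threshold above mirrors the stop condition in
 * enic_hard_start_xmit(): a stopped queue is only woken once at least
 * MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS descriptors are free, enough
 * for one worst-case skb (MAX_SKB_FRAGS page fragments plus a linear
 * head split into up to ENIC_DESC_MAX_SPLITS descriptors).
 */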

static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				i, error_status);
	}
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					"interface MTU (%d) set higher "
					"than switch port MTU (%d)\n",
					netdev->mtu, mtu);
		}
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr))
		napi_schedule_irqoff(&enic->napi[0]);
	else
		vnic_intr_unmask(&enic->intr[io_intr]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it. The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated. Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule_irqoff(&enic->napi[0]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}

static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left, int loopback)
{
	const skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		enic_queue_wq_desc_cont(wq, skb,
			skb_frag_dma_map(&enic->pdev->dev,
					 frag, 0, skb_frag_size(frag),
					 DMA_TO_DEVICE),
			skb_frag_size(frag),
			(len_left == 0),	/* EOP? */
			loopback);
	}
}

static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero. HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
				len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		frag_len_left = skb_frag_size(frag);
		offset = 0;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
						    offset, len,
						    DMA_TO_DEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				 (len == frag_len_left),	/* EOP? */
				loopback);
			frag_len_left -= len;
			offset += len;
		}
	}
}
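
/* Descriptor accounting for the TSO path above, assuming a 16384-byte
 * WQ_ENET_MAX_DESC_LEN: a 64 KB linear head posts four 16 KB
 * descriptors, each page fragment posts one descriptor per 16 KB it
 * spans, and only the very last descriptor of the skb carries EOP.
 */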

static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;

	if (vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag, loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
}

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq;
	unsigned int txq_map;
	struct netdev_queue *txq;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
	wq = &enic->wq[txq_map];
	txq = netdev_get_tx_queue(netdev, txq_map);

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely. In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	spin_lock(&enic->wq_lock[txq_map]);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_tx_stop_queue(txq);
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock(&enic->wq_lock[txq_map]);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_tx_stop_queue(txq);
	if (!skb->xmit_more || netif_xmit_stopped(txq))
		vnic_wq_doorbell(wq);

	spin_unlock(&enic->wq_lock[txq_map]);

	return NETDEV_TX_OK;
}
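
/* Doorbell batching: while skb->xmit_more is set the doorbell write is
 * deferred, so a burst of queued skbs costs a single PIO write.  The
 * doorbell is still rung whenever the queue has just been stopped, so
 * the tail of a burst is never left unrung in a stopped queue.
 */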

/* dev_base_lock rwlock held, nominally process context */
static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
						struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;

	enic_dev_stats_dump(enic, &stats);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}

static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
		unsigned int mc_count = netdev_mc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
			    ENIC_MULTICAST_PERFECT_FILTERS, mc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, mc_addr);
	enic->mc_count++;

	return 0;
}

static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, mc_addr);
	enic->mc_count--;

	return 0;
}

static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
		unsigned int uc_count = netdev_uc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
			    ENIC_UNICAST_PERFECT_FILTERS, uc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, uc_addr);
	enic->uc_count++;

	return 0;
}

static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, uc_addr);
	enic->uc_count--;

	return 0;
}

void enic_reset_addr_lists(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;

	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);

	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		__dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
		if (!allmulti)
			__dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
	}
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
		if (vf == PORT_SELF_VF) {
			memcpy(pp->vf_mac, mac, ETH_ALEN);
			return 0;
		} else {
			/*
			 * For sriov vf's set the mac in hw
			 */
			ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
				vnic_dev_set_mac_addr, mac);
			return enic_dev_status_to_errno(err);
		}
	} else
		return -EINVAL;
}

static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	struct enic_port_profile *pp;
	int err = 0, restore_pp = 1;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, pp, sizeof(*enic->pp));
	memset(pp, 0, sizeof(*enic->pp));

	pp->set |= ENIC_SET_REQUEST;
	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		pp->set |= ENIC_SET_NAME;
		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		pp->set |= ENIC_SET_INSTANCE;
		memcpy(pp->instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		pp->set |= ENIC_SET_HOST;
		memcpy(pp->host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	if (vf == PORT_SELF_VF) {
		/* Special case handling: mac came from IFLA_VF_MAC */
		if (!is_zero_ether_addr(prev_pp.vf_mac))
			memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

		if (is_zero_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	} else {
		/* SR-IOV VF: get mac from adapter */
		ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
			vnic_dev_get_mac_addr, pp->mac_addr);
		if (err) {
			netdev_err(netdev, "Error getting mac for vf %d\n", vf);
			memcpy(pp, &prev_pp, sizeof(*pp));
			return enic_dev_status_to_errno(err);
		}
	}

	err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(pp, &prev_pp, sizeof(*pp));
		} else {
			memset(pp, 0, sizeof(*pp));
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		pp->set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
			memset(pp->mac_addr, 0, ETH_ALEN);
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	}

	if (vf == PORT_SELF_VF)
		memset(pp->vf_mac, 0, ETH_ALEN);

	return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, vf, pp->request, &response);
	if (err)
		return err;

	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
	    ((pp->set & ENIC_SET_NAME) &&
	     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
	    ((pp->set & ENIC_SET_INSTANCE) &&
	     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
		     pp->instance_uuid)) ||
	    ((pp->set & ENIC_SET_HOST) &&
	     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
	buf->os_buf = NULL;
}

static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;
	struct vnic_rq_buf *buf = rq->to_use;

	if (buf->os_buf) {
		enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
				   buf->len);

		return 0;
	}
	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}

static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
				      u32 pkt_len)
{
	if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
		pkt_size->large_pkt_bytes_cnt += pkt_len;
	else
		pkt_size->small_pkt_bytes_cnt += pkt_len;
}

static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
			     struct vnic_rq_buf *buf, u16 len)
{
	struct enic *enic = netdev_priv(netdev);
	struct sk_buff *new_skb;

	if (len > enic->rx_copybreak)
		return false;
	new_skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!new_skb)
		return false;
	pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len,
				    DMA_FROM_DEVICE);
	memcpy(new_skb->data, (*skb)->data, len);
	*skb = new_skb;

	return true;
}
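
/* Copybreak: for frames of at most rx_copybreak bytes it is cheaper to
 * copy the payload into a fresh small skb than to unmap the receive
 * buffer.  On success the caller leaves buf->os_buf in place, and
 * enic_rq_alloc_buf() reposts the still-mapped buffer with no new
 * allocation or DMA mapping.
 */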

static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		buf->os_buf = NULL;

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
			buf->os_buf = NULL;
			pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
					 PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data - NET_IP_ALIGN);

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, q_number);
		if (netdev->features & NETIF_F_RXHASH) {
			skb_set_hash(skb, rss_hash,
				     (rss_type &
				      (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) ?
				     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
		}

		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

		skb_mark_napi_id(skb, &enic->napi[rq->index]);
		if (enic_poll_busy_polling(rq) ||
		    !(netdev->features & NETIF_F_GRO))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&enic->napi[q_number], skb);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_intr_update_pkt_size(&cq->pkt_size_counter,
						  bytes_written);
	} else {

		/* Buffer overflow
		 */

		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		buf->os_buf = NULL;
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}

static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_legacy_io_intr();
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int work_done, rq_work_done = 0, wq_work_done;
	int err;

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
				       enic_wq_service, NULL);

	if (!enic_poll_lock_napi(&enic->rq[cq_rq])) {
		if (wq_work_done > 0)
			vnic_intr_return_credits(&enic->intr[intr],
						 wq_work_done,
						 0 /* don't unmask intr */,
						 0 /* don't reset intr timer */);
		return rq_work_done;
	}

	if (budget > 0)
		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
			rq_work_to_do, enic_rq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle. An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}
	enic_poll_unlock_napi(&enic->rq[cq_rq]);

	return rq_work_done;
}

static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	unsigned int intr = enic_msix_rq_intr(enic, rq->index);
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	u32 timer = cq->tobe_rx_coal_timeval;

	if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
		cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
	}
}

static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
	int index;
	u32 timer;
	u32 range_start;
	u32 traffic;
	u64 delta;
	ktime_t now = ktime_get();

	delta = ktime_us_delta(now, cq->prev_ts);
	if (delta < ENIC_AIC_TS_BREAK)
		return;
	cq->prev_ts = now;

	traffic = pkt_size_counter->large_pkt_bytes_cnt +
		  pkt_size_counter->small_pkt_bytes_cnt;
	/* The table takes Mbps
	 * traffic *= 8    => bits
	 * traffic *= (10^6 / delta)    => bps
	 * traffic /= 10^6     => Mbps
	 *
	 * Combining, traffic *= (8 / delta)
	 */

	traffic <<= 3;
	traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;

	for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
		if (traffic < mod_table[index].rx_rate)
			break;
	range_start = (pkt_size_counter->small_pkt_bytes_cnt >
		       pkt_size_counter->large_pkt_bytes_cnt << 1) ?
		      rx_coal->small_pkt_range_start :
		      rx_coal->large_pkt_range_start;
	timer = range_start + ((rx_coal->range_end - range_start) *
			       mod_table[index].range_percent / 100);
	/* Damping */
	cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;

	pkt_size_counter->large_pkt_bytes_cnt = 0;
	pkt_size_counter->small_pkt_bytes_cnt = 0;
}
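
/* Worked example: 125000 bytes received in a 200 usec window gives
 * traffic = (125000 << 3) / 200 = 5000 Mbps, which stops the
 * mod_table[] scan at {5060, 20}; the timer target becomes range_start
 * plus 20% of the configured range, then is averaged with the previous
 * target to damp oscillation.
 */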

#ifdef CONFIG_RFS_ACCEL
static void enic_free_rx_cpu_rmap(struct enic *enic)
{
	free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
	enic->netdev->rx_cpu_rmap = NULL;
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
	int i, res;

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
		if (unlikely(!enic->netdev->rx_cpu_rmap))
			return;
		for (i = 0; i < enic->rq_count; i++) {
			res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
					       enic->msix_entry[i].vector);
			if (unlikely(res)) {
				enic_free_rx_cpu_rmap(enic);
				return;
			}
		}
	}
}

#else

static void enic_free_rx_cpu_rmap(struct enic *enic)
{
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
}

#endif /* CONFIG_RFS_ACCEL */

#ifdef CONFIG_NET_RX_BUSY_POLL
int enic_busy_poll(struct napi_struct *napi)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = -1; /* clean all pkts possible */
	unsigned int work_done;

	if (!enic_poll_lock_poll(&enic->rq[rq]))
		return LL_FLUSH_BUSY;
	work_done = vnic_cq_service(&enic->cq[cq], work_to_do,
				    enic_rq_service, NULL);

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
					 work_done, 0, 0);
	vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		enic_calc_int_moderation(enic, &enic->rq[rq]);
	enic_poll_unlock_poll(&enic->rq[rq]);

	return work_done;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
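
/* enic_busy_poll() runs from the socket busy-poll path rather than
 * NAPI: it bails out with LL_FLUSH_BUSY when NAPI already owns the
 * ring, otherwise it drains completions, returns interrupt credits
 * and refills the ring before dropping the busy-poll lock.
 */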
1291
Govindarajulu Varadarajan4cfe8782014-06-23 16:08:05 +05301292static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
1293{
1294 struct net_device *netdev = napi->dev;
1295 struct enic *enic = netdev_priv(netdev);
1296 unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
1297 struct vnic_wq *wq = &enic->wq[wq_index];
1298 unsigned int cq;
1299 unsigned int intr;
1300 unsigned int wq_work_to_do = -1; /* clean all desc possible */
1301 unsigned int wq_work_done;
1302 unsigned int wq_irq;
1303
1304 wq_irq = wq->index;
1305 cq = enic_cq_wq(enic, wq_irq);
1306 intr = enic_msix_wq_intr(enic, wq_irq);
1307 wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
1308 enic_wq_service, NULL);
1309
1310 vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
1311 0 /* don't unmask intr */,
1312 1 /* reset intr timer */);
1313 if (!wq_work_done) {
1314 napi_complete(napi);
1315 vnic_intr_unmask(&enic->intr[intr]);
Govindarajulu Varadarajanf41281d2014-11-13 04:12:06 +05301316 return 0;
Govindarajulu Varadarajan4cfe8782014-06-23 16:08:05 +05301317 }
1318
Govindarajulu Varadarajanf41281d2014-11-13 04:12:06 +05301319 return budget;
Govindarajulu Varadarajan4cfe8782014-06-23 16:08:05 +05301320}
1321
static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = budget;
	unsigned int work_done = 0;
	int err;

	if (!enic_poll_lock_napi(&enic->rq[rq]))
		return work_done;
	/* Service RQ
	 */

	if (budget > 0)
		work_done = vnic_cq_service(&enic->cq[cq],
			work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle. An intr event is the completion of a
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		/* Call the function which refreshes the intr coalescing
		 * timer value based on the traffic. This is supported
		 * only in the case of MSI-X mode.
		 */
		enic_calc_int_moderation(enic, &enic->rq[rq]);

	if (work_done < work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_set_int_moderation(enic, &enic->rq[rq]);
		vnic_intr_unmask(&enic->intr[intr]);
	}
	enic_poll_unlock_napi(&enic->rq[rq]);

	return work_done;
}

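/* Periodic fallback used when the notify area has no interrupt of its own
 * (MSI mode): poll firmware notifications every ENIC_NOTIFY_TIMER_PERIOD.
 */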
static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}

static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	enic_free_rx_cpu_rmap(enic);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}

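/* Request IRQs for the active interrupt mode. In MSI-X mode the vector
 * layout is: one "-rx-N" vector per RQ, one "-tx-N" vector per WQ (whose
 * NAPI context sits past the RQ range, hence the enic_cq_wq() index),
 * then one "-err" and one "-notify" vector owned by the enic itself.
 */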
static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i, intr;
	int err = 0;

	enic_set_rx_cpu_rmap(enic);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%.11s-rx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix;
			enic->msix[intr].devid = &enic->napi[i];
		}

		for (i = 0; i < enic->wq_count; i++) {
			int wq = enic_cq_wq(enic, i);

			intr = enic_msix_wq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%.11s-tx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix;
			enic->msix[intr].devid = &enic->napi[wq];
		}

		intr = enic_msix_err_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%.11s-err", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_err;
		enic->msix[intr].devid = enic;

		intr = enic_msix_notify_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%.11s-notify", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_notify;
		enic->msix[intr].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			enic->msix[i].requested = 0;

		for (i = 0; i < enic->intr_count; i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}

static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}

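/* Seed the adaptive RX coalescing state. The moderation range is picked
 * from the link speed reported by firmware (the 4G/10G/40G buckets in
 * mod_range[]), every RQ starts from the UCSM-provisioned timer value,
 * and adaptive coalescing is then enabled. MSI-X only.
 */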
static void enic_set_rx_coal_setting(struct enic *enic)
{
	unsigned int speed;
	int index = -1;
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;

	/* If intr mode is not MSIX, do not do adaptive coalescing */
	if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
		netdev_info(enic->netdev, "INTR mode is not MSIX, not initializing adaptive coalescing\n");
		return;
	}

	/* 1. Read the link speed from fw
	 * 2. Pick the default range for the speed
	 * 3. Update it in enic->rx_coalesce_setting
	 */
	speed = vnic_dev_port_speed(enic->vdev);
	if (ENIC_LINK_SPEED_10G < speed)
		index = ENIC_LINK_40G_INDEX;
	else if (ENIC_LINK_SPEED_4G < speed)
		index = ENIC_LINK_10G_INDEX;
	else
		index = ENIC_LINK_4G_INDEX;

	rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
	rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
	rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;

	/* Start with the value provided by UCSM */
	for (index = 0; index < enic->rq_count; index++)
		enic->cq[index].cur_rx_coal_timeval =
			enic->config.intr_timer_usec;

	rx_coal->use_adaptive_rx_coalesce = 1;
}

static int enic_dev_notify_set(struct enic *enic)
{
	int err;

	spin_lock_bh(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_legacy_notify_intr());
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_msix_notify_intr(enic));
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}

static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}

/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		netdev_err(netdev, "Unable to request irq.\n");
		return err;
	}

	err = enic_dev_notify_set(enic);
	if (err) {
		netdev_err(netdev,
			"Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			netdev_err(netdev, "Unable to alloc receive buffers\n");
			err = -ENOMEM;
			goto err_out_notify_unset;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_add_station_addr(enic);

	enic_set_rx_mode(netdev);

	netif_tx_wake_all_queues(netdev);

	for (i = 0; i < enic->rq_count; i++) {
		enic_busy_poll_init_lock(&enic->rq[i]);
		napi_enable(&enic->napi[i]);
	}
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			napi_enable(&enic->napi[enic_cq_wq(enic, i)]);
	enic_dev_enable(enic);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);
	enic_rfs_flw_tbl_init(enic);

	return 0;

err_out_notify_unset:
	enic_dev_notify_unset(enic);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}

/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
	}

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);
	enic_rfs_flw_tbl_free(enic);

	enic_dev_disable(enic);

	for (i = 0; i < enic->rq_count; i++) {
		napi_disable(&enic->napi[i]);
		local_bh_disable();
		while (!enic_poll_lock_napi(&enic->rq[i]))
			mdelay(1);
		local_bh_enable();
	}

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			napi_disable(&enic->napi[enic_cq_wq(enic, i)]);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_del_station_addr(enic);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	enic_dev_notify_unset(enic);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}

static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		return -EOPNOTSUPP;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		netdev_warn(netdev,
			"interface MTU (%d) set higher than port MTU (%d)\n",
			netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}

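/* Deferred handler for an MTU change pushed down by firmware (the new MTU
 * comes from vnic_dev_mtu(), clamped to the driver limits). Note it
 * quiesces and refills only RQ 0, which assumes a single-RQ configuration
 * is in use when this path fires.
 */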
static void enic_change_mtu_work(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, change_mtu_work);
	struct net_device *netdev = enic->netdev;
	int new_mtu = vnic_dev_mtu(enic->vdev);
	int err;
	unsigned int i;

	new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));

	rtnl_lock();

	/* Stop RQ */
	del_timer_sync(&enic->notify_timer);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	vnic_intr_mask(&enic->intr[0]);
	enic_synchronize_irqs(enic);
	err = vnic_rq_disable(&enic->rq[0]);
	if (err) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to disable RQ.\n");
		return;
	}
	vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
	vnic_cq_clean(&enic->cq[0]);
	vnic_intr_clean(&enic->intr[0]);

	/* Fill RQ with new_mtu-sized buffers */
	netdev->mtu = new_mtu;
	vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
	/* Need at least one buffer on ring to get going */
	if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to alloc receive buffers.\n");
		return;
	}

	/* Start RQ */
	vnic_rq_enable(&enic->rq[0]);
	napi_enable(&enic->napi[0]);
	vnic_intr_unmask(&enic->intr[0]);
	enic_notify_timer_start(enic);

	rtnl_unlock();

	netdev_info(netdev, "interface MTU set to %d\n", netdev->mtu);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;
	unsigned int i, intr;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			enic_isr_msix(enic->msix_entry[intr].vector,
				      &enic->napi[i]);
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			enic_isr_msix(enic->msix_entry[intr].vector,
				      &enic->napi[enic_cq_wq(enic, i)]);
		}

		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif

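/* Run an asynchronous devcmd to completion: kick it off with start(), then
 * poll finished() every 100 ms for up to 2 seconds. Must be called from
 * process context since it sleeps between polls.
 */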
static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
			err);

	return err;
}

static int enic_dev_hang_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
		vnic_dev_hang_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
			err);

	return err;
}

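/* Push enic->rss_key to the adapter. The key is staged in a DMA-coherent
 * buffer laid out as ENIC_RSS_LEN bytes scattered into subkeys of
 * ENIC_RSS_BYTES_PER_KEY bytes each (layout constants from the enic
 * headers), then handed to firmware under the devcmd lock.
 */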
int __enic_set_rsskey(struct enic *enic)
{
	union vnic_rss_key *rss_key_buf_va;
	dma_addr_t rss_key_buf_pa;
	int i, kidx, bidx, err;

	rss_key_buf_va = pci_zalloc_consistent(enic->pdev,
					       sizeof(union vnic_rss_key),
					       &rss_key_buf_pa);
	if (!rss_key_buf_va)
		return -ENOMEM;

	for (i = 0; i < ENIC_RSS_LEN; i++) {
		kidx = i / ENIC_RSS_BYTES_PER_KEY;
		bidx = i % ENIC_RSS_BYTES_PER_KEY;
		rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i];
	}
	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));
	spin_unlock_bh(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
		rss_key_buf_va, rss_key_buf_pa);

	return err;
}

static int enic_set_rsskey(struct enic *enic)
{
	netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN);

	return __enic_set_rsskey(enic);
}

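/* Program the RSS indirection table: 2^rss_hash_bits entries, each mapping
 * a hash bucket to an RQ in round-robin order across enic->rq_count queues.
 * Like the key, the table is staged in a DMA buffer and set via devcmd.
 */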
static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	unsigned int i;
	int err;

	rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	for (i = 0; i < (1 << rss_hash_bits); i++)
		(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;

	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));
	spin_unlock_bh(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
		rss_cpu_buf_va, rss_cpu_buf_pa);

	return err;
}

static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;
	int err;

	/* Enable VLAN tag stripping.
	 */

	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}

static int enic_set_rss_nic_cfg(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_IPV6 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
	const u8 rss_hash_bits = 7;
	const u8 rss_base_cpu = 0;
	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

	if (rss_enable) {
		if (!enic_set_rsskey(enic)) {
			if (enic_set_rsscpu(enic, rss_hash_bits)) {
				rss_enable = 0;
				dev_warn(dev, "RSS disabled, failed to set "
					"RSS cpu indirection table.\n");
			}
		} else {
			rss_enable = 0;
			dev_warn(dev, "RSS disabled, failed to set RSS key.\n");
		}
	}

	return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu, rss_enable);
}

static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	spin_lock(&enic->enic_api_lock);
	enic_dev_hang_notify(enic);
	enic_stop(enic->netdev);
	enic_dev_hang_reset(enic);
	enic_reset_addr_lists(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);
	spin_unlock(&enic->enic_api_lock);
	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);

	rtnl_unlock();
}

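/* Pick the richest interrupt mode the device and platform support, in
 * order MSI-X -> MSI -> INTx. MSI-X needs n+m+2 vectors (n RQs, m WQs,
 * one error, one notify); if the full RSS layout cannot be granted, a
 * single-RQ MSI-X layout (1+m+2) is tried before falling back.
 */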
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * on system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	/* Use multiple RQs if RSS is enabled
	 */

	if (ENIC_SETTING(enic, RSS) &&
	    enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2) {

		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
					  n + m + 2, n + m + 2) > 0) {

			enic->rq_count = n;
			enic->wq_count = m;
			enic->cq_count = n + m;
			enic->intr_count = n + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= m &&
	    enic->cq_count >= 1 + m &&
	    enic->intr_count >= 1 + m + 2) {
		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
					  1 + m + 2, 1 + m + 2) > 0) {

			enic->rq_count = 1;
			enic->wq_count = m;
			enic->cq_count = 1 + m;
			enic->intr_count = 1 + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}

static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}

static const struct net_device_ops enic_netdev_dynamic_ops = {
	.ndo_open = enic_open,
	.ndo_stop = enic_stop,
	.ndo_start_xmit = enic_hard_start_xmit,
	.ndo_get_stats64 = enic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = enic_set_rx_mode,
	.ndo_set_mac_address = enic_set_mac_address_dynamic,
	.ndo_change_mtu = enic_change_mtu,
	.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
	.ndo_tx_timeout = enic_tx_timeout,
	.ndo_set_vf_port = enic_set_vf_port,
	.ndo_get_vf_port = enic_get_vf_port,
	.ndo_set_vf_mac = enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = enic_poll_controller,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = enic_rx_flow_steer,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = enic_busy_poll,
#endif
};

static const struct net_device_ops enic_netdev_ops = {
	.ndo_open = enic_open,
	.ndo_stop = enic_stop,
	.ndo_start_xmit = enic_hard_start_xmit,
	.ndo_get_stats64 = enic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = enic_set_mac_address,
	.ndo_set_rx_mode = enic_set_rx_mode,
	.ndo_change_mtu = enic_change_mtu,
	.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
	.ndo_tx_timeout = enic_tx_timeout,
	.ndo_set_vf_port = enic_set_vf_port,
	.ndo_get_vf_port = enic_get_vf_port,
	.ndo_set_vf_mac = enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = enic_poll_controller,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = enic_rx_flow_steer,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = enic_busy_poll,
#endif
};

static void enic_dev_deinit(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < enic->rq_count; i++) {
		napi_hash_del(&enic->napi[i]);
		netif_napi_del(&enic->napi[i]);
	}
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);

	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
}

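/* One-time device bring-up: read config and resource counts from firmware,
 * choose the interrupt mode, allocate and initialize vNIC resources, program
 * the RSS config, and register the NAPI contexts that match the chosen mode
 * (a single context by default, per-RQ plus per-WQ contexts for MSI-X).
 */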
static int enic_dev_init(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err;

	/* Get interrupt coalesce timer info */
	err = enic_dev_intr_coal_timer_info(enic);
	if (err) {
		dev_warn(dev, "Using default conversion factor for "
			"interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(enic->vdev);
	}

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		dev_err(dev, "Failed to set intr mode based on resource "
			"counts and system capabilities, aborting\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rss_nic_cfg(enic);
	if (err) {
		dev_err(dev, "Failed to config nic, aborting\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
		napi_hash_add(&enic->napi[0]);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			netif_napi_add(netdev, &enic->napi[i],
				enic_poll_msix_rq, NAPI_POLL_WEIGHT);
			napi_hash_add(&enic->napi[i]);
		}
		for (i = 0; i < enic->wq_count; i++)
			netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
				enic_poll_msix_wq, NAPI_POLL_WEIGHT);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}

static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}

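/* PCI probe: allocate the netdev, map BARs and register the vNIC, optionally
 * enable SR-IOV VFs, open and initialize the device, then register the net
 * device with TX/RX queue counts matching the provisioned WQs/RQs.
 */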
static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;
#ifdef CONFIG_PCI_IOV
	int pos = 0;
#endif
	int num_pps = 1;

	/* Allocate net device structure and initialize. Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev_mqs(sizeof(struct enic),
				    ENIC_RQ_MAX, ENIC_WQ_MAX);
	if (!netdev)
		return -ENOMEM;

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "Cannot request PCI regions, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device. Try 64-bit first, and
	 * fall back to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "No usable DMA configuration, aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 32);
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 64);
			goto err_out_release_regions;
		}
		using_dac = 1;
	}

	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		dev_err(dev, "vNIC registration failed, aborting\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

#ifdef CONFIG_PCI_IOV
	/* Get number of subvnics */
	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
			&enic->num_vfs);
		if (enic->num_vfs) {
			err = pci_enable_sriov(pdev, enic->num_vfs);
			if (err) {
				dev_err(dev, "SRIOV enable failed, aborting."
					" pci_enable_sriov() returned %d\n",
					err);
				goto err_out_vnic_unregister;
			}
			enic->priv_flags |= ENIC_SRIOV_ENABLED;
			num_pps = enic->num_vfs;
		}
	}
#endif

	/* Allocate structure for port profiles */
	enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
	if (!enic->pp) {
		err = -ENOMEM;
		goto err_out_disable_sriov_pp;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		dev_err(dev, "vNIC dev open failed, aborting\n");
		goto err_out_disable_sriov;
	}

	/* Setup devcmd lock
	 */

	spin_lock_init(&enic->devcmd_lock);
	spin_lock_init(&enic->enic_api_lock);

	/*
	 * Set ingress vlan rewrite mode before vnic initialization
	 */

	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
	if (err) {
		dev_err(dev,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier. We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	/* Do not call dev_init for a dynamic vnic.
	 * For a dynamic vnic, init_prov_info will be
	 * called later by an upper layer.
	 */

	if (!enic_is_dynamic(enic)) {
		err = vnic_dev_init(enic->vdev, 0);
		if (err) {
			dev_err(dev, "vNIC dev init failed, aborting\n");
			goto err_out_dev_close;
		}
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(dev, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	netif_set_real_num_tx_queues(netdev, enic->wq_count);
	netif_set_real_num_rx_queues(netdev, enic->rq_count);

	/* Setup notification timer, HW reset task, and wq locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	enic_set_rx_coal_setting(enic);
	INIT_WORK(&enic->reset, enic_reset);
	INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		dev_err(dev, "Invalid MAC address, aborting\n");
		goto err_out_dev_deinit;
	}

	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	/* rx coalesce time already got initialized. This gets used
	 * if adaptive coal is turned off
	 */
	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		netdev->netdev_ops = &enic_netdev_dynamic_ops;
	else
		netdev->netdev_ops = &enic_netdev_ops;

	netdev->watchdog_timeo = 2 * HZ;
	enic_set_ethtool_ops(netdev);

	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	if (ENIC_SETTING(enic, LOOP)) {
		netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
		enic->loop_enable = 1;
		enic->loop_tag = enic->config.loop_tag;
		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
	}
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->hw_features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, RSS))
		netdev->hw_features |= NETIF_F_RXHASH;
	if (ENIC_SETTING(enic, RXCSUM))
		netdev->hw_features |= NETIF_F_RXCSUM;

	netdev->features |= netdev->hw_features;

#ifdef CONFIG_RFS_ACCEL
	netdev->hw_features |= NETIF_F_NTUPLE;
#endif

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Cannot register net device, aborting\n");
		goto err_out_dev_deinit;
	}
	enic->rx_copybreak = RX_COPYBREAK_DEFAULT;

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_disable_sriov:
	kfree(enic->pp);
err_out_disable_sriov_pp:
#ifdef CONFIG_PCI_IOV
	if (enic_sriov_enabled(enic)) {
		pci_disable_sriov(pdev);
		enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
	}
err_out_vnic_unregister:
#endif
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	free_netdev(netdev);

	return err;
}

static void enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		cancel_work_sync(&enic->reset);
		cancel_work_sync(&enic->change_mtu_work);
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
#ifdef CONFIG_PCI_IOV
		if (enic_sriov_enabled(enic)) {
			pci_disable_sriov(pdev);
			enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
		}
#endif
		kfree(enic->pp);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		free_netdev(netdev);
	}
}

static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = enic_remove,
};

static int __init enic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);