/*
 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
#include <linux/ktime.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
#include <linux/crash_dump.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"
#include "enic_clsf.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
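
/* Worked example: assuming WQ_ENET_LEN_BITS is 14, WQ_ENET_MAX_DESC_LEN is
 * 16384 bytes, so a maximal 64 KB TSO payload splits into at most
 * 65536 / 16384 + 1 = 5 descriptors; this value is used below as descriptor
 * headroom when deciding whether to stop or wake a TX queue.
 */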

#define PCI_DEVICE_ID_CISCO_VIC_ENET		0x0043	/* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF		0x0071	/* enet SRIOV VF */

#define RX_COPYBREAK_DEFAULT		256
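
/* rx_copybreak: receive frames no longer than this are copied into a fresh,
 * right-sized skb so the original full-size RX buffer can stay mapped and be
 * reposted to the ring (see enic_rxcopybreak() and enic_rq_alloc_buf()
 * below). 256 bytes is the driver's default threshold.
 */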

/* Supported devices */
static const struct pci_device_id enic_id_table[] = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

#define ENIC_LARGE_PKT_THRESHOLD		1000
#define ENIC_MAX_COALESCE_TIMERS		10
/* Interrupt moderation table, which will be used to decide the
 * coalescing timer values
 * {rx_rate in Mbps, mapping percentage of the range}
 */
static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
	{4000,  0},
	{4400, 10},
	{5060, 20},
	{5230, 30},
	{5540, 40},
	{5820, 50},
	{6120, 60},
	{6435, 70},
	{6745, 80},
	{7000, 90},
	{0xFFFFFFFF, 100}
};

/* This table helps the driver to pick different ranges for rx coalescing
 * timer depending on the link speed.
 */
static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
	{0,  0}, /* 0  - 4  Gbps */
	{0,  3}, /* 4  - 10 Gbps */
	{3,  6}, /* 10 - 40 Gbps */
};

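/* How the two tables combine (illustrative): mod_range supplies the
 * {small,large}_pkt_range_start pair for the current link speed;
 * enic_calc_int_moderation() below measures rx traffic in Mbps, walks
 * mod_table until it finds the first entry whose rx_rate exceeds that
 * figure, and uses that entry's range_percent to interpolate the new
 * coalescing timer between range_start and range_end.
 */
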
int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
	return vf >= 0 && vf < enic->num_vfs;
#else
	return 0;
#endif
}

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_subqueue(enic->netdev, q_number);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}

static bool enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;
	bool err = false;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		err |= error_status;
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		err |= error_status;
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				i, error_status);
	}

	return err;
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					"interface MTU (%d) set higher "
					"than switch port MTU (%d)\n",
					netdev->mtu, mtu);
		}
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		enic_notify_check(enic);
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr))
		napi_schedule_irqoff(&enic->napi[0]);
	else
		vnic_intr_unmask(&enic->intr[io_intr]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it. The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated. Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule_irqoff(&enic->napi[0]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	if (enic_log_q_error(enic))
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	enic_notify_check(enic);
	vnic_intr_return_all_credits(&enic->intr[intr]);

	return IRQ_HANDLED;
}

static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
	struct sk_buff *skb, unsigned int len_left,
	int loopback)
{
	const skb_frag_t *frag;
	dma_addr_t dma_addr;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (unlikely(enic_dma_map_check(enic, dma_addr)))
			return -ENOMEM;
		enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
					(len_left == 0),	/* EOP? */
					loopback);
	}

	return 0;
}

static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
	struct sk_buff *skb, int vlan_tag_insert,
	unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);
	dma_addr_t dma_addr;
	int err = 0;

	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
				  PCI_DMA_TODEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
			   vlan_tag, eop, loopback);

	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

	return err;
}

static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
	struct sk_buff *skb, int vlan_tag_insert,
	unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);
	dma_addr_t dma_addr;
	int err = 0;

	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
				  PCI_DMA_TODEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
				   hdr_len, vlan_tag_insert, vlan_tag, eop,
				   loopback);

	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

	return err;
}

static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
	struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag,
	int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero. HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
					  PCI_DMA_TODEVICE);
		if (unlikely(enic_dma_map_check(enic, dma_addr)))
			return -ENOMEM;
		enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
				       vlan_tag_insert, vlan_tag,
				       eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return 0;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		frag_len_left = skb_frag_size(frag);
		offset = 0;

		while (frag_len_left) {
			len = min(frag_len_left,
				  (unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
						    offset, len,
						    DMA_TO_DEVICE);
			if (unlikely(enic_dma_map_check(enic, dma_addr)))
				return -ENOMEM;
			enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
						(len_left == 0) &&
						(len == frag_len_left),/*EOP*/
						loopback);
			frag_len_left -= len;
			offset += len;
		}
	}

	return 0;
}

static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;
	int err;

	if (skb_vlan_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = skb_vlan_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
					    vlan_tag_insert, vlan_tag,
					    loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
						vlan_tag, loopback);
	else
		err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
					     vlan_tag, loopback);
	if (unlikely(err)) {
		struct vnic_wq_buf *buf;

		buf = wq->to_use->prev;
		/* while not EOP of previous pkt && queue not empty.
		 * For all non EOP bufs, os_buf is NULL.
		 */
		while (!buf->os_buf && (buf->next != wq->to_clean)) {
			enic_free_wq_buf(wq, buf);
			wq->ring.desc_avail++;
			buf = buf->prev;
		}
		wq->to_use = buf->next;
		dev_kfree_skb(skb);
	}
}

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq;
	unsigned int txq_map;
	struct netdev_queue *txq;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
	wq = &enic->wq[txq_map];
	txq = netdev_get_tx_queue(netdev, txq_map);

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely. In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	spin_lock(&enic->wq_lock[txq_map]);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_tx_stop_queue(txq);
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock(&enic->wq_lock[txq_map]);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_tx_stop_queue(txq);
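
	/* skb->xmit_more lets the stack batch several skbs before ringing
	 * the doorbell; kick the hardware only on the last skb of a batch
	 * or when the queue has just been stopped.
	 */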
	if (!skb->xmit_more || netif_xmit_stopped(txq))
		vnic_wq_doorbell(wq);

	spin_unlock(&enic->wq_lock[txq_map]);

	return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
	struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;
	int err;

	err = enic_dev_stats_dump(enic, &stats);
	/* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
	 * For other failures, like devcmd failure, we return previously
	 * recorded stats.
	 */
	if (err == -ENOMEM)
		return net_stats;

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}

static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
		unsigned int mc_count = netdev_mc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
			    ENIC_MULTICAST_PERFECT_FILTERS, mc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, mc_addr);
	enic->mc_count++;

	return 0;
}

static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, mc_addr);
	enic->mc_count--;

	return 0;
}

static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
		unsigned int uc_count = netdev_uc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
			    ENIC_UNICAST_PERFECT_FILTERS, uc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, uc_addr);
	enic->uc_count++;

	return 0;
}

static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, uc_addr);
	enic->uc_count--;

	return 0;
}

void enic_reset_addr_lists(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;

	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);

	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		__dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
		if (!allmulti)
			__dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
	}
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
		if (vf == PORT_SELF_VF) {
			memcpy(pp->vf_mac, mac, ETH_ALEN);
			return 0;
		} else {
			/*
			 * For sriov vf's set the mac in hw
			 */
			ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
				vnic_dev_set_mac_addr, mac);
			return enic_dev_status_to_errno(err);
		}
	} else
		return -EINVAL;
}

static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	struct enic_port_profile *pp;
	int err = 0, restore_pp = 1;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, pp, sizeof(*enic->pp));
	memset(pp, 0, sizeof(*enic->pp));

	pp->set |= ENIC_SET_REQUEST;
	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		pp->set |= ENIC_SET_NAME;
		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		pp->set |= ENIC_SET_INSTANCE;
		memcpy(pp->instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		pp->set |= ENIC_SET_HOST;
		memcpy(pp->host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	if (vf == PORT_SELF_VF) {
		/* Special case handling: mac came from IFLA_VF_MAC */
		if (!is_zero_ether_addr(prev_pp.vf_mac))
			memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

		if (is_zero_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	} else {
		/* SR-IOV VF: get mac from adapter */
		ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
			vnic_dev_get_mac_addr, pp->mac_addr);
		if (err) {
			netdev_err(netdev, "Error getting mac for vf %d\n", vf);
			memcpy(pp, &prev_pp, sizeof(*pp));
			return enic_dev_status_to_errno(err);
		}
	}

	err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(pp, &prev_pp, sizeof(*pp));
		} else {
			memset(pp, 0, sizeof(*pp));
			if (vf == PORT_SELF_VF)
				eth_zero_addr(netdev->dev_addr);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		pp->set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
			eth_zero_addr(pp->mac_addr);
			if (vf == PORT_SELF_VF)
				eth_zero_addr(netdev->dev_addr);
		}
	}

	if (vf == PORT_SELF_VF)
		eth_zero_addr(pp->vf_mac);

	return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, vf, pp->request, &response);
	if (err)
		return err;

	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
	    ((pp->set & ENIC_SET_NAME) &&
	     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
	    ((pp->set & ENIC_SET_INSTANCE) &&
	     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
		     pp->instance_uuid)) ||
	    ((pp->set & ENIC_SET_HOST) &&
	     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
	buf->os_buf = NULL;
}

static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;
	struct vnic_rq_buf *buf = rq->to_use;

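	/* A non-NULL os_buf means the previous buffer was not handed up the
	 * stack (typically the rx_copybreak path copied it instead), so the
	 * still-mapped buffer can simply be reposted to the ring.
	 */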
	if (buf->os_buf) {
		enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
				   buf->len);

		return 0;
	}
	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data, len,
				  PCI_DMA_FROMDEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}

static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
				      u32 pkt_len)
{
	if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
		pkt_size->large_pkt_bytes_cnt += pkt_len;
	else
		pkt_size->small_pkt_bytes_cnt += pkt_len;
}

static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
			     struct vnic_rq_buf *buf, u16 len)
{
	struct enic *enic = netdev_priv(netdev);
	struct sk_buff *new_skb;

	if (len > enic->rx_copybreak)
		return false;
	new_skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!new_skb)
		return false;
	pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len,
				    DMA_FROM_DEVICE);
	memcpy(new_skb->data, (*skb)->data, len);
	*skb = new_skb;

	return true;
}

static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		buf->os_buf = NULL;

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
			buf->os_buf = NULL;
			pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
					 PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data - NET_IP_ALIGN);

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, q_number);
		if (netdev->features & NETIF_F_RXHASH) {
			skb_set_hash(skb, rss_hash,
				     (rss_type &
				      (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) ?
				     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
		}

		/* Hardware does not provide whole packet checksum. It only
		 * provides pseudo checksum. Since hw validates the packet
		 * checksum but does not provide us the checksum value, use
		 * CHECKSUM_UNNECESSARY.
		 */
		if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
		    ipv4_csum_ok)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

		skb_mark_napi_id(skb, &enic->napi[rq->index]);
		if (enic_poll_busy_polling(rq) ||
		    !(netdev->features & NETIF_F_GRO))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&enic->napi[q_number], skb);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_intr_update_pkt_size(&cq->pkt_size_counter,
						  bytes_written);
	} else {

		/* Buffer overflow
		 */

		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		buf->os_buf = NULL;
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}

static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	unsigned int intr = enic_msix_rq_intr(enic, rq->index);
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	u32 timer = cq->tobe_rx_coal_timeval;

	if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
		cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
	}
}

static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
	int index;
	u32 timer;
	u32 range_start;
	u32 traffic;
	u64 delta;
	ktime_t now = ktime_get();

	delta = ktime_us_delta(now, cq->prev_ts);
	if (delta < ENIC_AIC_TS_BREAK)
		return;
	cq->prev_ts = now;

	traffic = pkt_size_counter->large_pkt_bytes_cnt +
		  pkt_size_counter->small_pkt_bytes_cnt;
	/* The table takes Mbps
	 * traffic *= 8    => bits
	 * traffic *= (10^6 / delta)    => bps
	 * traffic /= 10^6     => Mbps
	 *
	 * Combining, traffic *= (8 / delta)
	 */

	traffic <<= 3;
	traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
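	/* Worked example: if 1,500,000 bytes arrived during a 4,000 us
	 * window, traffic <<= 3 gives 12,000,000 and dividing by delta
	 * yields 3,000, i.e. roughly 3,000 Mbps, which is then looked up
	 * in mod_table[] below.
	 */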

	for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
		if (traffic < mod_table[index].rx_rate)
			break;
	range_start = (pkt_size_counter->small_pkt_bytes_cnt >
		       pkt_size_counter->large_pkt_bytes_cnt << 1) ?
		      rx_coal->small_pkt_range_start :
		      rx_coal->large_pkt_range_start;
	timer = range_start + ((rx_coal->range_end - range_start) *
			       mod_table[index].range_percent / 100);
	/* Damping */
	cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;

	pkt_size_counter->large_pkt_bytes_cnt = 0;
	pkt_size_counter->small_pkt_bytes_cnt = 0;
}

static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_legacy_io_intr();
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int work_done, rq_work_done = 0, wq_work_done;
	int err;

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
				       enic_wq_service, NULL);

	if (!enic_poll_lock_napi(&enic->rq[cq_rq])) {
		if (wq_work_done > 0)
			vnic_intr_return_credits(&enic->intr[intr],
						 wq_work_done,
						 0 /* dont unmask intr */,
						 0 /* dont reset intr timer */);
		return budget;
	}

	if (budget > 0)
		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
			rq_work_to_do, enic_rq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle. An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
	enic_poll_unlock_napi(&enic->rq[cq_rq], napi);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		/* Call the function which refreshes the intr coalescing timer
		 * value based on the traffic.
		 */
		enic_calc_int_moderation(enic, &enic->rq[0]);

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_set_int_moderation(enic, &enic->rq[0]);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return rq_work_done;
}

#ifdef CONFIG_RFS_ACCEL
static void enic_free_rx_cpu_rmap(struct enic *enic)
{
	free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
	enic->netdev->rx_cpu_rmap = NULL;
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
	int i, res;

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
		if (unlikely(!enic->netdev->rx_cpu_rmap))
			return;
		for (i = 0; i < enic->rq_count; i++) {
			res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
					       enic->msix_entry[i].vector);
			if (unlikely(res)) {
				enic_free_rx_cpu_rmap(enic);
				return;
			}
		}
	}
}

#else

static void enic_free_rx_cpu_rmap(struct enic *enic)
{
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
}

#endif /* CONFIG_RFS_ACCEL */

Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05301323#ifdef CONFIG_NET_RX_BUSY_POLL
Lad, Prabhakar57ae84a02015-02-05 15:34:13 +00001324static int enic_busy_poll(struct napi_struct *napi)
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05301325{
1326 struct net_device *netdev = napi->dev;
1327 struct enic *enic = netdev_priv(netdev);
1328 unsigned int rq = (napi - &enic->napi[0]);
1329 unsigned int cq = enic_cq_rq(enic, rq);
1330 unsigned int intr = enic_msix_rq_intr(enic, rq);
1331 unsigned int work_to_do = -1; /* clean all pkts possible */
1332 unsigned int work_done;
1333
1334 if (!enic_poll_lock_poll(&enic->rq[rq]))
1335 return LL_FLUSH_BUSY;
1336 work_done = vnic_cq_service(&enic->cq[cq], work_to_do,
1337 enic_rq_service, NULL);
1338
1339 if (work_done > 0)
1340 vnic_intr_return_credits(&enic->intr[intr],
1341 work_done, 0, 0);
1342 vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
1343 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
1344 enic_calc_int_moderation(enic, &enic->rq[rq]);
1345 enic_poll_unlock_poll(&enic->rq[rq]);
1346
1347 return work_done;
1348}
1349#endif /* CONFIG_NET_RX_BUSY_POLL */
1350
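/* Per-WQ NAPI handler used in MSI-X mode: services transmit completions
 * only and re-enables its interrupt once the completion queue is empty.
 */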
Govindarajulu Varadarajan4cfe8782014-06-23 16:08:05 +05301351static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
1352{
1353 struct net_device *netdev = napi->dev;
1354 struct enic *enic = netdev_priv(netdev);
1355 unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
1356 struct vnic_wq *wq = &enic->wq[wq_index];
1357 unsigned int cq;
1358 unsigned int intr;
1359 unsigned int wq_work_to_do = -1; /* clean all desc possible */
1360 unsigned int wq_work_done;
1361 unsigned int wq_irq;
1362
1363 wq_irq = wq->index;
1364 cq = enic_cq_wq(enic, wq_irq);
1365 intr = enic_msix_wq_intr(enic, wq_irq);
1366 wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
1367 enic_wq_service, NULL);
1368
1369 vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
1370 0 /* don't unmask intr */,
1371 1 /* reset intr timer */);
1372 if (!wq_work_done) {
1373 napi_complete(napi);
1374 vnic_intr_unmask(&enic->intr[intr]);
Govindarajulu Varadarajanf41281d2014-11-13 04:12:06 +05301375 return 0;
Govindarajulu Varadarajan4cfe8782014-06-23 16:08:05 +05301376 }
1377
Govindarajulu Varadarajanf41281d2014-11-13 04:12:06 +05301378 return budget;
Govindarajulu Varadarajan4cfe8782014-06-23 16:08:05 +05301379}
1380
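/* Per-RQ NAPI handler used in MSI-X mode: services receive completions
 * for one RQ, refills the ring and, when going idle, optionally updates
 * the adaptive interrupt coalescing timer before unmasking the interrupt.
 */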
1381static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001382{
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001383 struct net_device *netdev = napi->dev;
1384 struct enic *enic = netdev_priv(netdev);
1385 unsigned int rq = (napi - &enic->napi[0]);
1386 unsigned int cq = enic_cq_rq(enic, rq);
1387 unsigned int intr = enic_msix_rq_intr(enic, rq);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001388 unsigned int work_to_do = budget;
Eric W. Biederman4c502542014-03-14 18:02:08 -07001389 unsigned int work_done = 0;
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001390 int err;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001391
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05301392 if (!enic_poll_lock_napi(&enic->rq[rq]))
Govindarajulu Varadarajanf104fed2015-01-20 18:46:15 +05301393 return budget;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001394 /* Service RQ
1395 */
1396
Eric W. Biederman4c502542014-03-14 18:02:08 -07001397 if (budget > 0)
1398 work_done = vnic_cq_service(&enic->cq[cq],
1399 work_to_do, enic_rq_service, NULL);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001400
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001401 /* Return intr event credits for this polling
1402 * cycle. An intr event is the completion of a
1403 * RQ packet.
1404 */
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001405
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001406 if (work_done > 0)
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001407 vnic_intr_return_credits(&enic->intr[intr],
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001408 work_done,
1409 0 /* don't unmask intr */,
1410 0 /* don't reset intr timer */);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001411
Vasanthy Kolluri0eb26022011-02-04 16:17:21 +00001412 err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001413
1414 /* Buffer allocation failed. Stay in polling mode
1415 * so we can try to fill the ring again.
1416 */
1417
1418 if (err)
1419 work_done = work_to_do;
Sujith Sankar7c2ce6e2014-05-20 03:14:05 +05301420 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
Govindarajulu Varadarajanfc865d62015-07-15 15:34:39 +05301421 /* Call the function which refreshes the intr coalescing timer
1422 * value based on the traffic.
Sujith Sankar7c2ce6e2014-05-20 03:14:05 +05301423 */
1424 enic_calc_int_moderation(enic, &enic->rq[rq]);
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001425
Govindarajulu Varadarajanf586a332015-06-25 16:02:04 +05301426 enic_poll_unlock_napi(&enic->rq[rq], napi);
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001427 if (work_done < work_to_do) {
1428
1429	/* Some work done, but not enough to stay in polling;
Vasanthy Kolluri88132f52010-06-24 10:49:25 +00001430	 * exit polling.
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001431 */
1432
Ben Hutchings288379f2009-01-19 16:43:59 -08001433 napi_complete(napi);
Sujith Sankar7c2ce6e2014-05-20 03:14:05 +05301434 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
1435 enic_set_int_moderation(enic, &enic->rq[rq]);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001436 vnic_intr_unmask(&enic->intr[intr]);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001437 }
1438
1439 return work_done;
1440}
1441
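/* Periodic fallback (every ENIC_NOTIFY_TIMER_PERIOD) that polls the
 * notify area; it is armed only when MSI is used and no dedicated
 * notify interrupt is available (see enic_notify_timer_start()).
 */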
1442static void enic_notify_timer(unsigned long data)
1443{
1444 struct enic *enic = (struct enic *)data;
1445
1446 enic_notify_check(enic);
1447
Scott Feldman25f0a062008-09-24 11:23:32 -07001448 mod_timer(&enic->notify_timer,
1449 round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001450}
1451
1452static void enic_free_intr(struct enic *enic)
1453{
1454 struct net_device *netdev = enic->netdev;
1455 unsigned int i;
1456
Govindarajulu Varadarajanb6e97c12014-06-23 16:08:01 +05301457 enic_free_rx_cpu_rmap(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001458 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1459 case VNIC_DEV_INTR_MODE_INTX:
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001460 free_irq(enic->pdev->irq, netdev);
1461 break;
Scott Feldman8f4d2482008-09-24 11:23:42 -07001462 case VNIC_DEV_INTR_MODE_MSI:
1463 free_irq(enic->pdev->irq, enic);
1464 break;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001465 case VNIC_DEV_INTR_MODE_MSIX:
1466 for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
1467 if (enic->msix[i].requested)
1468 free_irq(enic->msix_entry[i].vector,
1469 enic->msix[i].devid);
1470 break;
1471 default:
1472 break;
1473 }
1474}
1475
1476static int enic_request_intr(struct enic *enic)
1477{
1478 struct net_device *netdev = enic->netdev;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001479 unsigned int i, intr;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001480 int err = 0;
1481
Govindarajulu Varadarajanb6e97c12014-06-23 16:08:01 +05301482 enic_set_rx_cpu_rmap(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001483 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1484
1485 case VNIC_DEV_INTR_MODE_INTX:
1486
1487 err = request_irq(enic->pdev->irq, enic_isr_legacy,
1488 IRQF_SHARED, netdev->name, netdev);
1489 break;
1490
1491 case VNIC_DEV_INTR_MODE_MSI:
1492
1493 err = request_irq(enic->pdev->irq, enic_isr_msi,
1494 0, netdev->name, enic);
1495 break;
1496
1497 case VNIC_DEV_INTR_MODE_MSIX:
1498
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001499 for (i = 0; i < enic->rq_count; i++) {
1500 intr = enic_msix_rq_intr(enic, i);
Dan Carpenter4505f402013-01-17 21:46:18 +00001501 snprintf(enic->msix[intr].devname,
1502 sizeof(enic->msix[intr].devname),
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001503 "%.11s-rx-%d", netdev->name, i);
Govindarajulu Varadarajan4cfe8782014-06-23 16:08:05 +05301504 enic->msix[intr].isr = enic_isr_msix;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001505 enic->msix[intr].devid = &enic->napi[i];
1506 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001507
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001508 for (i = 0; i < enic->wq_count; i++) {
Govindarajulu Varadarajan4cfe8782014-06-23 16:08:05 +05301509 int wq = enic_cq_wq(enic, i);
1510
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001511 intr = enic_msix_wq_intr(enic, i);
Dan Carpenter4505f402013-01-17 21:46:18 +00001512 snprintf(enic->msix[intr].devname,
1513 sizeof(enic->msix[intr].devname),
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001514 "%.11s-tx-%d", netdev->name, i);
Govindarajulu Varadarajan4cfe8782014-06-23 16:08:05 +05301515 enic->msix[intr].isr = enic_isr_msix;
1516 enic->msix[intr].devid = &enic->napi[wq];
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001517 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001518
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001519 intr = enic_msix_err_intr(enic);
Dan Carpenter4505f402013-01-17 21:46:18 +00001520 snprintf(enic->msix[intr].devname,
1521 sizeof(enic->msix[intr].devname),
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001522 "%.11s-err", netdev->name);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001523 enic->msix[intr].isr = enic_isr_msix_err;
1524 enic->msix[intr].devid = enic;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001525
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001526 intr = enic_msix_notify_intr(enic);
Dan Carpenter4505f402013-01-17 21:46:18 +00001527 snprintf(enic->msix[intr].devname,
1528 sizeof(enic->msix[intr].devname),
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001529 "%.11s-notify", netdev->name);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001530 enic->msix[intr].isr = enic_isr_msix_notify;
1531 enic->msix[intr].devid = enic;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001532
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001533 for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
1534 enic->msix[i].requested = 0;
1535
1536 for (i = 0; i < enic->intr_count; i++) {
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001537 err = request_irq(enic->msix_entry[i].vector,
1538 enic->msix[i].isr, 0,
1539 enic->msix[i].devname,
1540 enic->msix[i].devid);
1541 if (err) {
1542 enic_free_intr(enic);
1543 break;
1544 }
1545 enic->msix[i].requested = 1;
1546 }
1547
1548 break;
1549
1550 default:
1551 break;
1552 }
1553
1554 return err;
1555}
1556
Scott Feldmanb3d18d12009-12-23 13:27:30 +00001557static void enic_synchronize_irqs(struct enic *enic)
1558{
1559 unsigned int i;
1560
1561 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1562 case VNIC_DEV_INTR_MODE_INTX:
1563 case VNIC_DEV_INTR_MODE_MSI:
1564 synchronize_irq(enic->pdev->irq);
1565 break;
1566 case VNIC_DEV_INTR_MODE_MSIX:
1567 for (i = 0; i < enic->intr_count; i++)
1568 synchronize_irq(enic->msix_entry[i].vector);
1569 break;
1570 default:
1571 break;
1572 }
1573}
1574
Sujith Sankar7c2ce6e2014-05-20 03:14:05 +05301575static void enic_set_rx_coal_setting(struct enic *enic)
1576{
1577 unsigned int speed;
1578 int index = -1;
1579 struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
1580
Sujith Sankar7c2ce6e2014-05-20 03:14:05 +05301581 /* 1. Read the link speed from fw
1582 * 2. Pick the default range for the speed
1583 * 3. Update it in enic->rx_coalesce_setting
1584 */
1585 speed = vnic_dev_port_speed(enic->vdev);
1586 if (ENIC_LINK_SPEED_10G < speed)
1587 index = ENIC_LINK_40G_INDEX;
1588 else if (ENIC_LINK_SPEED_4G < speed)
1589 index = ENIC_LINK_10G_INDEX;
1590 else
1591 index = ENIC_LINK_4G_INDEX;
1592
1593 rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
1594 rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
1595 rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
1596
1597 /* Start with the value provided by UCSM */
1598 for (index = 0; index < enic->rq_count; index++)
1599 enic->cq[index].cur_rx_coal_timeval =
1600 enic->config.intr_timer_usec;
1601
1602 rx_coal->use_adaptive_rx_coalesce = 1;
1603}
1604
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001605static int enic_dev_notify_set(struct enic *enic)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001606{
1607 int err;
1608
Tony Camuso8e091342014-06-23 16:08:03 +05301609 spin_lock_bh(&enic->devcmd_lock);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001610 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1611 case VNIC_DEV_INTR_MODE_INTX:
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001612 err = vnic_dev_notify_set(enic->vdev,
1613 enic_legacy_notify_intr());
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001614 break;
1615 case VNIC_DEV_INTR_MODE_MSIX:
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001616 err = vnic_dev_notify_set(enic->vdev,
1617 enic_msix_notify_intr(enic));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001618 break;
1619 default:
1620 err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
1621 break;
1622 }
Tony Camuso8e091342014-06-23 16:08:03 +05301623 spin_unlock_bh(&enic->devcmd_lock);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001624
1625 return err;
1626}
1627
1628static void enic_notify_timer_start(struct enic *enic)
1629{
1630 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1631 case VNIC_DEV_INTR_MODE_MSI:
1632 mod_timer(&enic->notify_timer, jiffies);
1633 break;
1634 default:
1635 /* Using intr for notification for INTx/MSI-X */
1636 break;
Joe Perches6403eab2011-06-03 11:51:20 +00001637 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001638}
1639
1640/* rtnl lock is held, process context */
1641static int enic_open(struct net_device *netdev)
1642{
1643 struct enic *enic = netdev_priv(netdev);
1644 unsigned int i;
1645 int err;
1646
Scott Feldman4b75a442008-09-24 11:23:53 -07001647 err = enic_request_intr(enic);
1648 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001649 netdev_err(netdev, "Unable to request irq.\n");
Scott Feldman4b75a442008-09-24 11:23:53 -07001650 return err;
1651 }
1652
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001653 err = enic_dev_notify_set(enic);
Scott Feldman4b75a442008-09-24 11:23:53 -07001654 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001655 netdev_err(netdev,
1656 "Failed to alloc notify buffer, aborting.\n");
Scott Feldman4b75a442008-09-24 11:23:53 -07001657 goto err_out_free_intr;
1658 }
1659
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001660 for (i = 0; i < enic->rq_count; i++) {
Vasanthy Kolluri0eb26022011-02-04 16:17:21 +00001661 vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001662 /* Need at least one buffer on ring to get going */
1663 if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001664 netdev_err(netdev, "Unable to alloc receive buffers\n");
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001665 err = -ENOMEM;
Govindarajulu Varadarajan9dac6232015-01-02 20:53:27 +05301666 goto err_out_free_rq;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001667 }
1668 }
1669
1670 for (i = 0; i < enic->wq_count; i++)
1671 vnic_wq_enable(&enic->wq[i]);
1672 for (i = 0; i < enic->rq_count; i++)
1673 vnic_rq_enable(&enic->rq[i]);
1674
Roopa Prabhu73359032012-01-18 04:24:02 +00001675 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
Roopa Prabhu296390592010-12-08 13:54:03 +00001676 enic_dev_add_station_addr(enic);
Roopa Prabhu3f192792011-09-22 03:44:43 +00001677
Roopa Prabhu319d7e82010-12-08 13:19:58 +00001678 enic_set_rx_mode(netdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001679
govindarajulu.v822473b2013-09-04 11:17:14 +05301680 netif_tx_wake_all_queues(netdev);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001681
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05301682 for (i = 0; i < enic->rq_count; i++) {
1683 enic_busy_poll_init_lock(&enic->rq[i]);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001684 napi_enable(&enic->napi[i]);
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05301685 }
Govindarajulu Varadarajan4cfe8782014-06-23 16:08:05 +05301686 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
1687 for (i = 0; i < enic->wq_count; i++)
1688 napi_enable(&enic->napi[enic_cq_wq(enic, i)]);
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001689 enic_dev_enable(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001690
1691 for (i = 0; i < enic->intr_count; i++)
1692 vnic_intr_unmask(&enic->intr[i]);
1693
1694 enic_notify_timer_start(enic);
Govindarajulu Varadarajana145df22014-06-23 16:08:02 +05301695 enic_rfs_flw_tbl_init(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001696
1697 return 0;
Scott Feldman4b75a442008-09-24 11:23:53 -07001698
Govindarajulu Varadarajan9dac6232015-01-02 20:53:27 +05301699err_out_free_rq:
1700 for (i = 0; i < enic->rq_count; i++)
1701 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001702 enic_dev_notify_unset(enic);
Scott Feldman4b75a442008-09-24 11:23:53 -07001703err_out_free_intr:
1704 enic_free_intr(enic);
1705
1706 return err;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001707}
1708
1709/* rtnl lock is held, process context */
1710static int enic_stop(struct net_device *netdev)
1711{
1712 struct enic *enic = netdev_priv(netdev);
1713 unsigned int i;
1714 int err;
1715
Vasanthy Kolluri29046f92010-06-24 10:52:26 +00001716 for (i = 0; i < enic->intr_count; i++) {
Scott Feldmanb3d18d12009-12-23 13:27:30 +00001717 vnic_intr_mask(&enic->intr[i]);
Vasanthy Kolluri29046f92010-06-24 10:52:26 +00001718 (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
1719 }
Scott Feldmanb3d18d12009-12-23 13:27:30 +00001720
1721 enic_synchronize_irqs(enic);
1722
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001723 del_timer_sync(&enic->notify_timer);
Govindarajulu Varadarajana145df22014-06-23 16:08:02 +05301724 enic_rfs_flw_tbl_free(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001725
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001726 enic_dev_disable(enic);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001727
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05301728 for (i = 0; i < enic->rq_count; i++) {
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001729 napi_disable(&enic->napi[i]);
Govindarajulu Varadarajan39dc90c2014-10-19 14:20:28 +05301730 local_bh_disable();
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05301731 while (!enic_poll_lock_napi(&enic->rq[i]))
1732 mdelay(1);
Govindarajulu Varadarajan39dc90c2014-10-19 14:20:28 +05301733 local_bh_enable();
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05301734 }
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001735
Scott Feldmanb3d18d12009-12-23 13:27:30 +00001736 netif_carrier_off(netdev);
1737 netif_tx_disable(netdev);
Govindarajulu Varadarajan4cfe8782014-06-23 16:08:05 +05301738 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
1739 for (i = 0; i < enic->wq_count; i++)
1740 napi_disable(&enic->napi[enic_cq_wq(enic, i)]);
Roopa Prabhu3f192792011-09-22 03:44:43 +00001741
Roopa Prabhu73359032012-01-18 04:24:02 +00001742 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
Roopa Prabhu296390592010-12-08 13:54:03 +00001743 enic_dev_del_station_addr(enic);
Scott Feldmanf8bd9092010-05-17 22:50:19 -07001744
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001745 for (i = 0; i < enic->wq_count; i++) {
1746 err = vnic_wq_disable(&enic->wq[i]);
1747 if (err)
1748 return err;
1749 }
1750 for (i = 0; i < enic->rq_count; i++) {
1751 err = vnic_rq_disable(&enic->rq[i]);
1752 if (err)
1753 return err;
1754 }
1755
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001756 enic_dev_notify_unset(enic);
Scott Feldman4b75a442008-09-24 11:23:53 -07001757 enic_free_intr(enic);
1758
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001759 for (i = 0; i < enic->wq_count; i++)
1760 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
1761 for (i = 0; i < enic->rq_count; i++)
1762 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
1763 for (i = 0; i < enic->cq_count; i++)
1764 vnic_cq_clean(&enic->cq[i]);
1765 for (i = 0; i < enic->intr_count; i++)
1766 vnic_intr_clean(&enic->intr[i]);
1767
1768 return 0;
1769}
1770
1771static int enic_change_mtu(struct net_device *netdev, int new_mtu)
1772{
1773 struct enic *enic = netdev_priv(netdev);
1774 int running = netif_running(netdev);
1775
Scott Feldman25f0a062008-09-24 11:23:32 -07001776 if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
1777 return -EINVAL;
1778
Roopa Prabhu73359032012-01-18 04:24:02 +00001779 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
Roopa Prabhuc97c8942011-06-03 14:35:17 +00001780 return -EOPNOTSUPP;
1781
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001782 if (running)
1783 enic_stop(netdev);
1784
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001785 netdev->mtu = new_mtu;
1786
1787 if (netdev->mtu > enic->port_mtu)
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001788 netdev_warn(netdev,
1789 "interface MTU (%d) set higher than port MTU (%d)\n",
1790 netdev->mtu, enic->port_mtu);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001791
1792 if (running)
1793 enic_open(netdev);
1794
1795 return 0;
1796}
1797
Roopa Prabhuc97c8942011-06-03 14:35:17 +00001798static void enic_change_mtu_work(struct work_struct *work)
1799{
1800 struct enic *enic = container_of(work, struct enic, change_mtu_work);
1801 struct net_device *netdev = enic->netdev;
1802 int new_mtu = vnic_dev_mtu(enic->vdev);
1803 int err;
1804 unsigned int i;
1805
1806 new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
1807
1808 rtnl_lock();
1809
1810 /* Stop RQ */
1811 del_timer_sync(&enic->notify_timer);
1812
1813 for (i = 0; i < enic->rq_count; i++)
1814 napi_disable(&enic->napi[i]);
1815
1816 vnic_intr_mask(&enic->intr[0]);
1817 enic_synchronize_irqs(enic);
1818 err = vnic_rq_disable(&enic->rq[0]);
1819 if (err) {
Konstantin Khlebnikove0575902013-07-08 11:22:51 +04001820 rtnl_unlock();
Roopa Prabhuc97c8942011-06-03 14:35:17 +00001821 netdev_err(netdev, "Unable to disable RQ.\n");
1822 return;
1823 }
1824 vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
1825 vnic_cq_clean(&enic->cq[0]);
1826 vnic_intr_clean(&enic->intr[0]);
1827
1828 /* Fill RQ with new_mtu-sized buffers */
1829 netdev->mtu = new_mtu;
1830 vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
1831 /* Need at least one buffer on ring to get going */
1832 if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
Konstantin Khlebnikove0575902013-07-08 11:22:51 +04001833 rtnl_unlock();
Roopa Prabhuc97c8942011-06-03 14:35:17 +00001834 netdev_err(netdev, "Unable to alloc receive buffers.\n");
1835 return;
1836 }
1837
1838 /* Start RQ */
1839 vnic_rq_enable(&enic->rq[0]);
1840 napi_enable(&enic->napi[0]);
1841 vnic_intr_unmask(&enic->intr[0]);
1842 enic_notify_timer_start(enic);
1843
1844 rtnl_unlock();
1845
1846 netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
1847}
1848
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001849#ifdef CONFIG_NET_POLL_CONTROLLER
1850static void enic_poll_controller(struct net_device *netdev)
1851{
1852 struct enic *enic = netdev_priv(netdev);
1853 struct vnic_dev *vdev = enic->vdev;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001854 unsigned int i, intr;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001855
1856 switch (vnic_dev_get_intr_mode(vdev)) {
1857 case VNIC_DEV_INTR_MODE_MSIX:
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001858 for (i = 0; i < enic->rq_count; i++) {
1859 intr = enic_msix_rq_intr(enic, i);
Govindarajulu Varadarajan4cfe8782014-06-23 16:08:05 +05301860 enic_isr_msix(enic->msix_entry[intr].vector,
1861 &enic->napi[i]);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001862 }
Vasanthy Kollurib880a952011-06-09 10:37:07 +00001863
1864 for (i = 0; i < enic->wq_count; i++) {
1865 intr = enic_msix_wq_intr(enic, i);
Govindarajulu Varadarajan4cfe8782014-06-23 16:08:05 +05301866 enic_isr_msix(enic->msix_entry[intr].vector,
1867 &enic->napi[enic_cq_wq(enic, i)]);
Vasanthy Kollurib880a952011-06-09 10:37:07 +00001868 }
1869
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001870 break;
1871 case VNIC_DEV_INTR_MODE_MSI:
1872 enic_isr_msi(enic->pdev->irq, enic);
1873 break;
1874 case VNIC_DEV_INTR_MODE_INTX:
1875 enic_isr_legacy(enic->pdev->irq, netdev);
1876 break;
1877 default:
1878 break;
1879 }
1880}
1881#endif
1882
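/* Issue an asynchronous device command via @start and poll @finished
 * roughly every 100 ms until it reports completion; gives up after
 * about two seconds. Must be called from process context.
 */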
1883static int enic_dev_wait(struct vnic_dev *vdev,
1884 int (*start)(struct vnic_dev *, int),
1885 int (*finished)(struct vnic_dev *, int *),
1886 int arg)
1887{
1888 unsigned long time;
1889 int done;
1890 int err;
1891
1892 BUG_ON(in_interrupt());
1893
1894 err = start(vdev, arg);
1895 if (err)
1896 return err;
1897
1898 /* Wait for func to complete...2 seconds max
1899 */
1900
1901 time = jiffies + (HZ * 2);
1902 do {
1903
1904 err = finished(vdev, &done);
1905 if (err)
1906 return err;
1907
1908 if (done)
1909 return 0;
1910
1911 schedule_timeout_uninterruptible(HZ / 10);
1912
1913 } while (time_after(time, jiffies));
1914
1915 return -ETIMEDOUT;
1916}
1917
1918static int enic_dev_open(struct enic *enic)
1919{
1920 int err;
1921
1922 err = enic_dev_wait(enic->vdev, vnic_dev_open,
1923 vnic_dev_open_done, 0);
1924 if (err)
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001925 dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
1926 err);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001927
1928 return err;
1929}
1930
Vasanthy Kolluri99ef5632010-06-24 10:50:00 +00001931static int enic_dev_hang_reset(struct enic *enic)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001932{
1933 int err;
1934
Vasanthy Kolluri99ef5632010-06-24 10:50:00 +00001935 err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
1936 vnic_dev_hang_reset_done, 0);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001937 if (err)
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001938 netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
1939 err);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001940
1941 return err;
1942}
1943
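/* Copy enic->rss_key into the hardware vnic_rss_key layout in a
 * DMA-coherent buffer and hand it to the firmware under the devcmd
 * lock. Callers are expected to have filled enic->rss_key first.
 */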
Govindarajulu Varadarajan4f675eb2014-12-10 13:40:23 +05301944int __enic_set_rsskey(struct enic *enic)
Scott Feldman68f71702009-02-09 23:24:24 -08001945{
Eric Dumazetc33d23c2014-11-23 12:27:41 -08001946 union vnic_rss_key *rss_key_buf_va;
Vasanthy Kolluri1f4f0672010-11-15 08:09:55 +00001947 dma_addr_t rss_key_buf_pa;
Eric Dumazetc33d23c2014-11-23 12:27:41 -08001948 int i, kidx, bidx, err;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001949
Eric Dumazetc33d23c2014-11-23 12:27:41 -08001950 rss_key_buf_va = pci_zalloc_consistent(enic->pdev,
1951 sizeof(union vnic_rss_key),
1952 &rss_key_buf_pa);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001953 if (!rss_key_buf_va)
1954 return -ENOMEM;
1955
Eric Dumazetc33d23c2014-11-23 12:27:41 -08001956 for (i = 0; i < ENIC_RSS_LEN; i++) {
1957 kidx = i / ENIC_RSS_BYTES_PER_KEY;
1958 bidx = i % ENIC_RSS_BYTES_PER_KEY;
Govindarajulu Varadarajan4f675eb2014-12-10 13:40:23 +05301959 rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i];
Eric Dumazetc33d23c2014-11-23 12:27:41 -08001960 }
Tony Camuso8e091342014-06-23 16:08:03 +05301961 spin_lock_bh(&enic->devcmd_lock);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001962 err = enic_set_rss_key(enic,
1963 rss_key_buf_pa,
1964 sizeof(union vnic_rss_key));
Tony Camuso8e091342014-06-23 16:08:03 +05301965 spin_unlock_bh(&enic->devcmd_lock);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001966
1967 pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
1968 rss_key_buf_va, rss_key_buf_pa);
1969
1970 return err;
1971}
1972
Govindarajulu Varadarajan4f675eb2014-12-10 13:40:23 +05301973static int enic_set_rsskey(struct enic *enic)
1974{
1975 netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN);
1976
1977 return __enic_set_rsskey(enic);
1978}
1979
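/* Program the RSS indirection table: 2^rss_hash_bits entries spread
 * round-robin across the enabled receive queues.
 */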
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001980static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
1981{
Vasanthy Kolluri1f4f0672010-11-15 08:09:55 +00001982 dma_addr_t rss_cpu_buf_pa;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001983 union vnic_rss_cpu *rss_cpu_buf_va = NULL;
1984 unsigned int i;
1985 int err;
1986
1987 rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
1988 sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
1989 if (!rss_cpu_buf_va)
1990 return -ENOMEM;
1991
1992 for (i = 0; i < (1 << rss_hash_bits); i++)
1993 (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
1994
Tony Camuso8e091342014-06-23 16:08:03 +05301995 spin_lock_bh(&enic->devcmd_lock);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001996 err = enic_set_rss_cpu(enic,
1997 rss_cpu_buf_pa,
1998 sizeof(union vnic_rss_cpu));
Tony Camuso8e091342014-06-23 16:08:03 +05301999 spin_unlock_bh(&enic->devcmd_lock);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002000
2001 pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
2002 rss_cpu_buf_va, rss_cpu_buf_pa);
2003
2004 return err;
2005}
2006
2007static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
2008 u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
2009{
Scott Feldman68f71702009-02-09 23:24:24 -08002010 const u8 tso_ipid_split_en = 0;
2011 const u8 ig_vlan_strip_en = 1;
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00002012 int err;
Scott Feldman68f71702009-02-09 23:24:24 -08002013
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002014 /* Enable VLAN tag stripping.
2015 */
Scott Feldman68f71702009-02-09 23:24:24 -08002016
Tony Camuso8e091342014-06-23 16:08:03 +05302017 spin_lock_bh(&enic->devcmd_lock);
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00002018 err = enic_set_nic_cfg(enic,
Scott Feldman68f71702009-02-09 23:24:24 -08002019 rss_default_cpu, rss_hash_type,
2020 rss_hash_bits, rss_base_cpu,
2021 rss_enable, tso_ipid_split_en,
2022 ig_vlan_strip_en);
Tony Camuso8e091342014-06-23 16:08:03 +05302023 spin_unlock_bh(&enic->devcmd_lock);
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00002024
2025 return err;
2026}
2027
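/* Enable RSS when the adapter advertises it and more than one RQ is in
 * use; if programming the key or the indirection table fails, fall back
 * to a non-RSS configuration with a warning.
 */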
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002028static int enic_set_rss_nic_cfg(struct enic *enic)
2029{
2030 struct device *dev = enic_get_dev(enic);
2031 const u8 rss_default_cpu = 0;
2032 const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
2033 NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
2034 NIC_CFG_RSS_HASH_TYPE_IPV6 |
2035 NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
2036 const u8 rss_hash_bits = 7;
2037 const u8 rss_base_cpu = 0;
2038 u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
2039
2040 if (rss_enable) {
2041 if (!enic_set_rsskey(enic)) {
2042 if (enic_set_rsscpu(enic, rss_hash_bits)) {
2043 rss_enable = 0;
2044 dev_warn(dev, "RSS disabled, "
2045 "Failed to set RSS cpu indirection table.");
2046 }
2047 } else {
2048 rss_enable = 0;
2049 dev_warn(dev, "RSS disabled, Failed to set RSS key.\n");
2050 }
2051 }
2052
2053 return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
2054 rss_hash_bits, rss_base_cpu, rss_enable);
2055}
2056
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002057static void enic_reset(struct work_struct *work)
2058{
2059 struct enic *enic = container_of(work, struct enic, reset);
2060
2061 if (!netif_running(enic->netdev))
2062 return;
2063
2064 rtnl_lock();
2065
Neel Patel0b038562013-08-16 15:47:40 -07002066 spin_lock(&enic->enic_api_lock);
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00002067 enic_dev_hang_notify(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002068 enic_stop(enic->netdev);
Vasanthy Kolluri99ef5632010-06-24 10:50:00 +00002069 enic_dev_hang_reset(enic);
Vasanthy Kollurie0afe532011-02-17 08:53:12 +00002070 enic_reset_addr_lists(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002071 enic_init_vnic_resources(enic);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002072 enic_set_rss_nic_cfg(enic);
Vasanthy Kollurif8cac142010-06-24 10:49:51 +00002073 enic_dev_set_ig_vlan_rewrite_mode(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002074 enic_open(enic->netdev);
Neel Patel0b038562013-08-16 15:47:40 -07002075 spin_unlock(&enic->enic_api_lock);
Neel Pateld765bb42013-08-16 15:47:41 -07002076 call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002077
2078 rtnl_unlock();
2079}
2080
2081static int enic_set_intr_mode(struct enic *enic)
2082{
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002083 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
Vasanthy Kolluri1cbb1a62011-02-17 13:57:19 +00002084 unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002085 unsigned int i;
2086
2087 /* Set interrupt mode (INTx, MSI, MSI-X) depending
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002088 * on system capabilities.
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002089 *
2090 * Try MSI-X first
2091 *
2092 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
2093 * (the second to last INTR is used for WQ/RQ errors)
2094 * (the last INTR is used for notifications)
2095 */
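	/* For example, a vNIC provisioned with n = 8 RQs and m = 8 WQs needs
	 * 16 completion queues and 18 MSI-X vectors: one per RQ, one per WQ,
	 * one for errors and one for notifications.
	 */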
2096
2097 BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
2098 for (i = 0; i < n + m + 2; i++)
2099 enic->msix_entry[i].entry = i;
2100
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002101 /* Use multiple RQs if RSS is enabled
2102 */
2103
2104 if (ENIC_SETTING(enic, RSS) &&
2105 enic->config.intr_mode < 1 &&
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002106 enic->rq_count >= n &&
2107 enic->wq_count >= m &&
2108 enic->cq_count >= n + m &&
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002109 enic->intr_count >= n + m + 2) {
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002110
Alexander Gordeevabbb6a32014-02-18 11:08:02 +01002111 if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
2112 n + m + 2, n + m + 2) > 0) {
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002113
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002114 enic->rq_count = n;
2115 enic->wq_count = m;
2116 enic->cq_count = n + m;
2117 enic->intr_count = n + m + 2;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002118
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002119 vnic_dev_set_intr_mode(enic->vdev,
2120 VNIC_DEV_INTR_MODE_MSIX);
2121
2122 return 0;
2123 }
2124 }
2125
2126 if (enic->config.intr_mode < 1 &&
2127 enic->rq_count >= 1 &&
2128 enic->wq_count >= m &&
2129 enic->cq_count >= 1 + m &&
2130 enic->intr_count >= 1 + m + 2) {
Alexander Gordeevabbb6a32014-02-18 11:08:02 +01002131 if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
2132 1 + m + 2, 1 + m + 2) > 0) {
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002133
2134 enic->rq_count = 1;
2135 enic->wq_count = m;
2136 enic->cq_count = 1 + m;
2137 enic->intr_count = 1 + m + 2;
2138
2139 vnic_dev_set_intr_mode(enic->vdev,
2140 VNIC_DEV_INTR_MODE_MSIX);
2141
2142 return 0;
2143 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002144 }
2145
2146 /* Next try MSI
2147 *
2148 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
2149 */
2150
2151 if (enic->config.intr_mode < 2 &&
2152 enic->rq_count >= 1 &&
2153 enic->wq_count >= 1 &&
2154 enic->cq_count >= 2 &&
2155 enic->intr_count >= 1 &&
2156 !pci_enable_msi(enic->pdev)) {
2157
2158 enic->rq_count = 1;
2159 enic->wq_count = 1;
2160 enic->cq_count = 2;
2161 enic->intr_count = 1;
2162
2163 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
2164
2165 return 0;
2166 }
2167
2168 /* Next try INTx
2169 *
2170 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
2171 * (the first INTR is used for WQ/RQ)
2172 * (the second INTR is used for WQ/RQ errors)
2173 * (the last INTR is used for notifications)
2174 */
2175
2176 if (enic->config.intr_mode < 3 &&
2177 enic->rq_count >= 1 &&
2178 enic->wq_count >= 1 &&
2179 enic->cq_count >= 2 &&
2180 enic->intr_count >= 3) {
2181
2182 enic->rq_count = 1;
2183 enic->wq_count = 1;
2184 enic->cq_count = 2;
2185 enic->intr_count = 3;
2186
2187 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
2188
2189 return 0;
2190 }
2191
2192 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
2193
2194 return -EINVAL;
2195}
2196
2197static void enic_clear_intr_mode(struct enic *enic)
2198{
2199 switch (vnic_dev_get_intr_mode(enic->vdev)) {
2200 case VNIC_DEV_INTR_MODE_MSIX:
2201 pci_disable_msix(enic->pdev);
2202 break;
2203 case VNIC_DEV_INTR_MODE_MSI:
2204 pci_disable_msi(enic->pdev);
2205 break;
2206 default:
2207 break;
2208 }
2209
2210 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
2211}
2212
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002213static const struct net_device_ops enic_netdev_dynamic_ops = {
2214 .ndo_open = enic_open,
2215 .ndo_stop = enic_stop,
2216 .ndo_start_xmit = enic_hard_start_xmit,
stephen hemmingerf20530b2011-06-08 14:54:02 +00002217 .ndo_get_stats64 = enic_get_stats,
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002218 .ndo_validate_addr = eth_validate_addr,
Roopa Prabhu319d7e82010-12-08 13:19:58 +00002219 .ndo_set_rx_mode = enic_set_rx_mode,
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002220 .ndo_set_mac_address = enic_set_mac_address_dynamic,
2221 .ndo_change_mtu = enic_change_mtu,
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002222 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
2223 .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
2224 .ndo_tx_timeout = enic_tx_timeout,
2225 .ndo_set_vf_port = enic_set_vf_port,
2226 .ndo_get_vf_port = enic_get_vf_port,
Roopa Prabhu0b1c00f2010-12-08 13:53:58 +00002227 .ndo_set_vf_mac = enic_set_vf_mac,
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002228#ifdef CONFIG_NET_POLL_CONTROLLER
2229 .ndo_poll_controller = enic_poll_controller,
2230#endif
Govindarajulu Varadarajana145df22014-06-23 16:08:02 +05302231#ifdef CONFIG_RFS_ACCEL
2232 .ndo_rx_flow_steer = enic_rx_flow_steer,
2233#endif
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05302234#ifdef CONFIG_NET_RX_BUSY_POLL
2235 .ndo_busy_poll = enic_busy_poll,
2236#endif
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002237};
2238
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08002239static const struct net_device_ops enic_netdev_ops = {
2240 .ndo_open = enic_open,
2241 .ndo_stop = enic_stop,
Stephen Hemminger00829822008-11-20 20:14:53 -08002242 .ndo_start_xmit = enic_hard_start_xmit,
stephen hemmingerf20530b2011-06-08 14:54:02 +00002243 .ndo_get_stats64 = enic_get_stats,
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08002244 .ndo_validate_addr = eth_validate_addr,
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002245 .ndo_set_mac_address = enic_set_mac_address,
Roopa Prabhu319d7e82010-12-08 13:19:58 +00002246 .ndo_set_rx_mode = enic_set_rx_mode,
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08002247 .ndo_change_mtu = enic_change_mtu,
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08002248 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
2249 .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
2250 .ndo_tx_timeout = enic_tx_timeout,
Roopa Prabhu3f192792011-09-22 03:44:43 +00002251 .ndo_set_vf_port = enic_set_vf_port,
2252 .ndo_get_vf_port = enic_get_vf_port,
2253 .ndo_set_vf_mac = enic_set_vf_mac,
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08002254#ifdef CONFIG_NET_POLL_CONTROLLER
2255 .ndo_poll_controller = enic_poll_controller,
2256#endif
Govindarajulu Varadarajana145df22014-06-23 16:08:02 +05302257#ifdef CONFIG_RFS_ACCEL
2258 .ndo_rx_flow_steer = enic_rx_flow_steer,
2259#endif
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05302260#ifdef CONFIG_NET_RX_BUSY_POLL
2261 .ndo_busy_poll = enic_busy_poll,
2262#endif
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08002263};
2264
Vasanthy Kolluri2fdba382010-09-30 13:35:45 +00002265static void enic_dev_deinit(struct enic *enic)
Scott Feldman6fdfa972009-09-03 17:02:45 +00002266{
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002267 unsigned int i;
2268
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05302269 for (i = 0; i < enic->rq_count; i++) {
2270 napi_hash_del(&enic->napi[i]);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002271 netif_napi_del(&enic->napi[i]);
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05302272 }
Govindarajulu Varadarajan4cfe8782014-06-23 16:08:05 +05302273 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
2274 for (i = 0; i < enic->wq_count; i++)
2275 netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002276
Scott Feldman6fdfa972009-09-03 17:02:45 +00002277 enic_free_vnic_resources(enic);
2278 enic_clear_intr_mode(enic);
2279}
2280
Govindarajulu Varadarajan3f255dc2015-01-03 19:35:44 +05302281static void enic_kdump_kernel_config(struct enic *enic)
2282{
2283 if (is_kdump_kernel()) {
2284 dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
2285 enic->rq_count = 1;
2286 enic->wq_count = 1;
2287 enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
2288 enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
2289 enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
2290 }
2291}
2292
Vasanthy Kolluri2fdba382010-09-30 13:35:45 +00002293static int enic_dev_init(struct enic *enic)
Scott Feldman6fdfa972009-09-03 17:02:45 +00002294{
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002295 struct device *dev = enic_get_dev(enic);
Scott Feldman6fdfa972009-09-03 17:02:45 +00002296 struct net_device *netdev = enic->netdev;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002297 unsigned int i;
Scott Feldman6fdfa972009-09-03 17:02:45 +00002298 int err;
2299
Vasanthy Kolluriea7ea652011-06-17 07:56:48 +00002300 /* Get interrupt coalesce timer info */
2301 err = enic_dev_intr_coal_timer_info(enic);
2302 if (err) {
2303 dev_warn(dev, "Using default conversion factor for "
2304 "interrupt coalesce timer\n");
2305 vnic_dev_intr_coal_timer_info_default(enic->vdev);
2306 }
2307
Scott Feldman6fdfa972009-09-03 17:02:45 +00002308 /* Get vNIC configuration
2309 */
2310
2311 err = enic_get_vnic_config(enic);
2312 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002313 dev_err(dev, "Get vNIC configuration failed, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002314 return err;
2315 }
2316
2317 /* Get available resource counts
2318 */
2319
2320 enic_get_res_counts(enic);
2321
Govindarajulu Varadarajan3f255dc2015-01-03 19:35:44 +05302322	/* Modify the resource counts if we are running in a kdump kernel
2323 */
2324 enic_kdump_kernel_config(enic);
2325
Scott Feldman6fdfa972009-09-03 17:02:45 +00002326 /* Set interrupt mode based on resource counts and system
2327 * capabilities
2328 */
2329
2330 err = enic_set_intr_mode(enic);
2331 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002332 dev_err(dev, "Failed to set intr mode based on resource "
2333 "counts and system capabilities, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002334 return err;
2335 }
2336
2337 /* Allocate and configure vNIC resources
2338 */
2339
2340 err = enic_alloc_vnic_resources(enic);
2341 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002342 dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002343 goto err_out_free_vnic_resources;
2344 }
2345
2346 enic_init_vnic_resources(enic);
2347
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002348 err = enic_set_rss_nic_cfg(enic);
Scott Feldman6fdfa972009-09-03 17:02:45 +00002349 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002350 dev_err(dev, "Failed to config nic, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002351 goto err_out_free_vnic_resources;
2352 }
2353
2354 switch (vnic_dev_get_intr_mode(enic->vdev)) {
2355 default:
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002356 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05302357 napi_hash_add(&enic->napi[0]);
Scott Feldman6fdfa972009-09-03 17:02:45 +00002358 break;
2359 case VNIC_DEV_INTR_MODE_MSIX:
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05302360 for (i = 0; i < enic->rq_count; i++) {
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002361 netif_napi_add(netdev, &enic->napi[i],
Govindarajulu Varadarajan4cfe8782014-06-23 16:08:05 +05302362 enic_poll_msix_rq, NAPI_POLL_WEIGHT);
Govindarajulu Varadarajan14747cd2014-06-23 16:08:04 +05302363 napi_hash_add(&enic->napi[i]);
2364 }
Govindarajulu Varadarajan4cfe8782014-06-23 16:08:05 +05302365 for (i = 0; i < enic->wq_count; i++)
2366 netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
2367 enic_poll_msix_wq, NAPI_POLL_WEIGHT);
Scott Feldman6fdfa972009-09-03 17:02:45 +00002368 break;
2369 }
2370
2371 return 0;
2372
2373err_out_free_vnic_resources:
2374 enic_clear_intr_mode(enic);
2375 enic_free_vnic_resources(enic);
2376
2377 return err;
2378}
2379
Scott Feldman27e6c7d2009-09-03 17:01:53 +00002380static void enic_iounmap(struct enic *enic)
2381{
2382 unsigned int i;
2383
2384 for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
2385 if (enic->bar[i].vaddr)
2386 iounmap(enic->bar[i].vaddr);
2387}
2388
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00002389static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002390{
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002391 struct device *dev = &pdev->dev;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002392 struct net_device *netdev;
2393 struct enic *enic;
2394 int using_dac = 0;
2395 unsigned int i;
2396 int err;
Roopa Prabhu8749b422011-09-22 03:44:33 +00002397#ifdef CONFIG_PCI_IOV
2398 int pos = 0;
2399#endif
Roopa Prabhub67f2312012-01-19 22:25:36 +00002400 int num_pps = 1;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002401
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002402 /* Allocate net device structure and initialize. Private
2403 * instance data is initialized to zero.
2404 */
2405
govindarajulu.v822473b2013-09-04 11:17:14 +05302406 netdev = alloc_etherdev_mqs(sizeof(struct enic),
2407 ENIC_RQ_MAX, ENIC_WQ_MAX);
Joe Perches41de8d42012-01-29 13:47:52 +00002408 if (!netdev)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002409 return -ENOMEM;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002410
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002411 pci_set_drvdata(pdev, netdev);
2412
2413 SET_NETDEV_DEV(netdev, &pdev->dev);
2414
2415 enic = netdev_priv(netdev);
2416 enic->netdev = netdev;
2417 enic->pdev = pdev;
2418
2419 /* Setup PCI resources
2420 */
2421
Vasanthy Kolluri29046f92010-06-24 10:52:26 +00002422 err = pci_enable_device_mem(pdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002423 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002424 dev_err(dev, "Cannot enable PCI device, aborting\n");
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002425 goto err_out_free_netdev;
2426 }
2427
2428 err = pci_request_regions(pdev, DRV_NAME);
2429 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002430 dev_err(dev, "Cannot request PCI regions, aborting\n");
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002431 goto err_out_disable_device;
2432 }
2433
2434 pci_set_master(pdev);
2435
2436 /* Query PCI controller on system for DMA addressing
govindarajulu.v624dbf52013-09-04 11:17:16 +05302437 * limitation for the device. Try 64-bit first, and
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002438	 * fall back to 32-bit.
2439 */
2440
govindarajulu.v624dbf52013-09-04 11:17:16 +05302441 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002442 if (err) {
Yang Hongyang284901a2009-04-06 19:01:15 -07002443 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002444 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002445 dev_err(dev, "No usable DMA configuration, aborting\n");
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002446 goto err_out_release_regions;
2447 }
Yang Hongyang284901a2009-04-06 19:01:15 -07002448 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002449 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002450 dev_err(dev, "Unable to obtain %u-bit DMA "
2451 "for consistent allocations, aborting\n", 32);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002452 goto err_out_release_regions;
2453 }
2454 } else {
govindarajulu.v624dbf52013-09-04 11:17:16 +05302455 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002456 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002457 dev_err(dev, "Unable to obtain %u-bit DMA "
govindarajulu.v624dbf52013-09-04 11:17:16 +05302458 "for consistent allocations, aborting\n", 64);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002459 goto err_out_release_regions;
2460 }
2461 using_dac = 1;
2462 }
2463
Scott Feldman27e6c7d2009-09-03 17:01:53 +00002464 /* Map vNIC resources from BAR0-5
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002465 */
2466
Scott Feldman27e6c7d2009-09-03 17:01:53 +00002467 for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
2468 if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
2469 continue;
2470 enic->bar[i].len = pci_resource_len(pdev, i);
2471 enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
2472 if (!enic->bar[i].vaddr) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002473 dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
Scott Feldman27e6c7d2009-09-03 17:01:53 +00002474 err = -ENODEV;
2475 goto err_out_iounmap;
2476 }
2477 enic->bar[i].bus_addr = pci_resource_start(pdev, i);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002478 }
2479
2480 /* Register vNIC device
2481 */
2482
Scott Feldman27e6c7d2009-09-03 17:01:53 +00002483 enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
2484 ARRAY_SIZE(enic->bar));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002485 if (!enic->vdev) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002486 dev_err(dev, "vNIC registration failed, aborting\n");
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002487 err = -ENODEV;
2488 goto err_out_iounmap;
2489 }
2490
Govindarajulu Varadarajan373fb082015-08-16 01:44:54 +05302491 err = vnic_devcmd_init(enic->vdev);
2492
2493 if (err)
2494 goto err_out_vnic_unregister;
2495
Roopa Prabhu8749b422011-09-22 03:44:33 +00002496#ifdef CONFIG_PCI_IOV
2497 /* Get number of subvnics */
2498 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
2499 if (pos) {
2500 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
Dan Carpenter413708b2012-02-29 21:19:54 +00002501 &enic->num_vfs);
Roopa Prabhu8749b422011-09-22 03:44:33 +00002502 if (enic->num_vfs) {
2503 err = pci_enable_sriov(pdev, enic->num_vfs);
2504 if (err) {
2505 dev_err(dev, "SRIOV enable failed, aborting."
2506 " pci_enable_sriov() returned %d\n",
2507 err);
2508 goto err_out_vnic_unregister;
2509 }
2510 enic->priv_flags |= ENIC_SRIOV_ENABLED;
Roopa Prabhub67f2312012-01-19 22:25:36 +00002511 num_pps = enic->num_vfs;
Roopa Prabhu8749b422011-09-22 03:44:33 +00002512 }
2513 }
Roopa Prabhu8749b422011-09-22 03:44:33 +00002514#endif
Roopa Prabhuca2b7212012-01-18 04:24:07 +00002515
Roopa Prabhu3f192792011-09-22 03:44:43 +00002516 /* Allocate structure for port profiles */
Thomas Meyera1de2212011-11-29 11:08:00 +00002517 enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
Roopa Prabhu3f192792011-09-22 03:44:43 +00002518 if (!enic->pp) {
Roopa Prabhu3f192792011-09-22 03:44:43 +00002519 err = -ENOMEM;
Roopa Prabhuca2b7212012-01-18 04:24:07 +00002520 goto err_out_disable_sriov_pp;
Roopa Prabhu3f192792011-09-22 03:44:43 +00002521 }
2522
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002523 /* Issue device open to get device in known state
2524 */
2525
2526 err = enic_dev_open(enic);
2527 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002528 dev_err(dev, "vNIC dev open failed, aborting\n");
Roopa Prabhuca2b7212012-01-18 04:24:07 +00002529 goto err_out_disable_sriov;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002530 }
2531
Vasanthy Kolluri69161422011-02-04 16:17:16 +00002532 /* Setup devcmd lock
2533 */
2534
2535 spin_lock_init(&enic->devcmd_lock);
Neel Patel0b038562013-08-16 15:47:40 -07002536 spin_lock_init(&enic->enic_api_lock);
Vasanthy Kolluri69161422011-02-04 16:17:16 +00002537
2538 /*
2539 * Set ingress vlan rewrite mode before vnic initialization
2540 */
2541
2542 err = enic_dev_set_ig_vlan_rewrite_mode(enic);
2543 if (err) {
2544 dev_err(dev,
2545 "Failed to set ingress vlan rewrite mode, aborting.\n");
2546 goto err_out_dev_close;
2547 }
2548
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002549 /* Issue device init to initialize the vnic-to-switch link.
2550 * We'll start with carrier off and wait for link UP
2551 * notification later to turn on carrier. We don't need
2552 * to wait here for the vnic-to-switch link initialization
2553 * to complete; link UP notification is the indication that
2554 * the process is complete.
2555 */
2556
2557 netif_carrier_off(netdev);
2558
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002559 /* Do not call dev_init for a dynamic vnic.
2560 * For a dynamic vnic, init_prov_info will be
2561 * called later by an upper layer.
2562 */
2563
Roopa Prabhu2b68c182012-02-20 00:12:04 +00002564 if (!enic_is_dynamic(enic)) {
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002565 err = vnic_dev_init(enic->vdev, 0);
2566 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002567 dev_err(dev, "vNIC dev init failed, aborting\n");
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002568 goto err_out_dev_close;
2569 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002570 }
2571
Scott Feldman6fdfa972009-09-03 17:02:45 +00002572 err = enic_dev_init(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002573 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002574 dev_err(dev, "Device initialization failed, aborting\n");
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002575 goto err_out_dev_close;
2576 }
2577
govindarajulu.v822473b2013-09-04 11:17:14 +05302578 netif_set_real_num_tx_queues(netdev, enic->wq_count);
govindarajulu.vbf751ba2013-09-04 11:17:15 +05302579 netif_set_real_num_rx_queues(netdev, enic->rq_count);
govindarajulu.v822473b2013-09-04 11:17:14 +05302580
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00002581 /* Setup notification timer, HW reset task, and wq locks
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002582 */
2583
2584 init_timer(&enic->notify_timer);
2585 enic->notify_timer.function = enic_notify_timer;
2586 enic->notify_timer.data = (unsigned long)enic;
2587
Sujith Sankar7c2ce6e2014-05-20 03:14:05 +05302588 enic_set_rx_coal_setting(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002589 INIT_WORK(&enic->reset, enic_reset);
Roopa Prabhuc97c8942011-06-03 14:35:17 +00002590 INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002591
2592 for (i = 0; i < enic->wq_count; i++)
2593 spin_lock_init(&enic->wq_lock[i]);
2594
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002595 /* Register net device
2596 */
2597
2598 enic->port_mtu = enic->config.mtu;
2599 (void)enic_change_mtu(netdev, enic->port_mtu);
2600
2601 err = enic_set_mac_addr(netdev, enic->mac_addr);
2602 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002603 dev_err(dev, "Invalid MAC address, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002604 goto err_out_dev_deinit;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002605 }
2606
Scott Feldman7c844592009-12-23 13:27:54 +00002607 enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
Sujith Sankar7c2ce6e2014-05-20 03:14:05 +05302608	/* The rx coalesce time was already initialized. It is used
2609	 * if adaptive coalescing is turned off.
2610 */
Scott Feldman7c844592009-12-23 13:27:54 +00002611 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
2612
Roopa Prabhu73359032012-01-18 04:24:02 +00002613 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002614 netdev->netdev_ops = &enic_netdev_dynamic_ops;
2615 else
2616 netdev->netdev_ops = &enic_netdev_ops;
2617
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002618 netdev->watchdog_timeo = 2 * HZ;
Neel Patelf13bbc22013-07-22 09:59:18 -07002619 enic_set_ethtool_ops(netdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002620
Patrick McHardyf6469682013-04-19 02:04:27 +00002621 netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
Vasanthy Kolluri1825aca2010-06-24 10:51:59 +00002622 if (ENIC_SETTING(enic, LOOP)) {
Patrick McHardyf6469682013-04-19 02:04:27 +00002623 netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
Vasanthy Kolluri1825aca2010-06-24 10:51:59 +00002624 enic->loop_enable = 1;
2625 enic->loop_tag = enic->config.loop_tag;
2626 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
2627 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002628 if (ENIC_SETTING(enic, TXCSUM))
Michał Mirosław5ec8f9b2011-04-07 02:43:48 +00002629 netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002630 if (ENIC_SETTING(enic, TSO))
Michał Mirosław5ec8f9b2011-04-07 02:43:48 +00002631 netdev->hw_features |= NETIF_F_TSO |
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002632 NETIF_F_TSO6 | NETIF_F_TSO_ECN;
govindarajulu.vbf751ba2013-09-04 11:17:15 +05302633 if (ENIC_SETTING(enic, RSS))
2634 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław5ec8f9b2011-04-07 02:43:48 +00002635 if (ENIC_SETTING(enic, RXCSUM))
2636 netdev->hw_features |= NETIF_F_RXCSUM;
2637
2638 netdev->features |= netdev->hw_features;
2639
Govindarajulu Varadarajana145df22014-06-23 16:08:02 +05302640#ifdef CONFIG_RFS_ACCEL
2641 netdev->hw_features |= NETIF_F_NTUPLE;
2642#endif
2643
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002644 if (using_dac)
2645 netdev->features |= NETIF_F_HIGHDMA;
2646
Jiri Pirko01789342011-08-16 06:29:00 +00002647 netdev->priv_flags |= IFF_UNICAST_FLT;
2648
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002649 err = register_netdev(netdev);
2650 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002651 dev_err(dev, "Cannot register net device, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002652 goto err_out_dev_deinit;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002653 }
Govindarajulu Varadarajana03bb562014-09-03 03:17:19 +05302654 enic->rx_copybreak = RX_COPYBREAK_DEFAULT;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002655
2656 return 0;
2657
Scott Feldman6fdfa972009-09-03 17:02:45 +00002658err_out_dev_deinit:
2659 enic_dev_deinit(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002660err_out_dev_close:
2661 vnic_dev_close(enic->vdev);
Roopa Prabhu8749b422011-09-22 03:44:33 +00002662err_out_disable_sriov:
Roopa Prabhuca2b7212012-01-18 04:24:07 +00002663 kfree(enic->pp);
2664err_out_disable_sriov_pp:
Roopa Prabhu8749b422011-09-22 03:44:33 +00002665#ifdef CONFIG_PCI_IOV
2666 if (enic_sriov_enabled(enic)) {
2667 pci_disable_sriov(pdev);
2668 enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
2669 }
Roopa Prabhu8749b422011-09-22 03:44:33 +00002670#endif
David S. Miller1a692052015-08-21 11:38:41 -07002671err_out_vnic_unregister:
Roopa Prabhu35d87e32012-01-18 04:24:12 +00002672 vnic_dev_unregister(enic->vdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002673err_out_iounmap:
2674 enic_iounmap(enic);
2675err_out_release_regions:
2676 pci_release_regions(pdev);
2677err_out_disable_device:
2678 pci_disable_device(pdev);
2679err_out_free_netdev:
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002680 free_netdev(netdev);
2681
2682 return err;
2683}
2684
Bill Pemberton854de922012-12-03 09:23:05 -05002685static void enic_remove(struct pci_dev *pdev)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002686{
2687 struct net_device *netdev = pci_get_drvdata(pdev);
2688
2689 if (netdev) {
2690 struct enic *enic = netdev_priv(netdev);
2691
Tejun Heo23f333a2010-12-12 16:45:14 +01002692 cancel_work_sync(&enic->reset);
Roopa Prabhuc97c8942011-06-03 14:35:17 +00002693 cancel_work_sync(&enic->change_mtu_work);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002694 unregister_netdev(netdev);
Scott Feldman6fdfa972009-09-03 17:02:45 +00002695 enic_dev_deinit(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002696 vnic_dev_close(enic->vdev);
Roopa Prabhu8749b422011-09-22 03:44:33 +00002697#ifdef CONFIG_PCI_IOV
2698 if (enic_sriov_enabled(enic)) {
2699 pci_disable_sriov(pdev);
2700 enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
2701 }
2702#endif
Roopa Prabhu3f192792011-09-22 03:44:43 +00002703 kfree(enic->pp);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002704 vnic_dev_unregister(enic->vdev);
2705 enic_iounmap(enic);
2706 pci_release_regions(pdev);
2707 pci_disable_device(pdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002708 free_netdev(netdev);
2709 }
2710}
2711
2712static struct pci_driver enic_driver = {
2713 .name = DRV_NAME,
2714 .id_table = enic_id_table,
2715 .probe = enic_probe,
Bill Pemberton854de922012-12-03 09:23:05 -05002716 .remove = enic_remove,
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002717};
2718
2719static int __init enic_init_module(void)
2720{
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002721 pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002722
2723 return pci_register_driver(&enic_driver);
2724}
2725
2726static void __exit enic_cleanup_module(void)
2727{
2728 pci_unregister_driver(&enic_driver);
2729}
2730
2731module_init(enic_init_module);
2732module_exit(enic_cleanup_module);