/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
#include <linux/ktime.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"
#include "enic_clsf.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
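/* Worked example (editorial note, not from the original source, assuming
 * WQ_ENET_LEN_BITS is 14 as defined in the WQ descriptor headers, which are
 * not shown here): WQ_ENET_MAX_DESC_LEN is then 16384 bytes, so a maximal
 * 64 KB TSO send needs at most 65536 / 16384 + 1 = 5 descriptors per fragment.
 */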

#define PCI_DEVICE_ID_CISCO_VIC_ENET		0x0043	/* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF		0x0071	/* enet SRIOV VF */

/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

#define ENIC_LARGE_PKT_THRESHOLD		1000
#define ENIC_MAX_COALESCE_TIMERS		10
/* Interrupt moderation table, which will be used to decide the
 * coalescing timer values
 * {rx_rate in Mbps, mapping percentage of the range}
 */
struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
	{4000, 0},
	{4400, 10},
	{5060, 20},
	{5230, 30},
	{5540, 40},
	{5820, 50},
	{6120, 60},
	{6435, 70},
	{6745, 80},
	{7000, 90},
	{0xFFFFFFFF, 100}
};

/* This table helps the driver to pick different ranges for rx coalescing
 * timer depending on the link speed.
 */
struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
	{0, 0}, /* 0 - 4 Gbps */
	{0, 3}, /* 4 - 10 Gbps */
	{3, 6}, /* 10 - 40 Gbps */
};
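/* Sketch of how the two tables combine (editorial note based on
 * enic_calc_int_moderation() below, not part of the original comments): the
 * link speed selects a row of mod_range, which supplies the range-start
 * values for small and large packets, while the measured rx rate selects a
 * row of mod_table, which gives a percentage of the chosen
 * [range_start, range_end] window.  For example, a measured rate of
 * 5000 Mbps falls below the 5060 entry, so the coalescing timer is placed
 * 20% of the way into the chosen range.
 */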

int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
	return vf >= 0 && vf < enic->num_vfs;
#else
	return 0;
#endif
}

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_subqueue(enic->netdev, q_number);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}

static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				i, error_status);
	}
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					"interface MTU (%d) set higher "
					"than switch port MTU (%d)\n",
					netdev->mtu, mtu);
		}
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr)) {
		if (napi_schedule_prep(&enic->napi[0]))
			__napi_schedule(&enic->napi[0]);
	} else {
		vnic_intr_unmask(&enic->intr[io_intr]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it.  The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated.  Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule(&enic->napi[0]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct napi_struct *napi = data;

	/* schedule NAPI polling for RQ cleanup */
	napi_schedule(napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int cq;
	unsigned int intr;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int wq_work_done;
	unsigned int wq_irq;

	wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector;
	cq = enic_cq_wq(enic, wq_irq);
	intr = enic_msix_wq_intr(enic, wq_irq);

	wq_work_done = vnic_cq_service(&enic->cq[cq],
		wq_work_to_do, enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[intr],
		wq_work_done,
		1 /* unmask intr */,
		1 /* reset intr timer */);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}

static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left, int loopback)
{
	const skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		enic_queue_wq_desc_cont(wq, skb,
			skb_frag_dma_map(&enic->pdev->dev,
					 frag, 0, skb_frag_size(frag),
					 DMA_TO_DEVICE),
			skb_frag_size(frag),
			(len_left == 0),	/* EOP? */
			loopback);
	}
}

static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero.  HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
				len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		frag_len_left = skb_frag_size(frag);
		offset = 0;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
						    offset, len,
						    DMA_TO_DEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				(len == frag_len_left),	/* EOP? */
				loopback);
			frag_len_left -= len;
			offset += len;
		}
	}
}

static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;

	if (vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag, loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
}

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq;
	unsigned long flags;
	unsigned int txq_map;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
	wq = &enic->wq[txq_map];

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[txq_map], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));

	spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);

	return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
	struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;

	enic_dev_stats_dump(enic, &stats);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}

static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
		unsigned int mc_count = netdev_mc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
			    ENIC_MULTICAST_PERFECT_FILTERS, mc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, mc_addr);
	enic->mc_count++;

	return 0;
}

static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, mc_addr);
	enic->mc_count--;

	return 0;
}

static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
		unsigned int uc_count = netdev_uc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
			    ENIC_UNICAST_PERFECT_FILTERS, uc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, uc_addr);
	enic->uc_count++;

	return 0;
}

static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, uc_addr);
	enic->uc_count--;

	return 0;
}

void enic_reset_addr_lists(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;

	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);

	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		__dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
		if (!allmulti)
			__dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
	}
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
		if (vf == PORT_SELF_VF) {
			memcpy(pp->vf_mac, mac, ETH_ALEN);
			return 0;
		} else {
			/*
			 * For sriov vf's set the mac in hw
			 */
			ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
				vnic_dev_set_mac_addr, mac);
			return enic_dev_status_to_errno(err);
		}
	} else
		return -EINVAL;
}

static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	struct enic_port_profile *pp;
	int err = 0, restore_pp = 1;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, pp, sizeof(*enic->pp));
	memset(pp, 0, sizeof(*enic->pp));

	pp->set |= ENIC_SET_REQUEST;
	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		pp->set |= ENIC_SET_NAME;
		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		pp->set |= ENIC_SET_INSTANCE;
		memcpy(pp->instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		pp->set |= ENIC_SET_HOST;
		memcpy(pp->host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	if (vf == PORT_SELF_VF) {
		/* Special case handling: mac came from IFLA_VF_MAC */
		if (!is_zero_ether_addr(prev_pp.vf_mac))
			memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

		if (is_zero_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	} else {
		/* SR-IOV VF: get mac from adapter */
		ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
			vnic_dev_get_mac_addr, pp->mac_addr);
		if (err) {
			netdev_err(netdev, "Error getting mac for vf %d\n", vf);
			memcpy(pp, &prev_pp, sizeof(*pp));
			return enic_dev_status_to_errno(err);
		}
	}

	err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(pp, &prev_pp, sizeof(*pp));
		} else {
			memset(pp, 0, sizeof(*pp));
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		pp->set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
			memset(pp->mac_addr, 0, ETH_ALEN);
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	}

	if (vf == PORT_SELF_VF)
		memset(pp->vf_mac, 0, ETH_ALEN);

	return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, vf, pp->request, &response);
	if (err)
		return err;

	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
	    ((pp->set & ENIC_SET_NAME) &&
	    nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
	    ((pp->set & ENIC_SET_INSTANCE) &&
	    nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
		    pp->instance_uuid)) ||
	    ((pp->set & ENIC_SET_HOST) &&
	    nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
}

static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}

static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
				      u32 pkt_len)
{
	if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
		pkt_size->large_pkt_bytes_cnt += pkt_len;
	else
		pkt_size->small_pkt_bytes_cnt += pkt_len;
}

static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, q_number);
		if (netdev->features & NETIF_F_RXHASH) {
			skb_set_hash(skb, rss_hash,
				     (rss_type &
				      (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) ?
				     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
		}

		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

		if (netdev->features & NETIF_F_GRO)
			napi_gro_receive(&enic->napi[q_number], skb);
		else
			netif_receive_skb(skb);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_intr_update_pkt_size(&cq->pkt_size_counter,
						  bytes_written);
	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}

static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_legacy_io_intr();
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int work_done, rq_work_done = 0, wq_work_done;
	int err;

	/* Service RQ (first) and WQ
	 */

	if (budget > 0)
		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
			rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of
	 * a WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return rq_work_done;
}

static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	unsigned int intr = enic_msix_rq_intr(enic, rq->index);
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	u32 timer = cq->tobe_rx_coal_timeval;

	if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
		cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
	}
}

static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
	int index;
	u32 timer;
	u32 range_start;
	u32 traffic;
	u64 delta;
	ktime_t now = ktime_get();

	delta = ktime_us_delta(now, cq->prev_ts);
	if (delta < ENIC_AIC_TS_BREAK)
		return;
	cq->prev_ts = now;

	traffic = pkt_size_counter->large_pkt_bytes_cnt +
		  pkt_size_counter->small_pkt_bytes_cnt;
	/* The table takes Mbps
	 * traffic *= 8    => bits
	 * traffic *= (10^6 / delta)    => bps
	 * traffic /= 10^6     => Mbps
	 *
	 * Combining, traffic *= (8 / delta)
	 */
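	/* Worked example (editorial note, not from the original source):
	 * 125,000 bytes received over a delta of 200 us gives
	 * traffic = 125000 * 8 / 200 = 5000, i.e. roughly 5000 Mbps, which
	 * the mod_table lookup below maps to the 20% row.
	 */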
1179
1180 traffic <<= 3;
Govindarajulu Varadarajan958c4922014-05-26 15:52:43 +05301181 traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
Sujith Sankar7c2ce6e2014-05-20 03:14:05 +05301182
1183 for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
1184 if (traffic < mod_table[index].rx_rate)
1185 break;
1186 range_start = (pkt_size_counter->small_pkt_bytes_cnt >
1187 pkt_size_counter->large_pkt_bytes_cnt << 1) ?
1188 rx_coal->small_pkt_range_start :
1189 rx_coal->large_pkt_range_start;
1190 timer = range_start + ((rx_coal->range_end - range_start) *
1191 mod_table[index].range_percent / 100);
1192 /* Damping */
1193 cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
1194
1195 pkt_size_counter->large_pkt_bytes_cnt = 0;
1196 pkt_size_counter->small_pkt_bytes_cnt = 0;
1197}
1198
Govindarajulu Varadarajanb6e97c12014-06-23 16:08:01 +05301199#ifdef CONFIG_RFS_ACCEL
1200static void enic_free_rx_cpu_rmap(struct enic *enic)
1201{
1202 free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
1203 enic->netdev->rx_cpu_rmap = NULL;
1204}
1205
1206static void enic_set_rx_cpu_rmap(struct enic *enic)
1207{
1208 int i, res;
1209
1210 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
1211 enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
1212 if (unlikely(!enic->netdev->rx_cpu_rmap))
1213 return;
1214 for (i = 0; i < enic->rq_count; i++) {
1215 res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
1216 enic->msix_entry[i].vector);
1217 if (unlikely(res)) {
1218 enic_free_rx_cpu_rmap(enic);
1219 return;
1220 }
1221 }
1222 }
1223}
1224
1225#else
1226
1227static void enic_free_rx_cpu_rmap(struct enic *enic)
1228{
1229}
1230
1231static void enic_set_rx_cpu_rmap(struct enic *enic)
1232{
1233}
1234
1235#endif /* CONFIG_RFS_ACCEL */
1236
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001237static int enic_poll_msix(struct napi_struct *napi, int budget)
1238{
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001239 struct net_device *netdev = napi->dev;
1240 struct enic *enic = netdev_priv(netdev);
1241 unsigned int rq = (napi - &enic->napi[0]);
1242 unsigned int cq = enic_cq_rq(enic, rq);
1243 unsigned int intr = enic_msix_rq_intr(enic, rq);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001244 unsigned int work_to_do = budget;
Eric W. Biederman4c502542014-03-14 18:02:08 -07001245 unsigned int work_done = 0;
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001246 int err;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001247
1248 /* Service RQ
1249 */
1250
Eric W. Biederman4c502542014-03-14 18:02:08 -07001251 if (budget > 0)
1252 work_done = vnic_cq_service(&enic->cq[cq],
1253 work_to_do, enic_rq_service, NULL);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001254
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001255 /* Return intr event credits for this polling
1256 * cycle. An intr event is the completion of a
1257 * RQ packet.
1258 */
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001259
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001260 if (work_done > 0)
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001261 vnic_intr_return_credits(&enic->intr[intr],
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001262 work_done,
1263 0 /* don't unmask intr */,
1264 0 /* don't reset intr timer */);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001265
Vasanthy Kolluri0eb26022011-02-04 16:17:21 +00001266 err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001267
1268 /* Buffer allocation failed. Stay in polling mode
1269 * so we can try to fill the ring again.
1270 */
1271
1272 if (err)
1273 work_done = work_to_do;
Sujith Sankar7c2ce6e2014-05-20 03:14:05 +05301274 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
1275 /* Call the function which refreshes
1276 * the intr coalescing timer value based on
1277 * the traffic. This is supported only in
1278 * the case of MSI-x mode
1279 */
1280 enic_calc_int_moderation(enic, &enic->rq[rq]);
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001281
1282 if (work_done < work_to_do) {
1283
1284 /* Some work done, but not enough to stay in polling,
Vasanthy Kolluri88132f52010-06-24 10:49:25 +00001285 * exit polling
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001286 */
1287
Ben Hutchings288379f2009-01-19 16:43:59 -08001288 napi_complete(napi);
Sujith Sankar7c2ce6e2014-05-20 03:14:05 +05301289 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
1290 enic_set_int_moderation(enic, &enic->rq[rq]);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001291 vnic_intr_unmask(&enic->intr[intr]);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001292 }
1293
1294 return work_done;
1295}
1296
1297static void enic_notify_timer(unsigned long data)
1298{
1299 struct enic *enic = (struct enic *)data;
1300
1301 enic_notify_check(enic);
1302
Scott Feldman25f0a062008-09-24 11:23:32 -07001303 mod_timer(&enic->notify_timer,
1304 round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001305}
1306
1307static void enic_free_intr(struct enic *enic)
1308{
1309 struct net_device *netdev = enic->netdev;
1310 unsigned int i;
1311
Govindarajulu Varadarajanb6e97c12014-06-23 16:08:01 +05301312 enic_free_rx_cpu_rmap(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001313 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1314 case VNIC_DEV_INTR_MODE_INTX:
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001315 free_irq(enic->pdev->irq, netdev);
1316 break;
Scott Feldman8f4d2482008-09-24 11:23:42 -07001317 case VNIC_DEV_INTR_MODE_MSI:
1318 free_irq(enic->pdev->irq, enic);
1319 break;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001320 case VNIC_DEV_INTR_MODE_MSIX:
1321 for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
1322 if (enic->msix[i].requested)
1323 free_irq(enic->msix_entry[i].vector,
1324 enic->msix[i].devid);
1325 break;
1326 default:
1327 break;
1328 }
1329}
1330
1331static int enic_request_intr(struct enic *enic)
1332{
1333 struct net_device *netdev = enic->netdev;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001334 unsigned int i, intr;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001335 int err = 0;
1336
Govindarajulu Varadarajanb6e97c12014-06-23 16:08:01 +05301337 enic_set_rx_cpu_rmap(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001338 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1339
1340 case VNIC_DEV_INTR_MODE_INTX:
1341
1342 err = request_irq(enic->pdev->irq, enic_isr_legacy,
1343 IRQF_SHARED, netdev->name, netdev);
1344 break;
1345
1346 case VNIC_DEV_INTR_MODE_MSI:
1347
1348 err = request_irq(enic->pdev->irq, enic_isr_msi,
1349 0, netdev->name, enic);
1350 break;
1351
1352 case VNIC_DEV_INTR_MODE_MSIX:
1353
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001354 for (i = 0; i < enic->rq_count; i++) {
1355 intr = enic_msix_rq_intr(enic, i);
Dan Carpenter4505f402013-01-17 21:46:18 +00001356 snprintf(enic->msix[intr].devname,
1357 sizeof(enic->msix[intr].devname),
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001358 "%.11s-rx-%d", netdev->name, i);
1359 enic->msix[intr].isr = enic_isr_msix_rq;
1360 enic->msix[intr].devid = &enic->napi[i];
1361 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001362
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001363 for (i = 0; i < enic->wq_count; i++) {
1364 intr = enic_msix_wq_intr(enic, i);
Dan Carpenter4505f402013-01-17 21:46:18 +00001365 snprintf(enic->msix[intr].devname,
1366 sizeof(enic->msix[intr].devname),
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001367 "%.11s-tx-%d", netdev->name, i);
1368 enic->msix[intr].isr = enic_isr_msix_wq;
1369 enic->msix[intr].devid = enic;
1370 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001371
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001372 intr = enic_msix_err_intr(enic);
Dan Carpenter4505f402013-01-17 21:46:18 +00001373 snprintf(enic->msix[intr].devname,
1374 sizeof(enic->msix[intr].devname),
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001375 "%.11s-err", netdev->name);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001376 enic->msix[intr].isr = enic_isr_msix_err;
1377 enic->msix[intr].devid = enic;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001378
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001379 intr = enic_msix_notify_intr(enic);
Dan Carpenter4505f402013-01-17 21:46:18 +00001380 snprintf(enic->msix[intr].devname,
1381 sizeof(enic->msix[intr].devname),
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001382 "%.11s-notify", netdev->name);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001383 enic->msix[intr].isr = enic_isr_msix_notify;
1384 enic->msix[intr].devid = enic;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001385
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001386 for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
1387 enic->msix[i].requested = 0;
1388
1389 for (i = 0; i < enic->intr_count; i++) {
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001390 err = request_irq(enic->msix_entry[i].vector,
1391 enic->msix[i].isr, 0,
1392 enic->msix[i].devname,
1393 enic->msix[i].devid);
1394 if (err) {
1395 enic_free_intr(enic);
1396 break;
1397 }
1398 enic->msix[i].requested = 1;
1399 }
1400
1401 break;
1402
1403 default:
1404 break;
1405 }
1406
1407 return err;
1408}
1409
Scott Feldmanb3d18d12009-12-23 13:27:30 +00001410static void enic_synchronize_irqs(struct enic *enic)
1411{
1412 unsigned int i;
1413
1414 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1415 case VNIC_DEV_INTR_MODE_INTX:
1416 case VNIC_DEV_INTR_MODE_MSI:
1417 synchronize_irq(enic->pdev->irq);
1418 break;
1419 case VNIC_DEV_INTR_MODE_MSIX:
1420 for (i = 0; i < enic->intr_count; i++)
1421 synchronize_irq(enic->msix_entry[i].vector);
1422 break;
1423 default:
1424 break;
1425 }
1426}
1427
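/* Choose the default adaptive RX coalescing range from the link speed
 * reported by firmware.  Adaptive coalescing is used only with MSI-X;
 * each RQ's timer starts from the UCSM-provided intr_timer_usec value.
 */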
Sujith Sankar7c2ce6e2014-05-20 03:14:05 +05301428static void enic_set_rx_coal_setting(struct enic *enic)
1429{
1430 unsigned int speed;
1431 int index = -1;
1432 struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
1433
1434 /* If intr mode is not MSIX, do not do adaptive coalescing */
1435 if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
		netdev_info(enic->netdev, "INTR mode is not MSI-X, not initializing adaptive coalescing\n");
1437 return;
1438 }
1439
1440 /* 1. Read the link speed from fw
1441 * 2. Pick the default range for the speed
1442 * 3. Update it in enic->rx_coalesce_setting
1443 */
1444 speed = vnic_dev_port_speed(enic->vdev);
1445 if (ENIC_LINK_SPEED_10G < speed)
1446 index = ENIC_LINK_40G_INDEX;
1447 else if (ENIC_LINK_SPEED_4G < speed)
1448 index = ENIC_LINK_10G_INDEX;
1449 else
1450 index = ENIC_LINK_4G_INDEX;
1451
1452 rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
1453 rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
1454 rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
1455
1456 /* Start with the value provided by UCSM */
1457 for (index = 0; index < enic->rq_count; index++)
1458 enic->cq[index].cur_rx_coal_timeval =
1459 enic->config.intr_timer_usec;
1460
1461 rx_coal->use_adaptive_rx_coalesce = 1;
1462}
1463
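/* Tell firmware which interrupt (if any) to use for notify events:
 * the legacy notify interrupt for INTx, the dedicated last vector for
 * MSI-X, and no interrupt otherwise (MSI relies on the notify timer).
 */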
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001464static int enic_dev_notify_set(struct enic *enic)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001465{
1466 int err;
1467
Tony Camuso8e091342014-06-23 16:08:03 +05301468 spin_lock_bh(&enic->devcmd_lock);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001469 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1470 case VNIC_DEV_INTR_MODE_INTX:
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001471 err = vnic_dev_notify_set(enic->vdev,
1472 enic_legacy_notify_intr());
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001473 break;
1474 case VNIC_DEV_INTR_MODE_MSIX:
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001475 err = vnic_dev_notify_set(enic->vdev,
1476 enic_msix_notify_intr(enic));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001477 break;
1478 default:
1479 err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
1480 break;
1481 }
Tony Camuso8e091342014-06-23 16:08:03 +05301482 spin_unlock_bh(&enic->devcmd_lock);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001483
1484 return err;
1485}
1486
1487static void enic_notify_timer_start(struct enic *enic)
1488{
1489 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1490 case VNIC_DEV_INTR_MODE_MSI:
1491 mod_timer(&enic->notify_timer, jiffies);
1492 break;
1493 default:
1494 /* Using intr for notification for INTx/MSI-X */
1495 break;
Joe Perches6403eab2011-06-03 11:51:20 +00001496 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001497}
1498
1499/* rtnl lock is held, process context */
1500static int enic_open(struct net_device *netdev)
1501{
1502 struct enic *enic = netdev_priv(netdev);
1503 unsigned int i;
1504 int err;
1505
Scott Feldman4b75a442008-09-24 11:23:53 -07001506 err = enic_request_intr(enic);
1507 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001508 netdev_err(netdev, "Unable to request irq.\n");
Scott Feldman4b75a442008-09-24 11:23:53 -07001509 return err;
1510 }
1511
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001512 err = enic_dev_notify_set(enic);
Scott Feldman4b75a442008-09-24 11:23:53 -07001513 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001514 netdev_err(netdev,
1515 "Failed to alloc notify buffer, aborting.\n");
Scott Feldman4b75a442008-09-24 11:23:53 -07001516 goto err_out_free_intr;
1517 }
1518
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001519 for (i = 0; i < enic->rq_count; i++) {
Vasanthy Kolluri0eb26022011-02-04 16:17:21 +00001520 vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001521 /* Need at least one buffer on ring to get going */
1522 if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001523 netdev_err(netdev, "Unable to alloc receive buffers\n");
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001524 err = -ENOMEM;
Scott Feldman4b75a442008-09-24 11:23:53 -07001525 goto err_out_notify_unset;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001526 }
1527 }
1528
1529 for (i = 0; i < enic->wq_count; i++)
1530 vnic_wq_enable(&enic->wq[i]);
1531 for (i = 0; i < enic->rq_count; i++)
1532 vnic_rq_enable(&enic->rq[i]);
1533
Roopa Prabhu73359032012-01-18 04:24:02 +00001534 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
Roopa Prabhu296390592010-12-08 13:54:03 +00001535 enic_dev_add_station_addr(enic);
Roopa Prabhu3f192792011-09-22 03:44:43 +00001536
Roopa Prabhu319d7e82010-12-08 13:19:58 +00001537 enic_set_rx_mode(netdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001538
govindarajulu.v822473b2013-09-04 11:17:14 +05301539 netif_tx_wake_all_queues(netdev);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001540
1541 for (i = 0; i < enic->rq_count; i++)
1542 napi_enable(&enic->napi[i]);
1543
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001544 enic_dev_enable(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001545
1546 for (i = 0; i < enic->intr_count; i++)
1547 vnic_intr_unmask(&enic->intr[i]);
1548
1549 enic_notify_timer_start(enic);
Govindarajulu Varadarajana145df22014-06-23 16:08:02 +05301550 enic_rfs_flw_tbl_init(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001551
1552 return 0;
Scott Feldman4b75a442008-09-24 11:23:53 -07001553
1554err_out_notify_unset:
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001555 enic_dev_notify_unset(enic);
Scott Feldman4b75a442008-09-24 11:23:53 -07001556err_out_free_intr:
1557 enic_free_intr(enic);
1558
1559 return err;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001560}
1561
1562/* rtnl lock is held, process context */
1563static int enic_stop(struct net_device *netdev)
1564{
1565 struct enic *enic = netdev_priv(netdev);
1566 unsigned int i;
1567 int err;
1568
Vasanthy Kolluri29046f92010-06-24 10:52:26 +00001569 for (i = 0; i < enic->intr_count; i++) {
Scott Feldmanb3d18d12009-12-23 13:27:30 +00001570 vnic_intr_mask(&enic->intr[i]);
Vasanthy Kolluri29046f92010-06-24 10:52:26 +00001571 (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
1572 }
Scott Feldmanb3d18d12009-12-23 13:27:30 +00001573
1574 enic_synchronize_irqs(enic);
1575
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001576 del_timer_sync(&enic->notify_timer);
Govindarajulu Varadarajana145df22014-06-23 16:08:02 +05301577 enic_rfs_flw_tbl_free(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001578
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001579 enic_dev_disable(enic);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001580
1581 for (i = 0; i < enic->rq_count; i++)
1582 napi_disable(&enic->napi[i]);
1583
Scott Feldmanb3d18d12009-12-23 13:27:30 +00001584 netif_carrier_off(netdev);
1585 netif_tx_disable(netdev);
Roopa Prabhu3f192792011-09-22 03:44:43 +00001586
Roopa Prabhu73359032012-01-18 04:24:02 +00001587 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
Roopa Prabhu296390592010-12-08 13:54:03 +00001588 enic_dev_del_station_addr(enic);
Scott Feldmanf8bd9092010-05-17 22:50:19 -07001589
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001590 for (i = 0; i < enic->wq_count; i++) {
1591 err = vnic_wq_disable(&enic->wq[i]);
1592 if (err)
1593 return err;
1594 }
1595 for (i = 0; i < enic->rq_count; i++) {
1596 err = vnic_rq_disable(&enic->rq[i]);
1597 if (err)
1598 return err;
1599 }
1600
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001601 enic_dev_notify_unset(enic);
Scott Feldman4b75a442008-09-24 11:23:53 -07001602 enic_free_intr(enic);
1603
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001604 for (i = 0; i < enic->wq_count; i++)
1605 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
1606 for (i = 0; i < enic->rq_count; i++)
1607 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
1608 for (i = 0; i < enic->cq_count; i++)
1609 vnic_cq_clean(&enic->cq[i]);
1610 for (i = 0; i < enic->intr_count; i++)
1611 vnic_intr_clean(&enic->intr[i]);
1612
1613 return 0;
1614}
1615
1616static int enic_change_mtu(struct net_device *netdev, int new_mtu)
1617{
1618 struct enic *enic = netdev_priv(netdev);
1619 int running = netif_running(netdev);
1620
Scott Feldman25f0a062008-09-24 11:23:32 -07001621 if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
1622 return -EINVAL;
1623
Roopa Prabhu73359032012-01-18 04:24:02 +00001624 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
Roopa Prabhuc97c8942011-06-03 14:35:17 +00001625 return -EOPNOTSUPP;
1626
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001627 if (running)
1628 enic_stop(netdev);
1629
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001630 netdev->mtu = new_mtu;
1631
1632 if (netdev->mtu > enic->port_mtu)
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001633 netdev_warn(netdev,
1634 "interface MTU (%d) set higher than port MTU (%d)\n",
1635 netdev->mtu, enic->port_mtu);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001636
1637 if (running)
1638 enic_open(netdev);
1639
1640 return 0;
1641}
1642
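/* Worker that applies the MTU reported by firmware (vnic_dev_mtu()),
 * clamped to [ENIC_MIN_MTU, ENIC_MAX_MTU]: under rtnl it quiesces RQ 0,
 * refills it with new_mtu-sized buffers, and restarts the queue.
 */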
Roopa Prabhuc97c8942011-06-03 14:35:17 +00001643static void enic_change_mtu_work(struct work_struct *work)
1644{
1645 struct enic *enic = container_of(work, struct enic, change_mtu_work);
1646 struct net_device *netdev = enic->netdev;
1647 int new_mtu = vnic_dev_mtu(enic->vdev);
1648 int err;
1649 unsigned int i;
1650
1651 new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
1652
1653 rtnl_lock();
1654
1655 /* Stop RQ */
1656 del_timer_sync(&enic->notify_timer);
1657
1658 for (i = 0; i < enic->rq_count; i++)
1659 napi_disable(&enic->napi[i]);
1660
1661 vnic_intr_mask(&enic->intr[0]);
1662 enic_synchronize_irqs(enic);
1663 err = vnic_rq_disable(&enic->rq[0]);
1664 if (err) {
Konstantin Khlebnikove0575902013-07-08 11:22:51 +04001665 rtnl_unlock();
Roopa Prabhuc97c8942011-06-03 14:35:17 +00001666 netdev_err(netdev, "Unable to disable RQ.\n");
1667 return;
1668 }
1669 vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
1670 vnic_cq_clean(&enic->cq[0]);
1671 vnic_intr_clean(&enic->intr[0]);
1672
1673 /* Fill RQ with new_mtu-sized buffers */
1674 netdev->mtu = new_mtu;
1675 vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
1676 /* Need at least one buffer on ring to get going */
1677 if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
Konstantin Khlebnikove0575902013-07-08 11:22:51 +04001678 rtnl_unlock();
Roopa Prabhuc97c8942011-06-03 14:35:17 +00001679 netdev_err(netdev, "Unable to alloc receive buffers.\n");
1680 return;
1681 }
1682
1683 /* Start RQ */
1684 vnic_rq_enable(&enic->rq[0]);
1685 napi_enable(&enic->napi[0]);
1686 vnic_intr_unmask(&enic->intr[0]);
1687 enic_notify_timer_start(enic);
1688
1689 rtnl_unlock();
1690
	netdev_info(netdev, "interface MTU set to %d\n", netdev->mtu);
1692}
1693
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001694#ifdef CONFIG_NET_POLL_CONTROLLER
1695static void enic_poll_controller(struct net_device *netdev)
1696{
1697 struct enic *enic = netdev_priv(netdev);
1698 struct vnic_dev *vdev = enic->vdev;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001699 unsigned int i, intr;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001700
1701 switch (vnic_dev_get_intr_mode(vdev)) {
1702 case VNIC_DEV_INTR_MODE_MSIX:
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001703 for (i = 0; i < enic->rq_count; i++) {
1704 intr = enic_msix_rq_intr(enic, i);
Vasanthy Kolluri79aeec52010-12-08 13:05:45 +00001705 enic_isr_msix_rq(enic->msix_entry[intr].vector,
1706 &enic->napi[i]);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001707 }
Vasanthy Kollurib880a952011-06-09 10:37:07 +00001708
1709 for (i = 0; i < enic->wq_count; i++) {
1710 intr = enic_msix_wq_intr(enic, i);
1711 enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
1712 }
1713
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001714 break;
1715 case VNIC_DEV_INTR_MODE_MSI:
1716 enic_isr_msi(enic->pdev->irq, enic);
1717 break;
1718 case VNIC_DEV_INTR_MODE_INTX:
1719 enic_isr_legacy(enic->pdev->irq, netdev);
1720 break;
1721 default:
1722 break;
1723 }
1724}
1725#endif
1726
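/* Kick off a device command via start() and poll finished() every 100 ms
 * for up to 2 seconds.  Returns 0 on completion, the devcmd error, or
 * -ETIMEDOUT; e.g. enic_dev_open() below uses it to wait for vnic_dev_open.
 */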
1727static int enic_dev_wait(struct vnic_dev *vdev,
1728 int (*start)(struct vnic_dev *, int),
1729 int (*finished)(struct vnic_dev *, int *),
1730 int arg)
1731{
1732 unsigned long time;
1733 int done;
1734 int err;
1735
1736 BUG_ON(in_interrupt());
1737
1738 err = start(vdev, arg);
1739 if (err)
1740 return err;
1741
1742 /* Wait for func to complete...2 seconds max
1743 */
1744
1745 time = jiffies + (HZ * 2);
1746 do {
1747
1748 err = finished(vdev, &done);
1749 if (err)
1750 return err;
1751
1752 if (done)
1753 return 0;
1754
1755 schedule_timeout_uninterruptible(HZ / 10);
1756
1757 } while (time_after(time, jiffies));
1758
1759 return -ETIMEDOUT;
1760}
1761
1762static int enic_dev_open(struct enic *enic)
1763{
1764 int err;
1765
1766 err = enic_dev_wait(enic->vdev, vnic_dev_open,
1767 vnic_dev_open_done, 0);
1768 if (err)
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001769 dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
1770 err);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001771
1772 return err;
1773}
1774
Vasanthy Kolluri99ef5632010-06-24 10:50:00 +00001775static int enic_dev_hang_reset(struct enic *enic)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001776{
1777 int err;
1778
Vasanthy Kolluri99ef5632010-06-24 10:50:00 +00001779 err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
1780 vnic_dev_hang_reset_done, 0);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001781 if (err)
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001782 netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
1783 err);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001784
1785 return err;
1786}
1787
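/* Program the 40-byte RSS hash key.  Read as ASCII, the four 10-byte
 * fragments spell "UCSawesome" "PALOunique" "LINUXrocks" "ENICiscool";
 * the key is staged in a DMA-consistent buffer and handed to firmware
 * under devcmd_lock.
 */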
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001788static int enic_set_rsskey(struct enic *enic)
Scott Feldman68f71702009-02-09 23:24:24 -08001789{
Vasanthy Kolluri1f4f0672010-11-15 08:09:55 +00001790 dma_addr_t rss_key_buf_pa;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001791 union vnic_rss_key *rss_key_buf_va = NULL;
1792 union vnic_rss_key rss_key = {
1793 .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
1794 .key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
1795 .key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
1796 .key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
1797 };
1798 int err;
1799
1800 rss_key_buf_va = pci_alloc_consistent(enic->pdev,
1801 sizeof(union vnic_rss_key), &rss_key_buf_pa);
1802 if (!rss_key_buf_va)
1803 return -ENOMEM;
1804
1805 memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
1806
Tony Camuso8e091342014-06-23 16:08:03 +05301807 spin_lock_bh(&enic->devcmd_lock);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001808 err = enic_set_rss_key(enic,
1809 rss_key_buf_pa,
1810 sizeof(union vnic_rss_key));
Tony Camuso8e091342014-06-23 16:08:03 +05301811 spin_unlock_bh(&enic->devcmd_lock);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001812
1813 pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
1814 rss_key_buf_va, rss_key_buf_pa);
1815
1816 return err;
1817}
1818
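/* Program the RSS indirection table: (1 << rss_hash_bits) entries
 * (128 with the 7 hash bits used below) assigned round-robin across
 * the enabled receive queues.
 */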
1819static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
1820{
Vasanthy Kolluri1f4f0672010-11-15 08:09:55 +00001821 dma_addr_t rss_cpu_buf_pa;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001822 union vnic_rss_cpu *rss_cpu_buf_va = NULL;
1823 unsigned int i;
1824 int err;
1825
1826 rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
1827 sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
1828 if (!rss_cpu_buf_va)
1829 return -ENOMEM;
1830
1831 for (i = 0; i < (1 << rss_hash_bits); i++)
1832 (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
1833
Tony Camuso8e091342014-06-23 16:08:03 +05301834 spin_lock_bh(&enic->devcmd_lock);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001835 err = enic_set_rss_cpu(enic,
1836 rss_cpu_buf_pa,
1837 sizeof(union vnic_rss_cpu));
Tony Camuso8e091342014-06-23 16:08:03 +05301838 spin_unlock_bh(&enic->devcmd_lock);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001839
1840 pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
1841 rss_cpu_buf_va, rss_cpu_buf_pa);
1842
1843 return err;
1844}
1845
1846static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
1847 u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
1848{
Scott Feldman68f71702009-02-09 23:24:24 -08001849 const u8 tso_ipid_split_en = 0;
1850 const u8 ig_vlan_strip_en = 1;
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001851 int err;
Scott Feldman68f71702009-02-09 23:24:24 -08001852
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001853 /* Enable VLAN tag stripping.
1854 */
Scott Feldman68f71702009-02-09 23:24:24 -08001855
Tony Camuso8e091342014-06-23 16:08:03 +05301856 spin_lock_bh(&enic->devcmd_lock);
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001857 err = enic_set_nic_cfg(enic,
Scott Feldman68f71702009-02-09 23:24:24 -08001858 rss_default_cpu, rss_hash_type,
1859 rss_hash_bits, rss_base_cpu,
1860 rss_enable, tso_ipid_split_en,
1861 ig_vlan_strip_en);
Tony Camuso8e091342014-06-23 16:08:03 +05301862 spin_unlock_bh(&enic->devcmd_lock);
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001863
1864 return err;
1865}
1866
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001867static int enic_set_rss_nic_cfg(struct enic *enic)
1868{
1869 struct device *dev = enic_get_dev(enic);
1870 const u8 rss_default_cpu = 0;
1871 const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
1872 NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
1873 NIC_CFG_RSS_HASH_TYPE_IPV6 |
1874 NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
1875 const u8 rss_hash_bits = 7;
1876 const u8 rss_base_cpu = 0;
1877 u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
1878
1879 if (rss_enable) {
1880 if (!enic_set_rsskey(enic)) {
1881 if (enic_set_rsscpu(enic, rss_hash_bits)) {
1882 rss_enable = 0;
1883 dev_warn(dev, "RSS disabled, "
1884 "Failed to set RSS cpu indirection table.");
1885 }
1886 } else {
1887 rss_enable = 0;
1888 dev_warn(dev, "RSS disabled, Failed to set RSS key.\n");
1889 }
1890 }
1891
1892 return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
1893 rss_hash_bits, rss_base_cpu, rss_enable);
1894}
1895
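/* Hang-recovery worker: under rtnl and enic_api_lock, stop the interface,
 * hang-reset the vNIC, reinitialize resources, RSS and VLAN rewrite mode,
 * reopen, and raise a NETDEV_REBOOT notification.
 */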
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001896static void enic_reset(struct work_struct *work)
1897{
1898 struct enic *enic = container_of(work, struct enic, reset);
1899
1900 if (!netif_running(enic->netdev))
1901 return;
1902
1903 rtnl_lock();
1904
Neel Patel0b038562013-08-16 15:47:40 -07001905 spin_lock(&enic->enic_api_lock);
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001906 enic_dev_hang_notify(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001907 enic_stop(enic->netdev);
Vasanthy Kolluri99ef5632010-06-24 10:50:00 +00001908 enic_dev_hang_reset(enic);
Vasanthy Kollurie0afe532011-02-17 08:53:12 +00001909 enic_reset_addr_lists(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001910 enic_init_vnic_resources(enic);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001911 enic_set_rss_nic_cfg(enic);
Vasanthy Kollurif8cac142010-06-24 10:49:51 +00001912 enic_dev_set_ig_vlan_rewrite_mode(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001913 enic_open(enic->netdev);
Neel Patel0b038562013-08-16 15:47:40 -07001914 spin_unlock(&enic->enic_api_lock);
Neel Pateld765bb42013-08-16 15:47:41 -07001915 call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001916
1917 rtnl_unlock();
1918}
1919
1920static int enic_set_intr_mode(struct enic *enic)
1921{
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001922 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
Vasanthy Kolluri1cbb1a62011-02-17 13:57:19 +00001923 unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001924 unsigned int i;
1925
1926 /* Set interrupt mode (INTx, MSI, MSI-X) depending
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001927 * on system capabilities.
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001928 *
1929 * Try MSI-X first
1930 *
1931 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
1932 * (the second to last INTR is used for WQ/RQ errors)
1933 * (the last INTR is used for notifications)
1934 */
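	/* For example, with 8 RQs and 8 WQs this asks for n + m + 2 = 18
	 * MSI-X vectors: 8 for RQs, 8 for WQs, 1 for errors, 1 for notify.
	 */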
1935
1936 BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
1937 for (i = 0; i < n + m + 2; i++)
1938 enic->msix_entry[i].entry = i;
1939
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001940 /* Use multiple RQs if RSS is enabled
1941 */
1942
1943 if (ENIC_SETTING(enic, RSS) &&
1944 enic->config.intr_mode < 1 &&
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001945 enic->rq_count >= n &&
1946 enic->wq_count >= m &&
1947 enic->cq_count >= n + m &&
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001948 enic->intr_count >= n + m + 2) {
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001949
Alexander Gordeevabbb6a32014-02-18 11:08:02 +01001950 if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
1951 n + m + 2, n + m + 2) > 0) {
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001952
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001953 enic->rq_count = n;
1954 enic->wq_count = m;
1955 enic->cq_count = n + m;
1956 enic->intr_count = n + m + 2;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001957
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001958 vnic_dev_set_intr_mode(enic->vdev,
1959 VNIC_DEV_INTR_MODE_MSIX);
1960
1961 return 0;
1962 }
1963 }
1964
1965 if (enic->config.intr_mode < 1 &&
1966 enic->rq_count >= 1 &&
1967 enic->wq_count >= m &&
1968 enic->cq_count >= 1 + m &&
1969 enic->intr_count >= 1 + m + 2) {
Alexander Gordeevabbb6a32014-02-18 11:08:02 +01001970 if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
1971 1 + m + 2, 1 + m + 2) > 0) {
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001972
1973 enic->rq_count = 1;
1974 enic->wq_count = m;
1975 enic->cq_count = 1 + m;
1976 enic->intr_count = 1 + m + 2;
1977
1978 vnic_dev_set_intr_mode(enic->vdev,
1979 VNIC_DEV_INTR_MODE_MSIX);
1980
1981 return 0;
1982 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001983 }
1984
1985 /* Next try MSI
1986 *
1987 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
1988 */
1989
1990 if (enic->config.intr_mode < 2 &&
1991 enic->rq_count >= 1 &&
1992 enic->wq_count >= 1 &&
1993 enic->cq_count >= 2 &&
1994 enic->intr_count >= 1 &&
1995 !pci_enable_msi(enic->pdev)) {
1996
1997 enic->rq_count = 1;
1998 enic->wq_count = 1;
1999 enic->cq_count = 2;
2000 enic->intr_count = 1;
2001
2002 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
2003
2004 return 0;
2005 }
2006
2007 /* Next try INTx
2008 *
2009 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
2010 * (the first INTR is used for WQ/RQ)
2011 * (the second INTR is used for WQ/RQ errors)
2012 * (the last INTR is used for notifications)
2013 */
2014
2015 if (enic->config.intr_mode < 3 &&
2016 enic->rq_count >= 1 &&
2017 enic->wq_count >= 1 &&
2018 enic->cq_count >= 2 &&
2019 enic->intr_count >= 3) {
2020
2021 enic->rq_count = 1;
2022 enic->wq_count = 1;
2023 enic->cq_count = 2;
2024 enic->intr_count = 3;
2025
2026 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
2027
2028 return 0;
2029 }
2030
2031 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
2032
2033 return -EINVAL;
2034}
2035
2036static void enic_clear_intr_mode(struct enic *enic)
2037{
2038 switch (vnic_dev_get_intr_mode(enic->vdev)) {
2039 case VNIC_DEV_INTR_MODE_MSIX:
2040 pci_disable_msix(enic->pdev);
2041 break;
2042 case VNIC_DEV_INTR_MODE_MSI:
2043 pci_disable_msi(enic->pdev);
2044 break;
2045 default:
2046 break;
2047 }
2048
2049 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
2050}
2051
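/* netdev_ops for dynamic (port-profile/SR-IOV VF) vNICs.  The table below
 * differs from enic_netdev_ops only in the MAC address handler
 * (enic_set_mac_address_dynamic vs enic_set_mac_address).
 */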
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002052static const struct net_device_ops enic_netdev_dynamic_ops = {
2053 .ndo_open = enic_open,
2054 .ndo_stop = enic_stop,
2055 .ndo_start_xmit = enic_hard_start_xmit,
stephen hemmingerf20530b2011-06-08 14:54:02 +00002056 .ndo_get_stats64 = enic_get_stats,
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002057 .ndo_validate_addr = eth_validate_addr,
Roopa Prabhu319d7e82010-12-08 13:19:58 +00002058 .ndo_set_rx_mode = enic_set_rx_mode,
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002059 .ndo_set_mac_address = enic_set_mac_address_dynamic,
2060 .ndo_change_mtu = enic_change_mtu,
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002061 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
2062 .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
2063 .ndo_tx_timeout = enic_tx_timeout,
2064 .ndo_set_vf_port = enic_set_vf_port,
2065 .ndo_get_vf_port = enic_get_vf_port,
Roopa Prabhu0b1c00f2010-12-08 13:53:58 +00002066 .ndo_set_vf_mac = enic_set_vf_mac,
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002067#ifdef CONFIG_NET_POLL_CONTROLLER
2068 .ndo_poll_controller = enic_poll_controller,
2069#endif
Govindarajulu Varadarajana145df22014-06-23 16:08:02 +05302070#ifdef CONFIG_RFS_ACCEL
2071 .ndo_rx_flow_steer = enic_rx_flow_steer,
2072#endif
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002073};
2074
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08002075static const struct net_device_ops enic_netdev_ops = {
2076 .ndo_open = enic_open,
2077 .ndo_stop = enic_stop,
Stephen Hemminger00829822008-11-20 20:14:53 -08002078 .ndo_start_xmit = enic_hard_start_xmit,
stephen hemmingerf20530b2011-06-08 14:54:02 +00002079 .ndo_get_stats64 = enic_get_stats,
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08002080 .ndo_validate_addr = eth_validate_addr,
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002081 .ndo_set_mac_address = enic_set_mac_address,
Roopa Prabhu319d7e82010-12-08 13:19:58 +00002082 .ndo_set_rx_mode = enic_set_rx_mode,
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08002083 .ndo_change_mtu = enic_change_mtu,
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08002084 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
2085 .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
2086 .ndo_tx_timeout = enic_tx_timeout,
Roopa Prabhu3f192792011-09-22 03:44:43 +00002087 .ndo_set_vf_port = enic_set_vf_port,
2088 .ndo_get_vf_port = enic_get_vf_port,
2089 .ndo_set_vf_mac = enic_set_vf_mac,
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08002090#ifdef CONFIG_NET_POLL_CONTROLLER
2091 .ndo_poll_controller = enic_poll_controller,
2092#endif
Govindarajulu Varadarajana145df22014-06-23 16:08:02 +05302093#ifdef CONFIG_RFS_ACCEL
2094 .ndo_rx_flow_steer = enic_rx_flow_steer,
2095#endif
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08002096};
2097
Vasanthy Kolluri2fdba382010-09-30 13:35:45 +00002098static void enic_dev_deinit(struct enic *enic)
Scott Feldman6fdfa972009-09-03 17:02:45 +00002099{
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002100 unsigned int i;
2101
2102 for (i = 0; i < enic->rq_count; i++)
2103 netif_napi_del(&enic->napi[i]);
2104
Scott Feldman6fdfa972009-09-03 17:02:45 +00002105 enic_free_vnic_resources(enic);
2106 enic_clear_intr_mode(enic);
2107}
2108
Vasanthy Kolluri2fdba382010-09-30 13:35:45 +00002109static int enic_dev_init(struct enic *enic)
Scott Feldman6fdfa972009-09-03 17:02:45 +00002110{
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002111 struct device *dev = enic_get_dev(enic);
Scott Feldman6fdfa972009-09-03 17:02:45 +00002112 struct net_device *netdev = enic->netdev;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002113 unsigned int i;
Scott Feldman6fdfa972009-09-03 17:02:45 +00002114 int err;
2115
Vasanthy Kolluriea7ea652011-06-17 07:56:48 +00002116 /* Get interrupt coalesce timer info */
2117 err = enic_dev_intr_coal_timer_info(enic);
2118 if (err) {
2119 dev_warn(dev, "Using default conversion factor for "
2120 "interrupt coalesce timer\n");
2121 vnic_dev_intr_coal_timer_info_default(enic->vdev);
2122 }
2123
Scott Feldman6fdfa972009-09-03 17:02:45 +00002124 /* Get vNIC configuration
2125 */
2126
2127 err = enic_get_vnic_config(enic);
2128 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002129 dev_err(dev, "Get vNIC configuration failed, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002130 return err;
2131 }
2132
2133 /* Get available resource counts
2134 */
2135
2136 enic_get_res_counts(enic);
2137
2138 /* Set interrupt mode based on resource counts and system
2139 * capabilities
2140 */
2141
2142 err = enic_set_intr_mode(enic);
2143 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002144 dev_err(dev, "Failed to set intr mode based on resource "
2145 "counts and system capabilities, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002146 return err;
2147 }
2148
2149 /* Allocate and configure vNIC resources
2150 */
2151
2152 err = enic_alloc_vnic_resources(enic);
2153 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002154 dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002155 goto err_out_free_vnic_resources;
2156 }
2157
2158 enic_init_vnic_resources(enic);
2159
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002160 err = enic_set_rss_nic_cfg(enic);
Scott Feldman6fdfa972009-09-03 17:02:45 +00002161 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002162 dev_err(dev, "Failed to config nic, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002163 goto err_out_free_vnic_resources;
2164 }
2165
2166 switch (vnic_dev_get_intr_mode(enic->vdev)) {
2167 default:
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002168 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
Scott Feldman6fdfa972009-09-03 17:02:45 +00002169 break;
2170 case VNIC_DEV_INTR_MODE_MSIX:
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002171 for (i = 0; i < enic->rq_count; i++)
2172 netif_napi_add(netdev, &enic->napi[i],
2173 enic_poll_msix, 64);
Scott Feldman6fdfa972009-09-03 17:02:45 +00002174 break;
2175 }
2176
2177 return 0;
2178
2179err_out_free_vnic_resources:
2180 enic_clear_intr_mode(enic);
2181 enic_free_vnic_resources(enic);
2182
2183 return err;
2184}
2185
Scott Feldman27e6c7d2009-09-03 17:01:53 +00002186static void enic_iounmap(struct enic *enic)
2187{
2188 unsigned int i;
2189
2190 for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
2191 if (enic->bar[i].vaddr)
2192 iounmap(enic->bar[i].vaddr);
2193}
2194
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00002195static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002196{
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002197 struct device *dev = &pdev->dev;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002198 struct net_device *netdev;
2199 struct enic *enic;
2200 int using_dac = 0;
2201 unsigned int i;
2202 int err;
Roopa Prabhu8749b422011-09-22 03:44:33 +00002203#ifdef CONFIG_PCI_IOV
2204 int pos = 0;
2205#endif
Roopa Prabhub67f2312012-01-19 22:25:36 +00002206 int num_pps = 1;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002207
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002208 /* Allocate net device structure and initialize. Private
2209 * instance data is initialized to zero.
2210 */
2211
govindarajulu.v822473b2013-09-04 11:17:14 +05302212 netdev = alloc_etherdev_mqs(sizeof(struct enic),
2213 ENIC_RQ_MAX, ENIC_WQ_MAX);
Joe Perches41de8d42012-01-29 13:47:52 +00002214 if (!netdev)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002215 return -ENOMEM;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002216
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002217 pci_set_drvdata(pdev, netdev);
2218
2219 SET_NETDEV_DEV(netdev, &pdev->dev);
2220
2221 enic = netdev_priv(netdev);
2222 enic->netdev = netdev;
2223 enic->pdev = pdev;
2224
2225 /* Setup PCI resources
2226 */
2227
Vasanthy Kolluri29046f92010-06-24 10:52:26 +00002228 err = pci_enable_device_mem(pdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002229 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002230 dev_err(dev, "Cannot enable PCI device, aborting\n");
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002231 goto err_out_free_netdev;
2232 }
2233
2234 err = pci_request_regions(pdev, DRV_NAME);
2235 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002236 dev_err(dev, "Cannot request PCI regions, aborting\n");
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002237 goto err_out_disable_device;
2238 }
2239
2240 pci_set_master(pdev);
2241
	/* Query the PCI controller for the device's DMA addressing
	 * limitation.  Try 64-bit first, and fall back to 32-bit.
	 */
2246
govindarajulu.v624dbf52013-09-04 11:17:16 +05302247 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002248 if (err) {
Yang Hongyang284901a2009-04-06 19:01:15 -07002249 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002250 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002251 dev_err(dev, "No usable DMA configuration, aborting\n");
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002252 goto err_out_release_regions;
2253 }
Yang Hongyang284901a2009-04-06 19:01:15 -07002254 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002255 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002256 dev_err(dev, "Unable to obtain %u-bit DMA "
2257 "for consistent allocations, aborting\n", 32);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002258 goto err_out_release_regions;
2259 }
2260 } else {
govindarajulu.v624dbf52013-09-04 11:17:16 +05302261 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002262 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002263 dev_err(dev, "Unable to obtain %u-bit DMA "
govindarajulu.v624dbf52013-09-04 11:17:16 +05302264 "for consistent allocations, aborting\n", 64);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002265 goto err_out_release_regions;
2266 }
2267 using_dac = 1;
2268 }
2269
Scott Feldman27e6c7d2009-09-03 17:01:53 +00002270 /* Map vNIC resources from BAR0-5
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002271 */
2272
Scott Feldman27e6c7d2009-09-03 17:01:53 +00002273 for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
2274 if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
2275 continue;
2276 enic->bar[i].len = pci_resource_len(pdev, i);
2277 enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
2278 if (!enic->bar[i].vaddr) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002279 dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
Scott Feldman27e6c7d2009-09-03 17:01:53 +00002280 err = -ENODEV;
2281 goto err_out_iounmap;
2282 }
2283 enic->bar[i].bus_addr = pci_resource_start(pdev, i);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002284 }
2285
2286 /* Register vNIC device
2287 */
2288
Scott Feldman27e6c7d2009-09-03 17:01:53 +00002289 enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
2290 ARRAY_SIZE(enic->bar));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002291 if (!enic->vdev) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002292 dev_err(dev, "vNIC registration failed, aborting\n");
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002293 err = -ENODEV;
2294 goto err_out_iounmap;
2295 }
2296
Roopa Prabhu8749b422011-09-22 03:44:33 +00002297#ifdef CONFIG_PCI_IOV
2298 /* Get number of subvnics */
2299 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
2300 if (pos) {
2301 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
Dan Carpenter413708b2012-02-29 21:19:54 +00002302 &enic->num_vfs);
Roopa Prabhu8749b422011-09-22 03:44:33 +00002303 if (enic->num_vfs) {
2304 err = pci_enable_sriov(pdev, enic->num_vfs);
2305 if (err) {
2306 dev_err(dev, "SRIOV enable failed, aborting."
2307 " pci_enable_sriov() returned %d\n",
2308 err);
2309 goto err_out_vnic_unregister;
2310 }
2311 enic->priv_flags |= ENIC_SRIOV_ENABLED;
Roopa Prabhub67f2312012-01-19 22:25:36 +00002312 num_pps = enic->num_vfs;
Roopa Prabhu8749b422011-09-22 03:44:33 +00002313 }
2314 }
Roopa Prabhu8749b422011-09-22 03:44:33 +00002315#endif
Roopa Prabhuca2b7212012-01-18 04:24:07 +00002316
Roopa Prabhu3f192792011-09-22 03:44:43 +00002317 /* Allocate structure for port profiles */
Thomas Meyera1de2212011-11-29 11:08:00 +00002318 enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
Roopa Prabhu3f192792011-09-22 03:44:43 +00002319 if (!enic->pp) {
Roopa Prabhu3f192792011-09-22 03:44:43 +00002320 err = -ENOMEM;
Roopa Prabhuca2b7212012-01-18 04:24:07 +00002321 goto err_out_disable_sriov_pp;
Roopa Prabhu3f192792011-09-22 03:44:43 +00002322 }
2323
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002324 /* Issue device open to get device in known state
2325 */
2326
2327 err = enic_dev_open(enic);
2328 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002329 dev_err(dev, "vNIC dev open failed, aborting\n");
Roopa Prabhuca2b7212012-01-18 04:24:07 +00002330 goto err_out_disable_sriov;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002331 }
2332
Vasanthy Kolluri69161422011-02-04 16:17:16 +00002333 /* Setup devcmd lock
2334 */
2335
2336 spin_lock_init(&enic->devcmd_lock);
Neel Patel0b038562013-08-16 15:47:40 -07002337 spin_lock_init(&enic->enic_api_lock);
Vasanthy Kolluri69161422011-02-04 16:17:16 +00002338
2339 /*
2340 * Set ingress vlan rewrite mode before vnic initialization
2341 */
2342
2343 err = enic_dev_set_ig_vlan_rewrite_mode(enic);
2344 if (err) {
2345 dev_err(dev,
2346 "Failed to set ingress vlan rewrite mode, aborting.\n");
2347 goto err_out_dev_close;
2348 }
2349
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002350 /* Issue device init to initialize the vnic-to-switch link.
2351 * We'll start with carrier off and wait for link UP
2352 * notification later to turn on carrier. We don't need
2353 * to wait here for the vnic-to-switch link initialization
2354 * to complete; link UP notification is the indication that
2355 * the process is complete.
2356 */
2357
2358 netif_carrier_off(netdev);
2359
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002360 /* Do not call dev_init for a dynamic vnic.
2361 * For a dynamic vnic, init_prov_info will be
2362 * called later by an upper layer.
2363 */
2364
Roopa Prabhu2b68c182012-02-20 00:12:04 +00002365 if (!enic_is_dynamic(enic)) {
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002366 err = vnic_dev_init(enic->vdev, 0);
2367 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002368 dev_err(dev, "vNIC dev init failed, aborting\n");
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002369 goto err_out_dev_close;
2370 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002371 }
2372
Scott Feldman6fdfa972009-09-03 17:02:45 +00002373 err = enic_dev_init(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002374 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002375 dev_err(dev, "Device initialization failed, aborting\n");
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002376 goto err_out_dev_close;
2377 }
2378
govindarajulu.v822473b2013-09-04 11:17:14 +05302379 netif_set_real_num_tx_queues(netdev, enic->wq_count);
govindarajulu.vbf751ba2013-09-04 11:17:15 +05302380 netif_set_real_num_rx_queues(netdev, enic->rq_count);
govindarajulu.v822473b2013-09-04 11:17:14 +05302381
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00002382 /* Setup notification timer, HW reset task, and wq locks
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002383 */
2384
2385 init_timer(&enic->notify_timer);
2386 enic->notify_timer.function = enic_notify_timer;
2387 enic->notify_timer.data = (unsigned long)enic;
2388
Sujith Sankar7c2ce6e2014-05-20 03:14:05 +05302389 enic_set_rx_coal_setting(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002390 INIT_WORK(&enic->reset, enic_reset);
Roopa Prabhuc97c8942011-06-03 14:35:17 +00002391 INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002392
2393 for (i = 0; i < enic->wq_count; i++)
2394 spin_lock_init(&enic->wq_lock[i]);
2395
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002396 /* Register net device
2397 */
2398
2399 enic->port_mtu = enic->config.mtu;
2400 (void)enic_change_mtu(netdev, enic->port_mtu);
2401
2402 err = enic_set_mac_addr(netdev, enic->mac_addr);
2403 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002404 dev_err(dev, "Invalid MAC address, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002405 goto err_out_dev_deinit;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002406 }
2407
Scott Feldman7c844592009-12-23 13:27:54 +00002408 enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	/* The RX coalesce time was already initialized in
	 * enic_set_rx_coal_setting(); it is used when adaptive
	 * coalescing is turned off.
	 */
Scott Feldman7c844592009-12-23 13:27:54 +00002412 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
2413
Roopa Prabhu73359032012-01-18 04:24:02 +00002414 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002415 netdev->netdev_ops = &enic_netdev_dynamic_ops;
2416 else
2417 netdev->netdev_ops = &enic_netdev_ops;
2418
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002419 netdev->watchdog_timeo = 2 * HZ;
Neel Patelf13bbc22013-07-22 09:59:18 -07002420 enic_set_ethtool_ops(netdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002421
Patrick McHardyf6469682013-04-19 02:04:27 +00002422 netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
Vasanthy Kolluri1825aca2010-06-24 10:51:59 +00002423 if (ENIC_SETTING(enic, LOOP)) {
Patrick McHardyf6469682013-04-19 02:04:27 +00002424 netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
Vasanthy Kolluri1825aca2010-06-24 10:51:59 +00002425 enic->loop_enable = 1;
2426 enic->loop_tag = enic->config.loop_tag;
2427 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
2428 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002429 if (ENIC_SETTING(enic, TXCSUM))
Michał Mirosław5ec8f9b2011-04-07 02:43:48 +00002430 netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002431 if (ENIC_SETTING(enic, TSO))
Michał Mirosław5ec8f9b2011-04-07 02:43:48 +00002432 netdev->hw_features |= NETIF_F_TSO |
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002433 NETIF_F_TSO6 | NETIF_F_TSO_ECN;
govindarajulu.vbf751ba2013-09-04 11:17:15 +05302434 if (ENIC_SETTING(enic, RSS))
2435 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław5ec8f9b2011-04-07 02:43:48 +00002436 if (ENIC_SETTING(enic, RXCSUM))
2437 netdev->hw_features |= NETIF_F_RXCSUM;
2438
2439 netdev->features |= netdev->hw_features;
2440
Govindarajulu Varadarajana145df22014-06-23 16:08:02 +05302441#ifdef CONFIG_RFS_ACCEL
2442 netdev->hw_features |= NETIF_F_NTUPLE;
2443#endif
2444
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002445 if (using_dac)
2446 netdev->features |= NETIF_F_HIGHDMA;
2447
Jiri Pirko01789342011-08-16 06:29:00 +00002448 netdev->priv_flags |= IFF_UNICAST_FLT;
2449
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002450 err = register_netdev(netdev);
2451 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002452 dev_err(dev, "Cannot register net device, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002453 goto err_out_dev_deinit;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002454 }
2455
2456 return 0;
2457
Scott Feldman6fdfa972009-09-03 17:02:45 +00002458err_out_dev_deinit:
2459 enic_dev_deinit(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002460err_out_dev_close:
2461 vnic_dev_close(enic->vdev);
Roopa Prabhu8749b422011-09-22 03:44:33 +00002462err_out_disable_sriov:
Roopa Prabhuca2b7212012-01-18 04:24:07 +00002463 kfree(enic->pp);
2464err_out_disable_sriov_pp:
Roopa Prabhu8749b422011-09-22 03:44:33 +00002465#ifdef CONFIG_PCI_IOV
2466 if (enic_sriov_enabled(enic)) {
2467 pci_disable_sriov(pdev);
2468 enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
2469 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002470err_out_vnic_unregister:
Roopa Prabhu8749b422011-09-22 03:44:33 +00002471#endif
Roopa Prabhu35d87e32012-01-18 04:24:12 +00002472 vnic_dev_unregister(enic->vdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002473err_out_iounmap:
2474 enic_iounmap(enic);
2475err_out_release_regions:
2476 pci_release_regions(pdev);
2477err_out_disable_device:
2478 pci_disable_device(pdev);
2479err_out_free_netdev:
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002480 free_netdev(netdev);
2481
2482 return err;
2483}
2484
Bill Pemberton854de922012-12-03 09:23:05 -05002485static void enic_remove(struct pci_dev *pdev)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002486{
2487 struct net_device *netdev = pci_get_drvdata(pdev);
2488
2489 if (netdev) {
2490 struct enic *enic = netdev_priv(netdev);
2491
Tejun Heo23f333a2010-12-12 16:45:14 +01002492 cancel_work_sync(&enic->reset);
Roopa Prabhuc97c8942011-06-03 14:35:17 +00002493 cancel_work_sync(&enic->change_mtu_work);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002494 unregister_netdev(netdev);
Scott Feldman6fdfa972009-09-03 17:02:45 +00002495 enic_dev_deinit(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002496 vnic_dev_close(enic->vdev);
Roopa Prabhu8749b422011-09-22 03:44:33 +00002497#ifdef CONFIG_PCI_IOV
2498 if (enic_sriov_enabled(enic)) {
2499 pci_disable_sriov(pdev);
2500 enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
2501 }
2502#endif
Roopa Prabhu3f192792011-09-22 03:44:43 +00002503 kfree(enic->pp);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002504 vnic_dev_unregister(enic->vdev);
2505 enic_iounmap(enic);
2506 pci_release_regions(pdev);
2507 pci_disable_device(pdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002508 free_netdev(netdev);
2509 }
2510}
2511
2512static struct pci_driver enic_driver = {
2513 .name = DRV_NAME,
2514 .id_table = enic_id_table,
2515 .probe = enic_probe,
Bill Pemberton854de922012-12-03 09:23:05 -05002516 .remove = enic_remove,
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002517};
2518
2519static int __init enic_init_module(void)
2520{
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002521 pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002522
2523 return pci_register_driver(&enic_driver);
2524}
2525
2526static void __exit enic_cleanup_module(void)
2527{
2528 pci_unregister_driver(&enic_driver);
2529}
2530
2531module_init(enic_init_module);
2532module_exit(enic_cleanup_module);