/*
 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)

#define PCI_DEVICE_ID_CISCO_VIC_ENET		0x0043	/* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF		0x0071	/* enet SRIOV VF */

/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
	return vf >= 0 && vf < enic->num_vfs;
#else
	return 0;
#endif
}

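/* Unmap and free a completed transmit buffer. Head (sop) buffers were
 * mapped with pci_map_single(), continuation buffers with
 * skb_frag_dma_map(), hence the two unmap variants.
 */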
static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

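/* CQ callback for transmit completions: reclaim descriptors on one WQ
 * and wake the matching netdev TX queue once enough descriptors are
 * free for a maximally fragmented skb.
 */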
static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_subqueue(enic->netdev, q_number);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}

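/* Report per-queue hardware error status to the log; called from the
 * error-interrupt paths before scheduling a reset.
 */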
static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				i, error_status);
	}
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

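/* React to an MTU change reported by the device (e.g. the switch port
 * MTU). Dynamic and SR-IOV VF vnics adopt the new MTU via
 * change_mtu_work; other vnics only warn when the interface MTU
 * exceeds the port MTU.
 */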
static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					"interface MTU (%d) set higher "
					"than switch port MTU (%d)\n",
					netdev->mtu, mtu);
		}
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))

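/* Legacy INTx handler: a single shared line covers I/O, error and
 * notify events, demultiplexed here from the pba status read from
 * the device.
 */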
static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr)) {
		if (napi_schedule_prep(&enic->napi[0]))
			__napi_schedule(&enic->napi[0]);
	} else {
		vnic_intr_unmask(&enic->intr[io_intr]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it. The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated. Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule(&enic->napi[0]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct napi_struct *napi = data;

	/* schedule NAPI polling for RQ cleanup */
	napi_schedule(napi);

	return IRQ_HANDLED;
}

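/* MSI-X transmit-completion handler: the WQ index is recovered from
 * the vector number, and the corresponding CQ is drained directly in
 * hard-irq context rather than through NAPI.
 */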
static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int cq;
	unsigned int intr;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int wq_work_done;
	unsigned int wq_irq;

	wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector;
	cq = enic_cq_wq(enic, wq_irq);
	intr = enic_msix_wq_intr(enic, wq_irq);

	wq_work_done = vnic_cq_service(&enic->cq[cq],
		wq_work_to_do, enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[intr],
		wq_work_done,
		1 /* unmask intr */,
		1 /* reset intr timer */);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}

static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left, int loopback)
{
	const skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		enic_queue_wq_desc_cont(wq, skb,
			skb_frag_dma_map(&enic->pdev->dev,
					 frag, 0, skb_frag_size(frag),
					 DMA_TO_DEVICE),
			skb_frag_size(frag),
			(len_left == 0),	/* EOP? */
			loopback);
	}
}

static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

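/* Queue a TSO skb: preload the TCP pseudo-header checksum so hardware
 * can finalize per-segment checksums, then split the linear head and
 * each page fragment into descriptors of at most WQ_ENET_MAX_DESC_LEN
 * bytes.
 */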
static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero. HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
				len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		frag_len_left = skb_frag_size(frag);
		offset = 0;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
						    offset, len,
						    DMA_TO_DEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				(len == frag_len_left),	/* EOP? */
				loopback);
			frag_len_left -= len;
			offset += len;
		}
	}
}

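/* Dispatch an skb to the proper queueing helper: TSO, L4 checksum
 * offload, or a plain (optionally VLAN-tagged) send. In loopback mode
 * the loop tag rides in the VLAN tag field.
 */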
static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;

	if (vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag, loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
}

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq;
	unsigned long flags;
	unsigned int txq_map;

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
	wq = &enic->wq[txq_map];

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely. On the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[txq_map], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));

	spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);

	return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
	struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;

	enic_dev_stats_dump(enic, &stats);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}

void enic_reset_addr_lists(struct enic *enic)
{
	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}

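/* Sync the device's perfect-filter table with the netdev multicast
 * list, adding and deleting only the addresses that changed since
 * the last call.
 */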
static void enic_update_multicast_addr_list(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	struct netdev_hw_addr *ha;
	unsigned int mc_count = netdev_mc_count(netdev);
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
		netdev_warn(netdev, "Registering only %d out of %d "
			"multicast addresses\n",
			ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
	}

	/* Is there an easier way? Trying to minimize the
	 * calls to add/del multicast addrs. We keep the
	 * addrs from the last call in enic->mc_addr and
	 * look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
	}

	for (i = 0; i < enic->mc_count; i++) {
		for (j = 0; j < mc_count; j++)
			if (ether_addr_equal(enic->mc_addr[i], mc_addr[j]))
				break;
		if (j == mc_count)
			enic_dev_del_addr(enic, enic->mc_addr[i]);
	}

	for (i = 0; i < mc_count; i++) {
		for (j = 0; j < enic->mc_count; j++)
			if (ether_addr_equal(mc_addr[i], enic->mc_addr[j]))
				break;
		if (j == enic->mc_count)
			enic_dev_add_addr(enic, mc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < mc_count; i++)
		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

	enic->mc_count = mc_count;
}

static void enic_update_unicast_addr_list(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	struct netdev_hw_addr *ha;
	unsigned int uc_count = netdev_uc_count(netdev);
	u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
		netdev_warn(netdev, "Registering only %d out of %d "
			"unicast addresses\n",
			ENIC_UNICAST_PERFECT_FILTERS, uc_count);
		uc_count = ENIC_UNICAST_PERFECT_FILTERS;
	}

	/* Is there an easier way? Trying to minimize the
	 * calls to add/del unicast addrs. We keep the
	 * addrs from the last call in enic->uc_addr and
	 * look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_uc_addr(ha, netdev) {
		if (i == uc_count)
			break;
		memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
	}

	for (i = 0; i < enic->uc_count; i++) {
		for (j = 0; j < uc_count; j++)
			if (ether_addr_equal(enic->uc_addr[i], uc_addr[j]))
				break;
		if (j == uc_count)
			enic_dev_del_addr(enic, enic->uc_addr[i]);
	}

	for (i = 0; i < uc_count; i++) {
		for (j = 0; j < enic->uc_count; j++)
			if (ether_addr_equal(uc_addr[i], enic->uc_addr[j]))
				break;
		if (j == enic->uc_count)
			enic_dev_add_addr(enic, uc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < uc_count; i++)
		memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);

	enic->uc_count = uc_count;
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		enic_update_unicast_addr_list(enic);
		if (!allmulti)
			enic_update_multicast_addr_list(enic);
	}
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
		if (vf == PORT_SELF_VF) {
			memcpy(pp->vf_mac, mac, ETH_ALEN);
			return 0;
		} else {
			/*
			 * For sriov vf's set the mac in hw
			 */
			ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
				vnic_dev_set_mac_addr, mac);
			return enic_dev_status_to_errno(err);
		}
	} else
		return -EINVAL;
}

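/* ndo_set_vf_port handler: capture the port-profile attributes
 * (request, name, instance/host UUIDs) from netlink, provision the
 * profile through enic_process_set_pp_request(), and roll back to
 * the previous profile if provisioning fails.
 */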
static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	struct enic_port_profile *pp;
	int err = 0, restore_pp = 1;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, pp, sizeof(*enic->pp));
	memset(pp, 0, sizeof(*enic->pp));

	pp->set |= ENIC_SET_REQUEST;
	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		pp->set |= ENIC_SET_NAME;
		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		pp->set |= ENIC_SET_INSTANCE;
		memcpy(pp->instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		pp->set |= ENIC_SET_HOST;
		memcpy(pp->host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	if (vf == PORT_SELF_VF) {
		/* Special case handling: mac came from IFLA_VF_MAC */
		if (!is_zero_ether_addr(prev_pp.vf_mac))
			memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

		if (is_zero_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	} else {
		/* SR-IOV VF: get mac from adapter */
		ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
			vnic_dev_get_mac_addr, pp->mac_addr);
		if (err) {
			netdev_err(netdev, "Error getting mac for vf %d\n", vf);
			memcpy(pp, &prev_pp, sizeof(*pp));
			return enic_dev_status_to_errno(err);
		}
	}

	err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(pp, &prev_pp, sizeof(*pp));
		} else {
			memset(pp, 0, sizeof(*pp));
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		pp->set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
			memset(pp->mac_addr, 0, ETH_ALEN);
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	}

	if (vf == PORT_SELF_VF)
		memset(pp->vf_mac, 0, ETH_ALEN);

	return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, vf, pp->request, &response);
	if (err)
		return err;

	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
	    ((pp->set & ENIC_SET_NAME) &&
	     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
	    ((pp->set & ENIC_SET_INSTANCE) &&
	     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
		     pp->instance_uuid)) ||
	    ((pp->set & ENIC_SET_HOST) &&
	     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
}

static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}

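/* Hand a completed receive buffer to the stack: decode the CQ
 * descriptor, set protocol/RSS-hash/checksum/VLAN metadata on the
 * skb, and deliver it via GRO or netif_receive_skb(). Errored or
 * truncated frames are counted and dropped.
 */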
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, q_number);
		if (netdev->features & NETIF_F_RXHASH) {
			skb_set_hash(skb, rss_hash,
				     (rss_type &
				      (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) ?
				     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
		}

		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

		if (netdev->features & NETIF_F_GRO)
			napi_gro_receive(&enic->napi[q_number], skb);
		else
			netif_receive_skb(skb);
	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}

static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_legacy_io_intr();
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int work_done, rq_work_done, wq_work_done;
	int err;

	/* Service RQ (first) and WQ
	 */

	rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle. An intr event is the completion of
	 * a WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return rq_work_done;
}

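/* NAPI poll routine for one RQ in MSI-X mode. The RQ index is derived
 * from the napi context's position in enic->napi[], so each vector
 * services exactly one receive queue.
 */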
static int enic_poll_msix(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = budget;
	unsigned int work_done;
	int err;

	/* Service RQ
	 */

	work_done = vnic_cq_service(&enic->cq[cq],
		work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle. An intr event is the completion of
	 * an RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;

	if (work_done < work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return work_done;
}

static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}

static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}

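/* Request the driver's interrupts per the negotiated interrupt mode:
 * a shared line for INTx, a single vector for MSI, or one named
 * vector per RQ/WQ plus error and notify vectors for MSI-X.
 */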
1219static int enic_request_intr(struct enic *enic)
1220{
1221 struct net_device *netdev = enic->netdev;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001222 unsigned int i, intr;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001223 int err = 0;
1224
1225 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1226
1227 case VNIC_DEV_INTR_MODE_INTX:
1228
1229 err = request_irq(enic->pdev->irq, enic_isr_legacy,
1230 IRQF_SHARED, netdev->name, netdev);
1231 break;
1232
1233 case VNIC_DEV_INTR_MODE_MSI:
1234
1235 err = request_irq(enic->pdev->irq, enic_isr_msi,
1236 0, netdev->name, enic);
1237 break;
1238
1239 case VNIC_DEV_INTR_MODE_MSIX:
1240
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001241 for (i = 0; i < enic->rq_count; i++) {
1242 intr = enic_msix_rq_intr(enic, i);
Dan Carpenter4505f402013-01-17 21:46:18 +00001243 snprintf(enic->msix[intr].devname,
1244 sizeof(enic->msix[intr].devname),
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001245 "%.11s-rx-%d", netdev->name, i);
1246 enic->msix[intr].isr = enic_isr_msix_rq;
1247 enic->msix[intr].devid = &enic->napi[i];
1248 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001249
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001250 for (i = 0; i < enic->wq_count; i++) {
1251 intr = enic_msix_wq_intr(enic, i);
Dan Carpenter4505f402013-01-17 21:46:18 +00001252 snprintf(enic->msix[intr].devname,
1253 sizeof(enic->msix[intr].devname),
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001254 "%.11s-tx-%d", netdev->name, i);
1255 enic->msix[intr].isr = enic_isr_msix_wq;
1256 enic->msix[intr].devid = enic;
1257 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001258
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001259 intr = enic_msix_err_intr(enic);
Dan Carpenter4505f402013-01-17 21:46:18 +00001260 snprintf(enic->msix[intr].devname,
1261 sizeof(enic->msix[intr].devname),
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001262 "%.11s-err", netdev->name);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001263 enic->msix[intr].isr = enic_isr_msix_err;
1264 enic->msix[intr].devid = enic;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001265
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001266 intr = enic_msix_notify_intr(enic);
Dan Carpenter4505f402013-01-17 21:46:18 +00001267 snprintf(enic->msix[intr].devname,
1268 sizeof(enic->msix[intr].devname),
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001269 "%.11s-notify", netdev->name);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001270 enic->msix[intr].isr = enic_isr_msix_notify;
1271 enic->msix[intr].devid = enic;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001272
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001273 for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
1274 enic->msix[i].requested = 0;
1275
1276 for (i = 0; i < enic->intr_count; i++) {
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001277 err = request_irq(enic->msix_entry[i].vector,
1278 enic->msix[i].isr, 0,
1279 enic->msix[i].devname,
1280 enic->msix[i].devid);
1281 if (err) {
1282 enic_free_intr(enic);
1283 break;
1284 }
1285 enic->msix[i].requested = 1;
1286 }
1287
1288 break;
1289
1290 default:
1291 break;
1292 }
1293
1294 return err;
1295}
1296
Scott Feldmanb3d18d12009-12-23 13:27:30 +00001297static void enic_synchronize_irqs(struct enic *enic)
1298{
1299 unsigned int i;
1300
1301 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1302 case VNIC_DEV_INTR_MODE_INTX:
1303 case VNIC_DEV_INTR_MODE_MSI:
1304 synchronize_irq(enic->pdev->irq);
1305 break;
1306 case VNIC_DEV_INTR_MODE_MSIX:
1307 for (i = 0; i < enic->intr_count; i++)
1308 synchronize_irq(enic->msix_entry[i].vector);
1309 break;
1310 default:
1311 break;
1312 }
1313}
1314
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001315static int enic_dev_notify_set(struct enic *enic)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001316{
1317 int err;
1318
Scott Feldman56ac88b2009-09-03 17:02:14 +00001319 spin_lock(&enic->devcmd_lock);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001320 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1321 case VNIC_DEV_INTR_MODE_INTX:
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001322 err = vnic_dev_notify_set(enic->vdev,
1323 enic_legacy_notify_intr());
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001324 break;
1325 case VNIC_DEV_INTR_MODE_MSIX:
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001326 err = vnic_dev_notify_set(enic->vdev,
1327 enic_msix_notify_intr(enic));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001328 break;
1329 default:
1330 err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
1331 break;
1332 }
Scott Feldman56ac88b2009-09-03 17:02:14 +00001333 spin_unlock(&enic->devcmd_lock);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001334
1335 return err;
1336}
1337
1338static void enic_notify_timer_start(struct enic *enic)
1339{
1340 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1341 case VNIC_DEV_INTR_MODE_MSI:
1342 mod_timer(&enic->notify_timer, jiffies);
1343 break;
1344 default:
 1345 /* INTx/MSI-X deliver notifications via interrupt; no timer needed */
1346 break;
Joe Perches6403eab2011-06-03 11:51:20 +00001347 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001348}
1349
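/* Bring-up order: request IRQs, point firmware notifications at the
 * right interrupt, post receive buffers (at least one per ring, else
 * -ENOMEM), enable WQs and RQs, program the station address and RX
 * mode, wake the TX queues, enable NAPI and the device, unmask
 * interrupts, and finally start the (MSI-only) notify timer.
 */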
1350/* rtnl lock is held, process context */
1351static int enic_open(struct net_device *netdev)
1352{
1353 struct enic *enic = netdev_priv(netdev);
1354 unsigned int i;
1355 int err;
1356
Scott Feldman4b75a442008-09-24 11:23:53 -07001357 err = enic_request_intr(enic);
1358 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001359 netdev_err(netdev, "Unable to request irq.\n");
Scott Feldman4b75a442008-09-24 11:23:53 -07001360 return err;
1361 }
1362
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001363 err = enic_dev_notify_set(enic);
Scott Feldman4b75a442008-09-24 11:23:53 -07001364 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001365 netdev_err(netdev,
1366 "Failed to alloc notify buffer, aborting.\n");
Scott Feldman4b75a442008-09-24 11:23:53 -07001367 goto err_out_free_intr;
1368 }
1369
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001370 for (i = 0; i < enic->rq_count; i++) {
Vasanthy Kolluri0eb26022011-02-04 16:17:21 +00001371 vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001372 /* Need at least one buffer on ring to get going */
1373 if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001374 netdev_err(netdev, "Unable to alloc receive buffers\n");
Scott Feldman2d6ddce2009-12-23 13:27:38 +00001375 err = -ENOMEM;
Scott Feldman4b75a442008-09-24 11:23:53 -07001376 goto err_out_notify_unset;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001377 }
1378 }
1379
1380 for (i = 0; i < enic->wq_count; i++)
1381 vnic_wq_enable(&enic->wq[i]);
1382 for (i = 0; i < enic->rq_count; i++)
1383 vnic_rq_enable(&enic->rq[i]);
1384
Roopa Prabhu73359032012-01-18 04:24:02 +00001385 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
Roopa Prabhu296390592010-12-08 13:54:03 +00001386 enic_dev_add_station_addr(enic);
Roopa Prabhu3f192792011-09-22 03:44:43 +00001387
Roopa Prabhu319d7e82010-12-08 13:19:58 +00001388 enic_set_rx_mode(netdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001389
govindarajulu.v822473b2013-09-04 11:17:14 +05301390 netif_tx_wake_all_queues(netdev);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001391
1392 for (i = 0; i < enic->rq_count; i++)
1393 napi_enable(&enic->napi[i]);
1394
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001395 enic_dev_enable(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001396
1397 for (i = 0; i < enic->intr_count; i++)
1398 vnic_intr_unmask(&enic->intr[i]);
1399
1400 enic_notify_timer_start(enic);
1401
1402 return 0;
Scott Feldman4b75a442008-09-24 11:23:53 -07001403
1404err_out_notify_unset:
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001405 enic_dev_notify_unset(enic);
Scott Feldman4b75a442008-09-24 11:23:53 -07001406err_out_free_intr:
1407 enic_free_intr(enic);
1408
1409 return err;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001410}
1411
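/* Teardown runs roughly in reverse of enic_open(): mask and flush all
 * interrupts, wait out in-flight handlers, stop the notify timer,
 * disable the device and NAPI, drop carrier and TX, remove the
 * station address, disable WQs and RQs (propagating any failure),
 * then unset notifications, free the IRQs and clean every ring.
 */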
1412/* rtnl lock is held, process context */
1413static int enic_stop(struct net_device *netdev)
1414{
1415 struct enic *enic = netdev_priv(netdev);
1416 unsigned int i;
1417 int err;
1418
Vasanthy Kolluri29046f92010-06-24 10:52:26 +00001419 for (i = 0; i < enic->intr_count; i++) {
Scott Feldmanb3d18d12009-12-23 13:27:30 +00001420 vnic_intr_mask(&enic->intr[i]);
Vasanthy Kolluri29046f92010-06-24 10:52:26 +00001421 (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
1422 }
Scott Feldmanb3d18d12009-12-23 13:27:30 +00001423
1424 enic_synchronize_irqs(enic);
1425
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001426 del_timer_sync(&enic->notify_timer);
1427
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001428 enic_dev_disable(enic);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001429
1430 for (i = 0; i < enic->rq_count; i++)
1431 napi_disable(&enic->napi[i]);
1432
Scott Feldmanb3d18d12009-12-23 13:27:30 +00001433 netif_carrier_off(netdev);
1434 netif_tx_disable(netdev);
Roopa Prabhu3f192792011-09-22 03:44:43 +00001435
Roopa Prabhu73359032012-01-18 04:24:02 +00001436 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
Roopa Prabhu296390592010-12-08 13:54:03 +00001437 enic_dev_del_station_addr(enic);
Scott Feldmanf8bd9092010-05-17 22:50:19 -07001438
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001439 for (i = 0; i < enic->wq_count; i++) {
1440 err = vnic_wq_disable(&enic->wq[i]);
1441 if (err)
1442 return err;
1443 }
1444 for (i = 0; i < enic->rq_count; i++) {
1445 err = vnic_rq_disable(&enic->rq[i]);
1446 if (err)
1447 return err;
1448 }
1449
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001450 enic_dev_notify_unset(enic);
Scott Feldman4b75a442008-09-24 11:23:53 -07001451 enic_free_intr(enic);
1452
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001453 for (i = 0; i < enic->wq_count; i++)
1454 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
1455 for (i = 0; i < enic->rq_count; i++)
1456 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
1457 for (i = 0; i < enic->cq_count; i++)
1458 vnic_cq_clean(&enic->cq[i]);
1459 for (i = 0; i < enic->intr_count; i++)
1460 vnic_intr_clean(&enic->intr[i]);
1461
1462 return 0;
1463}
1464
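/* Changing the MTU bounces the interface: RQ buffers are sized from
 * the MTU, so the rings must be drained and refilled.  Out-of-range
 * values are rejected, as are dynamic and SR-IOV VF vNICs; for those,
 * MTU updates appear to arrive from the firmware instead (see
 * enic_change_mtu_work() below).
 */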
1465static int enic_change_mtu(struct net_device *netdev, int new_mtu)
1466{
1467 struct enic *enic = netdev_priv(netdev);
1468 int running = netif_running(netdev);
1469
Scott Feldman25f0a062008-09-24 11:23:32 -07001470 if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
1471 return -EINVAL;
1472
Roopa Prabhu73359032012-01-18 04:24:02 +00001473 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
Roopa Prabhuc97c8942011-06-03 14:35:17 +00001474 return -EOPNOTSUPP;
1475
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001476 if (running)
1477 enic_stop(netdev);
1478
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001479 netdev->mtu = new_mtu;
1480
1481 if (netdev->mtu > enic->port_mtu)
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001482 netdev_warn(netdev,
1483 "interface MTU (%d) set higher than port MTU (%d)\n",
1484 netdev->mtu, enic->port_mtu);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001485
1486 if (running)
1487 enic_open(netdev);
1488
1489 return 0;
1490}
1491
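/* Firmware-driven MTU change: clamp the device-reported MTU into the
 * supported range, then quiesce, drain and refill the receive path
 * under rtnl.  Note this touches only rq[0]/cq[0]/intr[0], so it
 * assumes the single-RQ (non-RSS) configuration.
 */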
Roopa Prabhuc97c8942011-06-03 14:35:17 +00001492static void enic_change_mtu_work(struct work_struct *work)
1493{
1494 struct enic *enic = container_of(work, struct enic, change_mtu_work);
1495 struct net_device *netdev = enic->netdev;
1496 int new_mtu = vnic_dev_mtu(enic->vdev);
1497 int err;
1498 unsigned int i;
1499
1500 new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
1501
1502 rtnl_lock();
1503
1504 /* Stop RQ */
1505 del_timer_sync(&enic->notify_timer);
1506
1507 for (i = 0; i < enic->rq_count; i++)
1508 napi_disable(&enic->napi[i]);
1509
1510 vnic_intr_mask(&enic->intr[0]);
1511 enic_synchronize_irqs(enic);
1512 err = vnic_rq_disable(&enic->rq[0]);
1513 if (err) {
Konstantin Khlebnikove0575902013-07-08 11:22:51 +04001514 rtnl_unlock();
Roopa Prabhuc97c8942011-06-03 14:35:17 +00001515 netdev_err(netdev, "Unable to disable RQ.\n");
1516 return;
1517 }
1518 vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
1519 vnic_cq_clean(&enic->cq[0]);
1520 vnic_intr_clean(&enic->intr[0]);
1521
1522 /* Fill RQ with new_mtu-sized buffers */
1523 netdev->mtu = new_mtu;
1524 vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
1525 /* Need at least one buffer on ring to get going */
1526 if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
Konstantin Khlebnikove0575902013-07-08 11:22:51 +04001527 rtnl_unlock();
Roopa Prabhuc97c8942011-06-03 14:35:17 +00001528 netdev_err(netdev, "Unable to alloc receive buffers.\n");
1529 return;
1530 }
1531
1532 /* Start RQ */
1533 vnic_rq_enable(&enic->rq[0]);
1534 napi_enable(&enic->napi[0]);
1535 vnic_intr_unmask(&enic->intr[0]);
1536 enic_notify_timer_start(enic);
1537
1538 rtnl_unlock();
1539
 1540 netdev_info(netdev, "interface MTU set to %d\n", netdev->mtu);
1541}
1542
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001543#ifdef CONFIG_NET_POLL_CONTROLLER
1544static void enic_poll_controller(struct net_device *netdev)
1545{
1546 struct enic *enic = netdev_priv(netdev);
1547 struct vnic_dev *vdev = enic->vdev;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001548 unsigned int i, intr;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001549
1550 switch (vnic_dev_get_intr_mode(vdev)) {
1551 case VNIC_DEV_INTR_MODE_MSIX:
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001552 for (i = 0; i < enic->rq_count; i++) {
1553 intr = enic_msix_rq_intr(enic, i);
Vasanthy Kolluri79aeec52010-12-08 13:05:45 +00001554 enic_isr_msix_rq(enic->msix_entry[intr].vector,
1555 &enic->napi[i]);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001556 }
Vasanthy Kollurib880a952011-06-09 10:37:07 +00001557
1558 for (i = 0; i < enic->wq_count; i++) {
1559 intr = enic_msix_wq_intr(enic, i);
1560 enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
1561 }
1562
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001563 break;
1564 case VNIC_DEV_INTR_MODE_MSI:
1565 enic_isr_msi(enic->pdev->irq, enic);
1566 break;
1567 case VNIC_DEV_INTR_MODE_INTX:
1568 enic_isr_legacy(enic->pdev->irq, netdev);
1569 break;
1570 default:
1571 break;
1572 }
1573}
1574#endif
1575
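/* Generic devcmd completion poller: start() kicks off the command,
 * then finished() is polled every 100ms until it reports done or two
 * seconds elapse (-ETIMEDOUT).  It sleeps, hence the
 * BUG_ON(in_interrupt()).  Typical use, as in enic_dev_open() below:
 *
 *	err = enic_dev_wait(enic->vdev, vnic_dev_open,
 *		vnic_dev_open_done, 0);
 */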
1576static int enic_dev_wait(struct vnic_dev *vdev,
1577 int (*start)(struct vnic_dev *, int),
1578 int (*finished)(struct vnic_dev *, int *),
1579 int arg)
1580{
1581 unsigned long time;
1582 int done;
1583 int err;
1584
1585 BUG_ON(in_interrupt());
1586
1587 err = start(vdev, arg);
1588 if (err)
1589 return err;
1590
 1591 /* Wait up to 2 seconds for func to complete */
1593
1594 time = jiffies + (HZ * 2);
1595 do {
1596
1597 err = finished(vdev, &done);
1598 if (err)
1599 return err;
1600
1601 if (done)
1602 return 0;
1603
1604 schedule_timeout_uninterruptible(HZ / 10);
1605
1606 } while (time_after(time, jiffies));
1607
1608 return -ETIMEDOUT;
1609}
1610
1611static int enic_dev_open(struct enic *enic)
1612{
1613 int err;
1614
1615 err = enic_dev_wait(enic->vdev, vnic_dev_open,
1616 vnic_dev_open_done, 0);
1617 if (err)
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001618 dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
1619 err);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001620
1621 return err;
1622}
1623
Vasanthy Kolluri99ef5632010-06-24 10:50:00 +00001624static int enic_dev_hang_reset(struct enic *enic)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001625{
1626 int err;
1627
Vasanthy Kolluri99ef5632010-06-24 10:50:00 +00001628 err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
1629 vnic_dev_hang_reset_done, 0);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001630 if (err)
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001631 netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
1632 err);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001633
1634 return err;
1635}
1636
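/* Program the 40-byte RSS hash key.  The default key below is ASCII:
 * "UCSawesome", "PALOunique", "LINUXrocks", "ENICiscool".  Devcmds
 * take DMA addresses, so the key is staged in a coherent buffer,
 * handed to the firmware under devcmd_lock, then freed.
 */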
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001637static int enic_set_rsskey(struct enic *enic)
Scott Feldman68f71702009-02-09 23:24:24 -08001638{
Vasanthy Kolluri1f4f0672010-11-15 08:09:55 +00001639 dma_addr_t rss_key_buf_pa;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001640 union vnic_rss_key *rss_key_buf_va = NULL;
1641 union vnic_rss_key rss_key = {
1642 .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
1643 .key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
1644 .key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
1645 .key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
1646 };
1647 int err;
1648
1649 rss_key_buf_va = pci_alloc_consistent(enic->pdev,
1650 sizeof(union vnic_rss_key), &rss_key_buf_pa);
1651 if (!rss_key_buf_va)
1652 return -ENOMEM;
1653
1654 memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
1655
1656 spin_lock(&enic->devcmd_lock);
1657 err = enic_set_rss_key(enic,
1658 rss_key_buf_pa,
1659 sizeof(union vnic_rss_key));
1660 spin_unlock(&enic->devcmd_lock);
1661
1662 pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
1663 rss_key_buf_va, rss_key_buf_pa);
1664
1665 return err;
1666}
1667
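/* Build and program the RSS indirection table: 2^rss_hash_bits
 * entries assigned round-robin across the RQs, packed four one-byte
 * entries per cpu[] word (cpu[i/4].b[i%4]).  Same staging pattern as
 * the key: coherent buffer, devcmd under lock, free.
 */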
1668static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
1669{
Vasanthy Kolluri1f4f0672010-11-15 08:09:55 +00001670 dma_addr_t rss_cpu_buf_pa;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001671 union vnic_rss_cpu *rss_cpu_buf_va = NULL;
1672 unsigned int i;
1673 int err;
1674
1675 rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
1676 sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
1677 if (!rss_cpu_buf_va)
1678 return -ENOMEM;
1679
1680 for (i = 0; i < (1 << rss_hash_bits); i++)
1681 (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
1682
1683 spin_lock(&enic->devcmd_lock);
1684 err = enic_set_rss_cpu(enic,
1685 rss_cpu_buf_pa,
1686 sizeof(union vnic_rss_cpu));
1687 spin_unlock(&enic->devcmd_lock);
1688
1689 pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
1690 rss_cpu_buf_va, rss_cpu_buf_pa);
1691
1692 return err;
1693}
1694
1695static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
1696 u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
1697{
Scott Feldman68f71702009-02-09 23:24:24 -08001698 const u8 tso_ipid_split_en = 0;
1699 const u8 ig_vlan_strip_en = 1;
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001700 int err;
Scott Feldman68f71702009-02-09 23:24:24 -08001701
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001702 /* Apply NIC config: RSS params, TSO IP-ID split disabled,
 1703 * VLAN tag stripping enabled. */
Scott Feldman68f71702009-02-09 23:24:24 -08001704
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001705 spin_lock(&enic->devcmd_lock);
1706 err = enic_set_nic_cfg(enic,
Scott Feldman68f71702009-02-09 23:24:24 -08001707 rss_default_cpu, rss_hash_type,
1708 rss_hash_bits, rss_base_cpu,
1709 rss_enable, tso_ipid_split_en,
1710 ig_vlan_strip_en);
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001711 spin_unlock(&enic->devcmd_lock);
1712
1713 return err;
1714}
1715
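/* RSS policy: enable only when the adapter advertises RSS and more
 * than one RQ is configured, hashing IPv4/TCPv4/IPv6/TCPv6 into a
 * 128-entry indirection table (rss_hash_bits = 7).  If programming
 * the key or the table fails, fall back to non-RSS operation rather
 * than failing the device.
 */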
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001716static int enic_set_rss_nic_cfg(struct enic *enic)
1717{
1718 struct device *dev = enic_get_dev(enic);
1719 const u8 rss_default_cpu = 0;
1720 const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
1721 NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
1722 NIC_CFG_RSS_HASH_TYPE_IPV6 |
1723 NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
1724 const u8 rss_hash_bits = 7;
1725 const u8 rss_base_cpu = 0;
1726 u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
1727
1728 if (rss_enable) {
1729 if (!enic_set_rsskey(enic)) {
1730 if (enic_set_rsscpu(enic, rss_hash_bits)) {
1731 rss_enable = 0;
 1732 dev_warn(dev, "RSS disabled, failed to set "
 1733 "RSS cpu indirection table.\n");
1734 }
1735 } else {
1736 rss_enable = 0;
 1737 dev_warn(dev, "RSS disabled, failed to set RSS key.\n");
1738 }
1739 }
1740
1741 return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
1742 rss_hash_bits, rss_base_cpu, rss_enable);
1743}
1744
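/* Hardware-hang recovery, run from the workqueue: under rtnl and the
 * enic API lock, notify the firmware, stop the interface, issue a
 * hang reset, rebuild the address lists, vNIC resources, RSS and
 * VLAN-rewrite state, reopen, and announce NETDEV_REBOOT.
 */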
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001745static void enic_reset(struct work_struct *work)
1746{
1747 struct enic *enic = container_of(work, struct enic, reset);
1748
1749 if (!netif_running(enic->netdev))
1750 return;
1751
1752 rtnl_lock();
1753
Neel Patel0b038562013-08-16 15:47:40 -07001754 spin_lock(&enic->enic_api_lock);
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00001755 enic_dev_hang_notify(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001756 enic_stop(enic->netdev);
Vasanthy Kolluri99ef5632010-06-24 10:50:00 +00001757 enic_dev_hang_reset(enic);
Vasanthy Kollurie0afe532011-02-17 08:53:12 +00001758 enic_reset_addr_lists(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001759 enic_init_vnic_resources(enic);
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001760 enic_set_rss_nic_cfg(enic);
Vasanthy Kollurif8cac142010-06-24 10:49:51 +00001761 enic_dev_set_ig_vlan_rewrite_mode(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001762 enic_open(enic->netdev);
Neel Patel0b038562013-08-16 15:47:40 -07001763 spin_unlock(&enic->enic_api_lock);
Neel Pateld765bb42013-08-16 15:47:41 -07001764 call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001765
1766 rtnl_unlock();
1767}
1768
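/* Interrupt-mode ladder, in order of preference: multi-queue MSI-X
 * (n RQ + m WQ + error + notify vectors), single-RQ MSI-X, MSI, then
 * INTx.  enic->config.intr_mode sets the floor: 0 permits MSI-X, 1
 * starts at MSI, 2 starts at INTx.  Each rung also requires that
 * enough queue and interrupt resources were provisioned.
 */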
1769static int enic_set_intr_mode(struct enic *enic)
1770{
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001771 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
Vasanthy Kolluri1cbb1a62011-02-17 13:57:19 +00001772 unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001773 unsigned int i;
1774
1775 /* Set interrupt mode (INTx, MSI, MSI-X) depending
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001776 * on system capabilities.
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001777 *
1778 * Try MSI-X first
1779 *
1780 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
1781 * (the second to last INTR is used for WQ/RQ errors)
1782 * (the last INTR is used for notifications)
1783 */
1784
1785 BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
1786 for (i = 0; i < n + m + 2; i++)
1787 enic->msix_entry[i].entry = i;
1788
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001789 /* Use multiple RQs if RSS is enabled
1790 */
1791
1792 if (ENIC_SETTING(enic, RSS) &&
1793 enic->config.intr_mode < 1 &&
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001794 enic->rq_count >= n &&
1795 enic->wq_count >= m &&
1796 enic->cq_count >= n + m &&
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001797 enic->intr_count >= n + m + 2) {
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001798
Alexander Gordeevabbb6a32014-02-18 11:08:02 +01001799 if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
1800 n + m + 2, n + m + 2) > 0) {
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001801
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001802 enic->rq_count = n;
1803 enic->wq_count = m;
1804 enic->cq_count = n + m;
1805 enic->intr_count = n + m + 2;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001806
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001807 vnic_dev_set_intr_mode(enic->vdev,
1808 VNIC_DEV_INTR_MODE_MSIX);
1809
1810 return 0;
1811 }
1812 }
1813
1814 if (enic->config.intr_mode < 1 &&
1815 enic->rq_count >= 1 &&
1816 enic->wq_count >= m &&
1817 enic->cq_count >= 1 + m &&
1818 enic->intr_count >= 1 + m + 2) {
Alexander Gordeevabbb6a32014-02-18 11:08:02 +01001819 if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
1820 1 + m + 2, 1 + m + 2) > 0) {
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001821
1822 enic->rq_count = 1;
1823 enic->wq_count = m;
1824 enic->cq_count = 1 + m;
1825 enic->intr_count = 1 + m + 2;
1826
1827 vnic_dev_set_intr_mode(enic->vdev,
1828 VNIC_DEV_INTR_MODE_MSIX);
1829
1830 return 0;
1831 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07001832 }
1833
1834 /* Next try MSI
1835 *
1836 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
1837 */
1838
1839 if (enic->config.intr_mode < 2 &&
1840 enic->rq_count >= 1 &&
1841 enic->wq_count >= 1 &&
1842 enic->cq_count >= 2 &&
1843 enic->intr_count >= 1 &&
1844 !pci_enable_msi(enic->pdev)) {
1845
1846 enic->rq_count = 1;
1847 enic->wq_count = 1;
1848 enic->cq_count = 2;
1849 enic->intr_count = 1;
1850
1851 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
1852
1853 return 0;
1854 }
1855
1856 /* Next try INTx
1857 *
1858 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
1859 * (the first INTR is used for WQ/RQ)
1860 * (the second INTR is used for WQ/RQ errors)
1861 * (the last INTR is used for notifications)
1862 */
1863
1864 if (enic->config.intr_mode < 3 &&
1865 enic->rq_count >= 1 &&
1866 enic->wq_count >= 1 &&
1867 enic->cq_count >= 2 &&
1868 enic->intr_count >= 3) {
1869
1870 enic->rq_count = 1;
1871 enic->wq_count = 1;
1872 enic->cq_count = 2;
1873 enic->intr_count = 3;
1874
1875 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
1876
1877 return 0;
1878 }
1879
1880 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
1881
1882 return -EINVAL;
1883}
1884
1885static void enic_clear_intr_mode(struct enic *enic)
1886{
1887 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1888 case VNIC_DEV_INTR_MODE_MSIX:
1889 pci_disable_msix(enic->pdev);
1890 break;
1891 case VNIC_DEV_INTR_MODE_MSI:
1892 pci_disable_msi(enic->pdev);
1893 break;
1894 default:
1895 break;
1896 }
1897
1898 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
1899}
1900
Scott Feldmanf8bd9092010-05-17 22:50:19 -07001901static const struct net_device_ops enic_netdev_dynamic_ops = {
1902 .ndo_open = enic_open,
1903 .ndo_stop = enic_stop,
1904 .ndo_start_xmit = enic_hard_start_xmit,
stephen hemmingerf20530b2011-06-08 14:54:02 +00001905 .ndo_get_stats64 = enic_get_stats,
Scott Feldmanf8bd9092010-05-17 22:50:19 -07001906 .ndo_validate_addr = eth_validate_addr,
Roopa Prabhu319d7e82010-12-08 13:19:58 +00001907 .ndo_set_rx_mode = enic_set_rx_mode,
Scott Feldmanf8bd9092010-05-17 22:50:19 -07001908 .ndo_set_mac_address = enic_set_mac_address_dynamic,
1909 .ndo_change_mtu = enic_change_mtu,
Scott Feldmanf8bd9092010-05-17 22:50:19 -07001910 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
1911 .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
1912 .ndo_tx_timeout = enic_tx_timeout,
1913 .ndo_set_vf_port = enic_set_vf_port,
1914 .ndo_get_vf_port = enic_get_vf_port,
Roopa Prabhu0b1c00f2010-12-08 13:53:58 +00001915 .ndo_set_vf_mac = enic_set_vf_mac,
Scott Feldmanf8bd9092010-05-17 22:50:19 -07001916#ifdef CONFIG_NET_POLL_CONTROLLER
1917 .ndo_poll_controller = enic_poll_controller,
1918#endif
1919};
1920
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08001921static const struct net_device_ops enic_netdev_ops = {
1922 .ndo_open = enic_open,
1923 .ndo_stop = enic_stop,
Stephen Hemminger00829822008-11-20 20:14:53 -08001924 .ndo_start_xmit = enic_hard_start_xmit,
stephen hemmingerf20530b2011-06-08 14:54:02 +00001925 .ndo_get_stats64 = enic_get_stats,
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08001926 .ndo_validate_addr = eth_validate_addr,
Scott Feldmanf8bd9092010-05-17 22:50:19 -07001927 .ndo_set_mac_address = enic_set_mac_address,
Roopa Prabhu319d7e82010-12-08 13:19:58 +00001928 .ndo_set_rx_mode = enic_set_rx_mode,
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08001929 .ndo_change_mtu = enic_change_mtu,
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08001930 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
1931 .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
1932 .ndo_tx_timeout = enic_tx_timeout,
Roopa Prabhu3f192792011-09-22 03:44:43 +00001933 .ndo_set_vf_port = enic_set_vf_port,
1934 .ndo_get_vf_port = enic_get_vf_port,
1935 .ndo_set_vf_mac = enic_set_vf_mac,
Stephen Hemmingerafe29f72008-11-19 22:23:26 -08001936#ifdef CONFIG_NET_POLL_CONTROLLER
1937 .ndo_poll_controller = enic_poll_controller,
1938#endif
1939};
1940
Vasanthy Kolluri2fdba382010-09-30 13:35:45 +00001941static void enic_dev_deinit(struct enic *enic)
Scott Feldman6fdfa972009-09-03 17:02:45 +00001942{
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001943 unsigned int i;
1944
1945 for (i = 0; i < enic->rq_count; i++)
1946 netif_napi_del(&enic->napi[i]);
1947
Scott Feldman6fdfa972009-09-03 17:02:45 +00001948 enic_free_vnic_resources(enic);
1949 enic_clear_intr_mode(enic);
1950}
1951
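/* One-time vNIC bring-up: read the interrupt coalesce timer info
 * (falling back to a default conversion factor), fetch the vNIC
 * config and resource counts, pick an interrupt mode, allocate and
 * initialize the queue resources, program RSS, and register NAPI
 * contexts (one per RQ under MSI-X, a single context otherwise,
 * weight 64).
 */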
Vasanthy Kolluri2fdba382010-09-30 13:35:45 +00001952static int enic_dev_init(struct enic *enic)
Scott Feldman6fdfa972009-09-03 17:02:45 +00001953{
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001954 struct device *dev = enic_get_dev(enic);
Scott Feldman6fdfa972009-09-03 17:02:45 +00001955 struct net_device *netdev = enic->netdev;
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00001956 unsigned int i;
Scott Feldman6fdfa972009-09-03 17:02:45 +00001957 int err;
1958
Vasanthy Kolluriea7ea652011-06-17 07:56:48 +00001959 /* Get interrupt coalesce timer info */
1960 err = enic_dev_intr_coal_timer_info(enic);
1961 if (err) {
1962 dev_warn(dev, "Using default conversion factor for "
1963 "interrupt coalesce timer\n");
1964 vnic_dev_intr_coal_timer_info_default(enic->vdev);
1965 }
1966
Scott Feldman6fdfa972009-09-03 17:02:45 +00001967 /* Get vNIC configuration
1968 */
1969
1970 err = enic_get_vnic_config(enic);
1971 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001972 dev_err(dev, "Get vNIC configuration failed, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00001973 return err;
1974 }
1975
1976 /* Get available resource counts
1977 */
1978
1979 enic_get_res_counts(enic);
1980
1981 /* Set interrupt mode based on resource counts and system
1982 * capabilities
1983 */
1984
1985 err = enic_set_intr_mode(enic);
1986 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001987 dev_err(dev, "Failed to set intr mode based on resource "
1988 "counts and system capabilities, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00001989 return err;
1990 }
1991
1992 /* Allocate and configure vNIC resources
1993 */
1994
1995 err = enic_alloc_vnic_resources(enic);
1996 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00001997 dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00001998 goto err_out_free_vnic_resources;
1999 }
2000
2001 enic_init_vnic_resources(enic);
2002
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002003 err = enic_set_rss_nic_cfg(enic);
Scott Feldman6fdfa972009-09-03 17:02:45 +00002004 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002005 dev_err(dev, "Failed to config nic, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002006 goto err_out_free_vnic_resources;
2007 }
2008
2009 switch (vnic_dev_get_intr_mode(enic->vdev)) {
2010 default:
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002011 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
Scott Feldman6fdfa972009-09-03 17:02:45 +00002012 break;
2013 case VNIC_DEV_INTR_MODE_MSIX:
Vasanthy Kolluri717258b2010-10-20 10:16:59 +00002014 for (i = 0; i < enic->rq_count; i++)
2015 netif_napi_add(netdev, &enic->napi[i],
2016 enic_poll_msix, 64);
Scott Feldman6fdfa972009-09-03 17:02:45 +00002017 break;
2018 }
2019
2020 return 0;
2021
2022err_out_free_vnic_resources:
2023 enic_clear_intr_mode(enic);
2024 enic_free_vnic_resources(enic);
2025
2026 return err;
2027}
2028
Scott Feldman27e6c7d2009-09-03 17:01:53 +00002029static void enic_iounmap(struct enic *enic)
2030{
2031 unsigned int i;
2032
2033 for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
2034 if (enic->bar[i].vaddr)
2035 iounmap(enic->bar[i].vaddr);
2036}
2037
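/* PCI probe: allocate the multiqueue netdev, map BAR0-5, register the
 * vNIC device, optionally enable SR-IOV and allocate port-profile
 * state, open the device, set the ingress VLAN rewrite mode, init the
 * device, then register the netdev.  The error labels unwind in
 * reverse order of setup.
 */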
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00002038static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002039{
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002040 struct device *dev = &pdev->dev;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002041 struct net_device *netdev;
2042 struct enic *enic;
2043 int using_dac = 0;
2044 unsigned int i;
2045 int err;
Roopa Prabhu8749b422011-09-22 03:44:33 +00002046#ifdef CONFIG_PCI_IOV
2047 int pos = 0;
2048#endif
Roopa Prabhub67f2312012-01-19 22:25:36 +00002049 int num_pps = 1;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002050
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002051 /* Allocate net device structure and initialize. Private
2052 * instance data is initialized to zero.
2053 */
2054
govindarajulu.v822473b2013-09-04 11:17:14 +05302055 netdev = alloc_etherdev_mqs(sizeof(struct enic),
2056 ENIC_RQ_MAX, ENIC_WQ_MAX);
Joe Perches41de8d42012-01-29 13:47:52 +00002057 if (!netdev)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002058 return -ENOMEM;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002059
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002060 pci_set_drvdata(pdev, netdev);
2061
2062 SET_NETDEV_DEV(netdev, &pdev->dev);
2063
2064 enic = netdev_priv(netdev);
2065 enic->netdev = netdev;
2066 enic->pdev = pdev;
2067
2068 /* Setup PCI resources
2069 */
2070
Vasanthy Kolluri29046f92010-06-24 10:52:26 +00002071 err = pci_enable_device_mem(pdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002072 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002073 dev_err(dev, "Cannot enable PCI device, aborting\n");
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002074 goto err_out_free_netdev;
2075 }
2076
2077 err = pci_request_regions(pdev, DRV_NAME);
2078 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002079 dev_err(dev, "Cannot request PCI regions, aborting\n");
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002080 goto err_out_disable_device;
2081 }
2082
2083 pci_set_master(pdev);
2084
2085 /* Query PCI controller on system for DMA addressing
govindarajulu.v624dbf52013-09-04 11:17:16 +05302086 * limitation for the device. Try 64-bit first, and
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002087 * fall back to 32-bit.
2088 */
2089
govindarajulu.v624dbf52013-09-04 11:17:16 +05302090 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002091 if (err) {
Yang Hongyang284901a2009-04-06 19:01:15 -07002092 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002093 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002094 dev_err(dev, "No usable DMA configuration, aborting\n");
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002095 goto err_out_release_regions;
2096 }
Yang Hongyang284901a2009-04-06 19:01:15 -07002097 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002098 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002099 dev_err(dev, "Unable to obtain %u-bit DMA "
2100 "for consistent allocations, aborting\n", 32);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002101 goto err_out_release_regions;
2102 }
2103 } else {
govindarajulu.v624dbf52013-09-04 11:17:16 +05302104 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002105 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002106 dev_err(dev, "Unable to obtain %u-bit DMA "
govindarajulu.v624dbf52013-09-04 11:17:16 +05302107 "for consistent allocations, aborting\n", 64);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002108 goto err_out_release_regions;
2109 }
2110 using_dac = 1;
2111 }
2112
Scott Feldman27e6c7d2009-09-03 17:01:53 +00002113 /* Map vNIC resources from BAR0-5
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002114 */
2115
Scott Feldman27e6c7d2009-09-03 17:01:53 +00002116 for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
2117 if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
2118 continue;
2119 enic->bar[i].len = pci_resource_len(pdev, i);
2120 enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
2121 if (!enic->bar[i].vaddr) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002122 dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
Scott Feldman27e6c7d2009-09-03 17:01:53 +00002123 err = -ENODEV;
2124 goto err_out_iounmap;
2125 }
2126 enic->bar[i].bus_addr = pci_resource_start(pdev, i);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002127 }
2128
2129 /* Register vNIC device
2130 */
2131
Scott Feldman27e6c7d2009-09-03 17:01:53 +00002132 enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
2133 ARRAY_SIZE(enic->bar));
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002134 if (!enic->vdev) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002135 dev_err(dev, "vNIC registration failed, aborting\n");
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002136 err = -ENODEV;
2137 goto err_out_iounmap;
2138 }
2139
Roopa Prabhu8749b422011-09-22 03:44:33 +00002140#ifdef CONFIG_PCI_IOV
2141 /* Get number of subvnics */
2142 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
2143 if (pos) {
2144 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
Dan Carpenter413708b2012-02-29 21:19:54 +00002145 &enic->num_vfs);
Roopa Prabhu8749b422011-09-22 03:44:33 +00002146 if (enic->num_vfs) {
2147 err = pci_enable_sriov(pdev, enic->num_vfs);
2148 if (err) {
2149 dev_err(dev, "SRIOV enable failed, aborting."
2150 " pci_enable_sriov() returned %d\n",
2151 err);
2152 goto err_out_vnic_unregister;
2153 }
2154 enic->priv_flags |= ENIC_SRIOV_ENABLED;
Roopa Prabhub67f2312012-01-19 22:25:36 +00002155 num_pps = enic->num_vfs;
Roopa Prabhu8749b422011-09-22 03:44:33 +00002156 }
2157 }
Roopa Prabhu8749b422011-09-22 03:44:33 +00002158#endif
Roopa Prabhuca2b7212012-01-18 04:24:07 +00002159
Roopa Prabhu3f192792011-09-22 03:44:43 +00002160 /* Allocate structure for port profiles */
Thomas Meyera1de2212011-11-29 11:08:00 +00002161 enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
Roopa Prabhu3f192792011-09-22 03:44:43 +00002162 if (!enic->pp) {
Roopa Prabhu3f192792011-09-22 03:44:43 +00002163 err = -ENOMEM;
Roopa Prabhuca2b7212012-01-18 04:24:07 +00002164 goto err_out_disable_sriov_pp;
Roopa Prabhu3f192792011-09-22 03:44:43 +00002165 }
2166
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002167 /* Issue device open to get device in known state
2168 */
2169
2170 err = enic_dev_open(enic);
2171 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002172 dev_err(dev, "vNIC dev open failed, aborting\n");
Roopa Prabhuca2b7212012-01-18 04:24:07 +00002173 goto err_out_disable_sriov;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002174 }
2175
Vasanthy Kolluri69161422011-02-04 16:17:16 +00002176 /* Setup devcmd lock
2177 */
2178
2179 spin_lock_init(&enic->devcmd_lock);
Neel Patel0b038562013-08-16 15:47:40 -07002180 spin_lock_init(&enic->enic_api_lock);
Vasanthy Kolluri69161422011-02-04 16:17:16 +00002181
2182 /*
2183 * Set ingress vlan rewrite mode before vnic initialization
2184 */
2185
2186 err = enic_dev_set_ig_vlan_rewrite_mode(enic);
2187 if (err) {
2188 dev_err(dev,
2189 "Failed to set ingress vlan rewrite mode, aborting.\n");
2190 goto err_out_dev_close;
2191 }
2192
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002193 /* Issue device init to initialize the vnic-to-switch link.
2194 * We'll start with carrier off and wait for link UP
2195 * notification later to turn on carrier. We don't need
2196 * to wait here for the vnic-to-switch link initialization
2197 * to complete; link UP notification is the indication that
2198 * the process is complete.
2199 */
2200
2201 netif_carrier_off(netdev);
2202
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002203 /* Do not call dev_init for a dynamic vnic.
2204 * For a dynamic vnic, init_prov_info will be
2205 * called later by an upper layer.
2206 */
2207
Roopa Prabhu2b68c182012-02-20 00:12:04 +00002208 if (!enic_is_dynamic(enic)) {
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002209 err = vnic_dev_init(enic->vdev, 0);
2210 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002211 dev_err(dev, "vNIC dev init failed, aborting\n");
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002212 goto err_out_dev_close;
2213 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002214 }
2215
Scott Feldman6fdfa972009-09-03 17:02:45 +00002216 err = enic_dev_init(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002217 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002218 dev_err(dev, "Device initialization failed, aborting\n");
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002219 goto err_out_dev_close;
2220 }
2221
govindarajulu.v822473b2013-09-04 11:17:14 +05302222 netif_set_real_num_tx_queues(netdev, enic->wq_count);
govindarajulu.vbf751ba2013-09-04 11:17:15 +05302223 netif_set_real_num_rx_queues(netdev, enic->rq_count);
govindarajulu.v822473b2013-09-04 11:17:14 +05302224
Vasanthy Kolluri383ab922010-06-24 10:50:12 +00002225 /* Setup notification timer, HW reset task, and wq locks
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002226 */
2227
2228 init_timer(&enic->notify_timer);
2229 enic->notify_timer.function = enic_notify_timer;
2230 enic->notify_timer.data = (unsigned long)enic;
2231
2232 INIT_WORK(&enic->reset, enic_reset);
Roopa Prabhuc97c8942011-06-03 14:35:17 +00002233 INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002234
2235 for (i = 0; i < enic->wq_count; i++)
2236 spin_lock_init(&enic->wq_lock[i]);
2237
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002238 /* Register net device
2239 */
2240
2241 enic->port_mtu = enic->config.mtu;
2242 (void)enic_change_mtu(netdev, enic->port_mtu);
2243
2244 err = enic_set_mac_addr(netdev, enic->mac_addr);
2245 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002246 dev_err(dev, "Invalid MAC address, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002247 goto err_out_dev_deinit;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002248 }
2249
Scott Feldman7c844592009-12-23 13:27:54 +00002250 enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
2251 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
2252
Roopa Prabhu73359032012-01-18 04:24:02 +00002253 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
Scott Feldmanf8bd9092010-05-17 22:50:19 -07002254 netdev->netdev_ops = &enic_netdev_dynamic_ops;
2255 else
2256 netdev->netdev_ops = &enic_netdev_ops;
2257
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002258 netdev->watchdog_timeo = 2 * HZ;
Neel Patelf13bbc22013-07-22 09:59:18 -07002259 enic_set_ethtool_ops(netdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002260
Patrick McHardyf6469682013-04-19 02:04:27 +00002261 netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
Vasanthy Kolluri1825aca2010-06-24 10:51:59 +00002262 if (ENIC_SETTING(enic, LOOP)) {
Patrick McHardyf6469682013-04-19 02:04:27 +00002263 netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
Vasanthy Kolluri1825aca2010-06-24 10:51:59 +00002264 enic->loop_enable = 1;
2265 enic->loop_tag = enic->config.loop_tag;
2266 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
2267 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002268 if (ENIC_SETTING(enic, TXCSUM))
Michał Mirosław5ec8f9b2011-04-07 02:43:48 +00002269 netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002270 if (ENIC_SETTING(enic, TSO))
Michał Mirosław5ec8f9b2011-04-07 02:43:48 +00002271 netdev->hw_features |= NETIF_F_TSO |
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002272 NETIF_F_TSO6 | NETIF_F_TSO_ECN;
govindarajulu.vbf751ba2013-09-04 11:17:15 +05302273 if (ENIC_SETTING(enic, RSS))
2274 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław5ec8f9b2011-04-07 02:43:48 +00002275 if (ENIC_SETTING(enic, RXCSUM))
2276 netdev->hw_features |= NETIF_F_RXCSUM;
2277
2278 netdev->features |= netdev->hw_features;
2279
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002280 if (using_dac)
2281 netdev->features |= NETIF_F_HIGHDMA;
2282
Jiri Pirko01789342011-08-16 06:29:00 +00002283 netdev->priv_flags |= IFF_UNICAST_FLT;
2284
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002285 err = register_netdev(netdev);
2286 if (err) {
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002287 dev_err(dev, "Cannot register net device, aborting\n");
Scott Feldman6fdfa972009-09-03 17:02:45 +00002288 goto err_out_dev_deinit;
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002289 }
2290
2291 return 0;
2292
Scott Feldman6fdfa972009-09-03 17:02:45 +00002293err_out_dev_deinit:
2294 enic_dev_deinit(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002295err_out_dev_close:
2296 vnic_dev_close(enic->vdev);
Roopa Prabhu8749b422011-09-22 03:44:33 +00002297err_out_disable_sriov:
Roopa Prabhuca2b7212012-01-18 04:24:07 +00002298 kfree(enic->pp);
2299err_out_disable_sriov_pp:
Roopa Prabhu8749b422011-09-22 03:44:33 +00002300#ifdef CONFIG_PCI_IOV
2301 if (enic_sriov_enabled(enic)) {
2302 pci_disable_sriov(pdev);
2303 enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
2304 }
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002305err_out_vnic_unregister:
Roopa Prabhu8749b422011-09-22 03:44:33 +00002306#endif
Roopa Prabhu35d87e32012-01-18 04:24:12 +00002307 vnic_dev_unregister(enic->vdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002308err_out_iounmap:
2309 enic_iounmap(enic);
2310err_out_release_regions:
2311 pci_release_regions(pdev);
2312err_out_disable_device:
2313 pci_disable_device(pdev);
2314err_out_free_netdev:
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002315 free_netdev(netdev);
2316
2317 return err;
2318}
2319
Bill Pemberton854de922012-12-03 09:23:05 -05002320static void enic_remove(struct pci_dev *pdev)
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002321{
2322 struct net_device *netdev = pci_get_drvdata(pdev);
2323
2324 if (netdev) {
2325 struct enic *enic = netdev_priv(netdev);
2326
Tejun Heo23f333a2010-12-12 16:45:14 +01002327 cancel_work_sync(&enic->reset);
Roopa Prabhuc97c8942011-06-03 14:35:17 +00002328 cancel_work_sync(&enic->change_mtu_work);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002329 unregister_netdev(netdev);
Scott Feldman6fdfa972009-09-03 17:02:45 +00002330 enic_dev_deinit(enic);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002331 vnic_dev_close(enic->vdev);
Roopa Prabhu8749b422011-09-22 03:44:33 +00002332#ifdef CONFIG_PCI_IOV
2333 if (enic_sriov_enabled(enic)) {
2334 pci_disable_sriov(pdev);
2335 enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
2336 }
2337#endif
Roopa Prabhu3f192792011-09-22 03:44:43 +00002338 kfree(enic->pp);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002339 vnic_dev_unregister(enic->vdev);
2340 enic_iounmap(enic);
2341 pci_release_regions(pdev);
2342 pci_disable_device(pdev);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002343 free_netdev(netdev);
2344 }
2345}
2346
2347static struct pci_driver enic_driver = {
2348 .name = DRV_NAME,
2349 .id_table = enic_id_table,
2350 .probe = enic_probe,
Bill Pemberton854de922012-12-03 09:23:05 -05002351 .remove = enic_remove,
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002352};
2353
2354static int __init enic_init_module(void)
2355{
Vasanthy Kolluria7a79de2010-06-24 10:50:56 +00002356 pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
Scott Feldman01f2e4e2008-09-15 09:17:11 -07002357
2358 return pci_register_driver(&enic_driver);
2359}
2360
2361static void __exit enic_cleanup_module(void)
2362{
2363 pci_unregister_driver(&enic_driver);
2364}
2365
2366module_init(enic_init_module);
2367module_exit(enic_cleanup_module);