/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)

#define PCI_DEVICE_ID_CISCO_VIC_ENET		0x0043	/* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF		0x0071	/* enet SRIOV VF */

/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

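/* Helpers that identify the vNIC personality (dynamic, SR-IOV VF) from the
 * PCI device ID and driver flags, and validate VF indices.
 */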
int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
	return vf >= 0 && vf < enic->num_vfs;
#else
	return 0;
#endif
}

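/* Transmit completion: unmap the DMA buffer (single mapping for the skb
 * head, page mapping for fragments) and free the skb.
 */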
static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

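/* Per-WQ completion servicing: free completed send buffers and wake the
 * TX queue once enough descriptors are available for a worst-case skb.
 */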
static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_queue_stopped(enic->netdev) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_queue(enic->netdev);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}

static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				i, error_status);
	}
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					"interface MTU (%d) set higher "
					"than switch port MTU (%d)\n",
					netdev->mtu, mtu);
		}
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))

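/* Legacy INTx interrupt handler: the PBA (pending bit array) is read to
 * demultiplex the notify, error and I/O (WQ/RQ) interrupt sources that
 * share the single vector.
 */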
static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr)) {
		if (napi_schedule_prep(&enic->napi[0]))
			__napi_schedule(&enic->napi[0]);
	} else {
		vnic_intr_unmask(&enic->intr[io_intr]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it.  The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated.  Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule(&enic->napi[0]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct napi_struct *napi = data;

	/* schedule NAPI polling for RQ cleanup */
	napi_schedule(napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int cq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_msix_wq_intr(enic, 0);
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int wq_work_done;

	wq_work_done = vnic_cq_service(&enic->cq[cq],
		wq_work_to_do, enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[intr],
		wq_work_done,
		1 /* unmask intr */,
		1 /* reset intr timer */);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}

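/* TX descriptor queuing helpers. Each helper maps the skb head and any
 * page fragments for DMA and posts descriptors to the work queue (WQ).
 */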
static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left, int loopback)
{
	const skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		enic_queue_wq_desc_cont(wq, skb,
			skb_frag_dma_map(&enic->pdev->dev,
					 frag, 0, skb_frag_size(frag),
					 DMA_TO_DEVICE),
			skb_frag_size(frag),
			(len_left == 0),	/* EOP? */
			loopback);
	}
}

static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero.  HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
				len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		frag_len_left = skb_frag_size(frag);
		offset = 0;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
						    offset, len,
						    DMA_TO_DEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				(len == frag_len_left),	/* EOP? */
				loopback);
			frag_len_left -= len;
			offset += len;
		}
	}
}

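/* Pick the TSO, L4 checksum offload or plain VLAN queuing path based on
 * the skb's GSO and checksum state.
 */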
static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;

	if (vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag, loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
}

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
						struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;

	enic_dev_stats_dump(enic, &stats);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}

void enic_reset_addr_lists(struct enic *enic)
{
	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}

static void enic_update_multicast_addr_list(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	struct netdev_hw_addr *ha;
	unsigned int mc_count = netdev_mc_count(netdev);
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
		netdev_warn(netdev, "Registering only %d out of %d "
			"multicast addresses\n",
			ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
	}

	/* Is there an easier way?  Trying to minimize the
	 * calls to add/del multicast addrs.  We keep the
	 * addrs from the last call in enic->mc_addr and
	 * look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
	}

	for (i = 0; i < enic->mc_count; i++) {
		for (j = 0; j < mc_count; j++)
			if (ether_addr_equal(enic->mc_addr[i], mc_addr[j]))
				break;
		if (j == mc_count)
			enic_dev_del_addr(enic, enic->mc_addr[i]);
	}

	for (i = 0; i < mc_count; i++) {
		for (j = 0; j < enic->mc_count; j++)
			if (ether_addr_equal(mc_addr[i], enic->mc_addr[j]))
				break;
		if (j == enic->mc_count)
			enic_dev_add_addr(enic, mc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < mc_count; i++)
		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

	enic->mc_count = mc_count;
}

static void enic_update_unicast_addr_list(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	struct netdev_hw_addr *ha;
	unsigned int uc_count = netdev_uc_count(netdev);
	u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
		netdev_warn(netdev, "Registering only %d out of %d "
			"unicast addresses\n",
			ENIC_UNICAST_PERFECT_FILTERS, uc_count);
		uc_count = ENIC_UNICAST_PERFECT_FILTERS;
	}

	/* Is there an easier way?  Trying to minimize the
	 * calls to add/del unicast addrs.  We keep the
	 * addrs from the last call in enic->uc_addr and
	 * look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_uc_addr(ha, netdev) {
		if (i == uc_count)
			break;
		memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
	}

	for (i = 0; i < enic->uc_count; i++) {
		for (j = 0; j < uc_count; j++)
			if (ether_addr_equal(enic->uc_addr[i], uc_addr[j]))
				break;
		if (j == uc_count)
			enic_dev_del_addr(enic, enic->uc_addr[i]);
	}

	for (i = 0; i < uc_count; i++) {
		for (j = 0; j < enic->uc_count; j++)
			if (ether_addr_equal(uc_addr[i], enic->uc_addr[j]))
				break;
		if (j == enic->uc_count)
			enic_dev_add_addr(enic, uc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < uc_count; i++)
		memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);

	enic->uc_count = uc_count;
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		enic_update_unicast_addr_list(enic);
		if (!allmulti)
			enic_update_multicast_addr_list(enic);
	}
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
		if (vf == PORT_SELF_VF) {
			memcpy(pp->vf_mac, mac, ETH_ALEN);
			return 0;
		} else {
			/*
			 * For sriov vf's set the mac in hw
			 */
			ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
				vnic_dev_set_mac_addr, mac);
			return enic_dev_status_to_errno(err);
		}
	} else
		return -EINVAL;
}

static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	struct enic_port_profile *pp;
	int err = 0, restore_pp = 1;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, pp, sizeof(*enic->pp));
	memset(pp, 0, sizeof(*enic->pp));

	pp->set |= ENIC_SET_REQUEST;
	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		pp->set |= ENIC_SET_NAME;
		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		pp->set |= ENIC_SET_INSTANCE;
		memcpy(pp->instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		pp->set |= ENIC_SET_HOST;
		memcpy(pp->host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	if (vf == PORT_SELF_VF) {
		/* Special case handling: mac came from IFLA_VF_MAC */
		if (!is_zero_ether_addr(prev_pp.vf_mac))
			memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

		if (is_zero_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	} else {
		/* SR-IOV VF: get mac from adapter */
		ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
			vnic_dev_get_mac_addr, pp->mac_addr);
		if (err) {
			netdev_err(netdev, "Error getting mac for vf %d\n", vf);
			memcpy(pp, &prev_pp, sizeof(*pp));
			return enic_dev_status_to_errno(err);
		}
	}

	err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(pp, &prev_pp, sizeof(*pp));
		} else {
			memset(pp, 0, sizeof(*pp));
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		pp->set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
			memset(pp->mac_addr, 0, ETH_ALEN);
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	}

	if (vf == PORT_SELF_VF)
		memset(pp->vf_mac, 0, ETH_ALEN);

	return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, vf, pp->request, &response);
	if (err)
		return err;

	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
	    ((pp->set & ENIC_SET_NAME) &&
	    nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
	    ((pp->set & ENIC_SET_INSTANCE) &&
	    nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
		    pp->instance_uuid)) ||
	    ((pp->set & ENIC_SET_HOST) &&
	    nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
}

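/* Allocate and map a receive buffer sized for the current MTU plus the
 * VLAN Ethernet header, and post it to the receive queue (RQ).
 */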
static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}

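/* Receive completion: decode the CQ descriptor, account errors, and hand
 * good frames to the stack via GRO or netif_receive_skb.
 */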
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);

		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

		if (netdev->features & NETIF_F_GRO)
			napi_gro_receive(&enic->napi[q_number], skb);
		else
			netif_receive_skb(skb);
	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}

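/* NAPI poll used for legacy INTx and MSI: services both RQ and WQ
 * completions on the single interrupt, refills the RQ, and re-enables the
 * interrupt when under budget.
 */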
static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_legacy_io_intr();
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int work_done, rq_work_done, wq_work_done;
	int err;

	/* Service RQ (first) and WQ
	 */

	rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return rq_work_done;
}

static int enic_poll_msix(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = budget;
	unsigned int work_done;
	int err;

	/* Service RQ
	 */

	work_done = vnic_cq_service(&enic->cq[cq],
		work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle.  An intr event is the completion of an
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;

	if (work_done < work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return work_done;
}

static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}

static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}

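/* Register interrupt handlers according to the negotiated interrupt mode
 * (INTx, MSI, or per-queue MSI-X vectors).
 */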
static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i, intr;
	int err = 0;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%.11s-rx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix_rq;
			enic->msix[intr].devid = &enic->napi[i];
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%.11s-tx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix_wq;
			enic->msix[intr].devid = enic;
		}

		intr = enic_msix_err_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%.11s-err", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_err;
		enic->msix[intr].devid = enic;

		intr = enic_msix_notify_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%.11s-notify", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_notify;
		enic->msix[intr].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			enic->msix[i].requested = 0;

		for (i = 0; i < enic->intr_count; i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}

static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}

static int enic_dev_notify_set(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_legacy_notify_intr());
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_msix_notify_intr(enic));
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock(&enic->devcmd_lock);

	return err;
}

static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}

/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		netdev_err(netdev, "Unable to request irq.\n");
		return err;
	}

	err = enic_dev_notify_set(enic);
	if (err) {
		netdev_err(netdev,
			"Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			netdev_err(netdev, "Unable to alloc receive buffers\n");
			err = -ENOMEM;
			goto err_out_notify_unset;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_add_station_addr(enic);

	enic_set_rx_mode(netdev);

	netif_wake_queue(netdev);

	for (i = 0; i < enic->rq_count; i++)
		napi_enable(&enic->napi[i]);

	enic_dev_enable(enic);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);

	return 0;

err_out_notify_unset:
	enic_dev_notify_unset(enic);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}

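/* Teardown mirrors enic_open in reverse: mask and flush interrupts, stop
 * the notify timer, NAPI and the queues, then clean every ring so no
 * buffers are leaked.
 */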
/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
	}

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);

	enic_dev_disable(enic);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_del_station_addr(enic);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	enic_dev_notify_unset(enic);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}

static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		return -EOPNOTSUPP;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		netdev_warn(netdev,
			"interface MTU (%d) set higher than port MTU (%d)\n",
			netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}

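/* Deferred worker used when the firmware reports a new port MTU: quiesce
 * RQ 0, refill it with buffers sized for the new MTU, and bring it back up.
 * The RQ/CQ/interrupt handling here operates on index 0 only, so this path
 * assumes a single-RQ configuration.
 */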
static void enic_change_mtu_work(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, change_mtu_work);
	struct net_device *netdev = enic->netdev;
	int new_mtu = vnic_dev_mtu(enic->vdev);
	int err;
	unsigned int i;

	new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));

	rtnl_lock();

	/* Stop RQ */
	del_timer_sync(&enic->notify_timer);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	vnic_intr_mask(&enic->intr[0]);
	enic_synchronize_irqs(enic);
	err = vnic_rq_disable(&enic->rq[0]);
	if (err) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to disable RQ.\n");
		return;
	}
	vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
	vnic_cq_clean(&enic->cq[0]);
	vnic_intr_clean(&enic->intr[0]);

	/* Fill RQ with new_mtu-sized buffers */
	netdev->mtu = new_mtu;
	vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
	/* Need at least one buffer on ring to get going */
	if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to alloc receive buffers.\n");
		return;
	}

	/* Start RQ */
	vnic_rq_enable(&enic->rq[0]);
	napi_enable(&enic->napi[0]);
	vnic_intr_unmask(&enic->intr[0]);
	enic_notify_timer_start(enic);

	rtnl_unlock();

	netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;
	unsigned int i, intr;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			enic_isr_msix_rq(enic->msix_entry[intr].vector,
				&enic->napi[i]);
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
		}

		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif

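/* Poll a two-phase firmware command (start it, then repeatedly check for
 * completion) for up to two seconds, sleeping roughly 100 ms between checks.
 */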
static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
			err);

	return err;
}

static int enic_dev_hang_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
		vnic_dev_hang_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
			err);

	return err;
}

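/* Program the RSS hash key. The fixed default key below is copied into a
 * DMA-coherent buffer because the firmware reads it by address through the
 * devcmd interface; the byte values are printable ASCII.
 */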
static int enic_set_rsskey(struct enic *enic)
{
	dma_addr_t rss_key_buf_pa;
	union vnic_rss_key *rss_key_buf_va = NULL;
	union vnic_rss_key rss_key = {
		.key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
		.key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
		.key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
		.key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
	};
	int err;

	rss_key_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_key), &rss_key_buf_pa);
	if (!rss_key_buf_va)
		return -ENOMEM;

	memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));

	spin_lock(&enic->devcmd_lock);
	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));
	spin_unlock(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
		rss_key_buf_va, rss_key_buf_pa);

	return err;
}

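/* Build the RSS indirection table: entry i steers hash bucket i to
 * RQ (i % rq_count), spreading buckets round-robin across the receive
 * queues.
 */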
static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	unsigned int i;
	int err;

	rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	for (i = 0; i < (1 << rss_hash_bits); i++)
		(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;

	spin_lock(&enic->devcmd_lock);
	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));
	spin_unlock(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
		rss_cpu_buf_va, rss_cpu_buf_pa);

	return err;
}

static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;
	int err;

	/* Enable VLAN tag stripping.
	 */

	spin_lock(&enic->devcmd_lock);
	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	spin_unlock(&enic->devcmd_lock);

	return err;
}

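/* Top-level RSS setup: hash IPv4/IPv6 and TCP over both, use a 128-entry
 * (7-bit) indirection table, and enable RSS only when the adapter supports
 * it and more than one RQ is configured. Failures to program the key or
 * table fall back to non-RSS operation instead of failing initialization.
 */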
static int enic_set_rss_nic_cfg(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_IPV6 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
	const u8 rss_hash_bits = 7;
	const u8 rss_base_cpu = 0;
	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

	if (rss_enable) {
		if (!enic_set_rsskey(enic)) {
			if (enic_set_rsscpu(enic, rss_hash_bits)) {
				rss_enable = 0;
				dev_warn(dev, "RSS disabled, "
					"Failed to set RSS cpu indirection table.");
			}
		} else {
			rss_enable = 0;
			dev_warn(dev, "RSS disabled, Failed to set RSS key.\n");
		}
	}

	return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu, rss_enable);
}

static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	enic_dev_hang_notify(enic);
	enic_stop(enic->netdev);
	enic_dev_hang_reset(enic);
	enic_reset_addr_lists(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);

	rtnl_unlock();
}

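/* Pick an interrupt mode in order of preference: multi-queue MSI-X (when
 * RSS is available), single-RQ MSI-X, MSI, then legacy INTx. Each step is
 * tried only if the adapter exposes enough RQs/WQs/CQs/INTRs for that
 * layout, and the queue counts are trimmed to match the mode that sticks.
 */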
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * on system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	/* Use multiple RQs if RSS is enabled
	 */

	if (ENIC_SETTING(enic, RSS) &&
	    enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2) {

		if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {

			enic->rq_count = n;
			enic->wq_count = m;
			enic->cq_count = n + m;
			enic->intr_count = n + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= m &&
	    enic->cq_count >= 1 + m &&
	    enic->intr_count >= 1 + m + 2) {
		if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) {

			enic->rq_count = 1;
			enic->wq_count = m;
			enic->cq_count = 1 + m;
			enic->intr_count = 1 + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}

static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}

static const struct net_device_ops enic_netdev_dynamic_ops = {
	.ndo_open = enic_open,
	.ndo_stop = enic_stop,
	.ndo_start_xmit = enic_hard_start_xmit,
	.ndo_get_stats64 = enic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = enic_set_rx_mode,
	.ndo_set_mac_address = enic_set_mac_address_dynamic,
	.ndo_change_mtu = enic_change_mtu,
	.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
	.ndo_tx_timeout = enic_tx_timeout,
	.ndo_set_vf_port = enic_set_vf_port,
	.ndo_get_vf_port = enic_get_vf_port,
	.ndo_set_vf_mac = enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = enic_poll_controller,
#endif
};

static const struct net_device_ops enic_netdev_ops = {
	.ndo_open = enic_open,
	.ndo_stop = enic_stop,
	.ndo_start_xmit = enic_hard_start_xmit,
	.ndo_get_stats64 = enic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = enic_set_mac_address,
	.ndo_set_rx_mode = enic_set_rx_mode,
	.ndo_change_mtu = enic_change_mtu,
	.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
	.ndo_tx_timeout = enic_tx_timeout,
	.ndo_set_vf_port = enic_set_vf_port,
	.ndo_get_vf_port = enic_get_vf_port,
	.ndo_set_vf_mac = enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = enic_poll_controller,
#endif
};

static void enic_dev_deinit(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < enic->rq_count; i++)
		netif_napi_del(&enic->napi[i]);

	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
}

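/* One-time vNIC setup used by probe: read the coalesce-timer info and vNIC
 * configuration, size the resources, pick an interrupt mode, allocate and
 * initialize the rings, configure RSS, and register one NAPI context per RQ
 * (a single context for MSI/INTx).
 */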
static int enic_dev_init(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err;

	/* Get interrupt coalesce timer info */
	err = enic_dev_intr_coal_timer_info(enic);
	if (err) {
		dev_warn(dev, "Using default conversion factor for "
			"interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(enic->vdev);
	}

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		dev_err(dev, "Failed to set intr mode based on resource "
			"counts and system capabilities, aborting\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rss_nic_cfg(enic);
	if (err) {
		dev_err(dev, "Failed to config nic, aborting\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++)
			netif_napi_add(netdev, &enic->napi[i],
				enic_poll_msix, 64);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}

static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}

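/* PCI probe: allocate the netdev, map BARs 0-5, register with the vNIC
 * library, optionally enable SR-IOV VFs, open and initialize the device,
 * then set up netdev ops/features and register the interface. The error
 * labels unwind the setup steps in reverse order.
 */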
static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;
#ifdef CONFIG_PCI_IOV
	int pos = 0;
#endif
	int num_pps = 1;

	/* Allocate net device structure and initialize. Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev(sizeof(struct enic));
	if (!netdev)
		return -ENOMEM;

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "Cannot request PCI regions, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device. Try 40-bit first, and
	 * fall back to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "No usable DMA configuration, aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 32);
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 40);
			goto err_out_release_regions;
		}
		using_dac = 1;
	}

	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		dev_err(dev, "vNIC registration failed, aborting\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

#ifdef CONFIG_PCI_IOV
	/* Get number of subvnics */
	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
			&enic->num_vfs);
		if (enic->num_vfs) {
			err = pci_enable_sriov(pdev, enic->num_vfs);
			if (err) {
				dev_err(dev, "SRIOV enable failed, aborting."
					" pci_enable_sriov() returned %d\n",
					err);
				goto err_out_vnic_unregister;
			}
			enic->priv_flags |= ENIC_SRIOV_ENABLED;
			num_pps = enic->num_vfs;
		}
	}
#endif

	/* Allocate structure for port profiles */
	enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
	if (!enic->pp) {
		err = -ENOMEM;
		goto err_out_disable_sriov_pp;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		dev_err(dev, "vNIC dev open failed, aborting\n");
		goto err_out_disable_sriov;
	}

	/* Setup devcmd lock
	 */

	spin_lock_init(&enic->devcmd_lock);

	/*
	 * Set ingress vlan rewrite mode before vnic initialization
	 */

	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
	if (err) {
		dev_err(dev,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier. We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	/* Do not call dev_init for a dynamic vnic.
	 * For a dynamic vnic, init_prov_info will be
	 * called later by an upper layer.
	 */

	if (!enic_is_dynamic(enic)) {
		err = vnic_dev_init(enic->vdev, 0);
		if (err) {
			dev_err(dev, "vNIC dev init failed, aborting\n");
			goto err_out_dev_close;
		}
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(dev, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	/* Setup notification timer, HW reset task, and wq locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	INIT_WORK(&enic->reset, enic_reset);
	INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		dev_err(dev, "Invalid MAC address, aborting\n");
		goto err_out_dev_deinit;
	}

	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		netdev->netdev_ops = &enic_netdev_dynamic_ops;
	else
		netdev->netdev_ops = &enic_netdev_ops;

	netdev->watchdog_timeo = 2 * HZ;
	enic_set_ethtool_ops(netdev);

	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	if (ENIC_SETTING(enic, LOOP)) {
		netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
		enic->loop_enable = 1;
		enic->loop_tag = enic->config.loop_tag;
		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
	}
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->hw_features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, RXCSUM))
		netdev->hw_features |= NETIF_F_RXCSUM;

	netdev->features |= netdev->hw_features;

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Cannot register net device, aborting\n");
		goto err_out_dev_deinit;
	}

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_disable_sriov:
	kfree(enic->pp);
err_out_disable_sriov_pp:
#ifdef CONFIG_PCI_IOV
	if (enic_sriov_enabled(enic)) {
		pci_disable_sriov(pdev);
		enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
	}
err_out_vnic_unregister:
#endif
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	return err;
}

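/* PCI remove: cancel deferred work before unregistering so the reset and
 * MTU workers cannot run against a half-torn-down device, then release
 * resources in the reverse order of enic_probe.
 */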
static void enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		cancel_work_sync(&enic->reset);
		cancel_work_sync(&enic->change_mtu_work);
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
#ifdef CONFIG_PCI_IOV
		if (enic_sriov_enabled(enic)) {
			pci_disable_sriov(pdev);
			enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
		}
#endif
		kfree(enic->pp);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(netdev);
	}
}

static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = enic_remove,
};

static int __init enic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);