/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

#include <linux/module.h>
#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static atomic_t devices_found;

#define VMXNET3_MAX_DEVICES 10
static int enable_mq = 1;
static int irq_share_mode;

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);

/*
 * Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}


/*
 * Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
				cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
				cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}


static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}


static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
		       adapter->netdev->name, adapter->link_speed);
		if (!netif_carrier_ok(adapter->netdev))
			netif_carrier_on(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_start(&adapter->tx_queue[i],
						 adapter);
		}
	} else {
		printk(KERN_INFO "%s: NIC Link is Down\n",
		       adapter->netdev->name);
		if (netif_carrier_ok(adapter->netdev))
			netif_carrier_off(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
		}
	}
}

static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	u32 events = le32_to_cpu(adapter->shared->ecr);
	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (adapter->tqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: tq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->tqd_start[i].status.error));
		for (i = 0; i < adapter->num_rx_queues; i++)
			if (adapter->rqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: rq[%d] error 0x%x\n",
					adapter->netdev->name, i,
					adapter->rqd_start[i].status.error);

		schedule_work(&adapter->work);
	}
}

#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little-endian order. When the CPU is big endian, the following routines
 * are used to read from and write to the shared ABI correctly.
 * The general technique is: double-word bitfields are defined in the
 * opposite order for big-endian architectures. Before the driver reads
 * them, the complete double word is translated using le32_to_cpu.
 * Similarly, after the driver writes into the bitfields, cpu_to_le32 is
 * used to translate the double words into the required format.
 * To avoid touching bits in the shared structure more than once, temporary
 * descriptors are used. These are passed as srcDesc to the following
 * functions.
 */
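
/*
 * Illustrative note (added, not part of the original comment): in
 * Vmxnet3_RxDesc the len/btype/gen bitfields share the 32-bit word that
 * follows the 64-bit addr, which is why vmxnet3_RxDescToCPU() below
 * translates exactly that word ((u32 *)desc + 2) with le32_to_cpu().
 */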
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;
	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}


static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i = 0;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;
	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}


/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;
	temp &= mask;
	temp >>= pos;
	return temp;
}
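
/*
 * Usage example (illustrative): reading the gen bit of a Tx completion
 * descriptor is
 *	get_bitfield32((const __le32 *)tcd + VMXNET3_TCD_GEN_DWORD_SHIFT,
 *		       VMXNET3_TCD_GEN_SHIFT, VMXNET3_TCD_GEN_SIZE);
 * which is exactly what the VMXNET3_TCD_GET_GEN() macro below expands to.
 */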


#endif /* __BIG_ENDIAN_BITFIELD */

#ifdef __BIG_ENDIAN_BITFIELD

#  define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#  define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
#  define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

#  define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#  define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#  define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD */


static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}


static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}


static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}


static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;

		tbi = tq->buf_info + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}


static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		pci_free_consistent(adapter->pdev, tq->tx_ring.size *
				    sizeof(struct Vmxnet3_TxDesc),
				    tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		pci_free_consistent(adapter->pdev, tq->data_ring.size *
				    sizeof(struct Vmxnet3_TxDataDesc),
				    tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, tq->comp_ring.size *
				    sizeof(struct Vmxnet3_TxCompDesc),
				    tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	kfree(tq->buf_info);
	tq->buf_info = NULL;
}


/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}


static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0, tq->data_ring.size *
	       sizeof(struct Vmxnet3_TxDataDesc));

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}


static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
			   * sizeof(struct Vmxnet3_TxDesc),
			   &tq->tx_ring.basePA);
	if (!tq->tx_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->data_ring.size *
			     sizeof(struct Vmxnet3_TxDataDesc),
			     &tq->data_ring.basePA);
	if (!tq->data_ring.base) {
		printk(KERN_ERR "%s: failed to allocate data ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->comp_ring.size *
			     sizeof(struct Vmxnet3_TxCompDesc),
			     &tq->comp_ring.basePA);
	if (!tq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
			       GFP_KERNEL);
	if (!tq->buf_info)
		goto err;

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}

static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}

/*
 * starting from ring->next2fill, allocate rx buffers for the given ring
 * of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 * are allocated or allocation fails
 */

static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated <= num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
								       rbi->len,
								       GFP_KERNEL);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}

				rbi->dma_addr = pci_map_single(adapter->pdev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);

			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = pci_map_page(adapter->pdev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		BUG_ON(rbi->dma_addr == 0);
		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		/* Fill the last buffer but don't mark it ready, or else the
		 * device will think that the queue is full */
		if (num_allocated == num_to_alloc)
			break;

		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}

	dev_dbg(&adapter->netdev->dev,
		"alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
		num_allocated, ring->next2fill, ring->next2comp);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}
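
/*
 * Why the BUG_ON above holds (explanatory note, not from the original
 * sources): next2fill == next2comp is reserved to mean "ring empty". The
 * fill loop stops one buffer short of a complete wrap (the last buffer is
 * written but its gen bit is never handed to the device), so a ring with
 * outstanding buffers can never have next2fill catch up to next2comp.
 */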


static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
		skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	__skb_frag_set_page(frag, rbi->page);
	frag->page_offset = 0;
	skb_frag_size_set(frag, rcd->len);
	skb->data_len += rcd->len;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
}


static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					sizeof(struct Vmxnet3_TxDataDesc));
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill,
			le64_to_cpu(ctx->sop_txd->txd.addr),
			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = pci_map_single(adapter->pdev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 buf_size;

		buf_offset = 0;
		len = skb_frag_size(frag);
		while (len) {
			tbi = tq->buf_info + tq->tx_ring.next2fill;
			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
				buf_size = len;
				dw2 |= len;
			} else {
				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
				/* spec says that for TxDesc.len, 0 == 2^14 */
			}
			tbi->map_type = VMXNET3_MAP_PAGE;
			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
							 buf_offset, buf_size,
							 DMA_TO_DEVICE);

			tbi->len = buf_size;

			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
			gdesc->dword[2] = cpu_to_le32(dw2);
			gdesc->dword[3] = 0;

			dev_dbg(&adapter->netdev->dev,
				"txd[%u]: 0x%llu %u %u\n",
				tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
				le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

			len -= buf_size;
			buf_offset += buf_size;
		}
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}


/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}


/*
 * parse and copy relevant protocol headers:
 *      For a tso pkt, relevant headers are L2/3/4 including options
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt
 *
 * Returns:
 *    -1:  error happens during parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			   struct vmxnet3_tx_ctx *ctx,
			   struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	if (ctx->mss) {	/* TSO */
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = tcp_hdrlen(skb);
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);

			if (ctx->ipv4) {
				const struct iphdr *iph = ip_hdr(skb);

				if (iph->protocol == IPPROTO_TCP)
					ctx->l4_hdr_size = tcp_hdrlen(skb);
				else if (iph->protocol == IPPROTO_UDP)
					ctx->l4_hdr_size = sizeof(struct udphdr);
				else
					ctx->l4_hdr_size = 0;
			} else {
				/* for simplicity, don't copy L4 headers */
				ctx->l4_hdr_size = 0;
			}
			ctx->copy_size = min(ctx->eth_ip_hdr_size +
					     ctx->l4_hdr_size, skb->len);
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
					     , skb_headlen(skb));
		}

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	dev_dbg(&adapter->netdev->dev,
		"copy %u bytes to dataRing[%u]\n",
		ctx->copy_size, tq->tx_ring.next2fill);
	return 1;

err:
	return -1;
}
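
/*
 * Illustrative numbers (added example): for a TSO IPv4/TCP packet with a
 * plain Ethernet header, skb_transport_offset() is 14 (L2) + 20 (L3) = 34
 * bytes and tcp_hdrlen() is 20-60 bytes, so copy_size lands between 54 and
 * 94 bytes and passes the VMXNET3_HDR_COPY_SIZE check above.
 */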


static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = tcp_hdr(skb);

	if (ctx->ipv4) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}

static int txd_estimate(const struct sk_buff *skb)
{
	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
	}
	return count;
}
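
/*
 * Worked example (assuming VMXNET3_TXD_NEEDED() rounds a byte count up to
 * whole descriptors of at most VMXNET3_MAX_TX_BUF_SIZE bytes - the 2^14
 * limit noted in vmxnet3_map_pkt()): a 3 KB linear area costs 1 descriptor
 * (+1 headroom for the header descriptor), and two 20 KB frags cost 2
 * descriptors each, so txd_estimate() returns 1 + 1 + 2 + 2 = 6.
 */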

/*
 * Transmits a pkt through a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are setup successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */

static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
	/* Use temporary descriptor to avoid touching bits multiple times */
	union Vmxnet3_GenericDesc tempTxDesc;
#endif

	count = txd_estimate(skb);

	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		dev_dbg(&adapter->netdev->dev,
			"tx queue stopped on %s, next2comp %u"
			" next2fill %u\n", adapter->netdev->name,
			tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}


	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				goto hdr_too_big;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					goto hdr_too_big;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto unlock_drop_pkt;
	}

	/* fill tx descs related to addr & len */
	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;
	gdesc->dword[2] = ctx.sop_txd->dword[2];
	gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
	gdesc = ctx.sop_txd;
#endif
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		le32_add_cpu(&tq->shared->txNumDeferred, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = vlan_tx_tag_get(skb);
	}

	/* finally flips the GEN bit of the SOP desc. */
	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
				      VMXNET3_TXD_GEN);
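	/* Ownership note (added explanation): vmxnet3_map_pkt() wrote the
	 * SOP desc with the *previous* gen value, so the device ignored the
	 * chain while it was being built. Flipping the SOP gen bit here
	 * publishes all descriptors of the packet to the device at once.
	 */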
#ifdef __BIG_ENDIAN_BITFIELD
	/* Finished updating in bitfields of Tx Desc, so write them in original
	 * place.
	 */
	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
	gdesc = ctx.sop_txd;
#endif
	dev_dbg(&adapter->netdev->dev,
		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		(u32)(ctx.sop_txd -
		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (le32_to_cpu(tq->shared->txNumDeferred) >=
					le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter,
				       VMXNET3_REG_TXPROD + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}

	return NETDEV_TX_OK;

hdr_too_big:
	tq->stats.drop_oversized_hdr++;
unlock_drop_pkt:
	spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}


static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
	return vmxnet3_tq_xmit(skb,
			       &adapter->tx_queue[skb->queue_mapping],
			       adapter, netdev);
}


static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		struct sk_buff *skb,
		union Vmxnet3_GenericDesc *gdesc)
{
	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
		/* typical case: TCP/UDP over IP and both csums are correct */
		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
							VMXNET3_RCD_CSUM_OK) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
			BUG_ON(gdesc->rcd.frg);
		} else {
			if (gdesc->rcd.csum) {
				skb->csum = htons(gdesc->rcd.csum);
				skb->ip_summed = CHECKSUM_PARTIAL;
			} else {
				skb_checksum_none_assert(skb);
			}
		}
	} else {
		skb_checksum_none_assert(skb);
	}
}


static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
	rq->stats.drop_err++;
	if (!rcd->fcs)
		rq->stats.drop_fcs++;

	rq->stats.drop_total++;

	/*
	 * We do not unmap and chain the rx buffer to the skb.
	 * We basically pretend this buffer is not used and will be recycled
	 * by vmxnet3_rq_alloc_rx_buf()
	 */

	/*
	 * ctx->skb may be NULL if this is the first and the only one
	 * desc for the pkt
	 */
	if (ctx->skb)
		dev_kfree_skb_irq(ctx->skb);

	ctx->skb = NULL;
}


static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
	static const u32 rxprod_reg[2] = {
		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
	};
	u32 num_rxd = 0;
	bool skip_page_frags = false;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
#ifdef __BIG_ENDIAN_BITFIELD
	struct Vmxnet3_RxDesc rxCmdDesc;
	struct Vmxnet3_RxCompDesc rxComp;
#endif
	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
			  &rxComp);
	while (rcd->gen == rq->comp_ring.gen) {
		struct vmxnet3_rx_buf_info *rbi;
		struct sk_buff *skb, *new_skb = NULL;
		struct page *new_page = NULL;
		int num_to_alloc;
		struct Vmxnet3_RxDesc *rxd;
		u32 idx, ring_idx;
		struct vmxnet3_cmd_ring *ring = NULL;
		if (num_rxd >= quota) {
			/* we may stop even before we see the EOP desc of
			 * the current pkt
			 */
			break;
		}
		num_rxd++;
		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
		idx = rcd->rxdIdx;
		ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
		ring = rq->rx_ring + ring_idx;
		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
				  &rxCmdDesc);
		rbi = rq->buf_info[ring_idx] + idx;

		BUG_ON(rxd->addr != rbi->dma_addr ||
		       rxd->len != rbi->len);

		if (unlikely(rcd->eop && rcd->err)) {
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
			goto rcd_done;
		}

		if (rcd->sop) { /* first buf of the pkt */
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
			       rcd->rqID != rq->qid);

			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

			if (unlikely(rcd->len == 0)) {
				/* Pretend the rx buffer is skipped. */
				BUG_ON(!(rcd->sop && rcd->eop));
				dev_dbg(&adapter->netdev->dev,
					"rxRing[%u][%u] 0 length\n",
					ring_idx, idx);
				goto rcd_done;
			}

			skip_page_frags = false;
			ctx->skb = rbi->skb;
			new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
							    rbi->len);
			if (new_skb == NULL) {
				/* Skb allocation failed, do not hand this
				 * skb over to the stack. Reuse it. Drop the
				 * existing pkt.
				 */
				rq->stats.rx_buf_alloc_failure++;
				ctx->skb = NULL;
				rq->stats.drop_total++;
				skip_page_frags = true;
				goto rcd_done;
			}

			pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
					 PCI_DMA_FROMDEVICE);

			skb_put(ctx->skb, rcd->len);

			/* Immediate refill */
			rbi->skb = new_skb;
			rbi->dma_addr = pci_map_single(adapter->pdev,
						       rbi->skb->data, rbi->len,
						       PCI_DMA_FROMDEVICE);
			rxd->addr = cpu_to_le64(rbi->dma_addr);
			rxd->len = rbi->len;

		} else {
			BUG_ON(ctx->skb == NULL && !skip_page_frags);

			/* non SOP buffer must be type 1 in most cases */
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

			/* If an sop buffer was dropped, skip all
			 * following non-sop fragments. They will be reused.
			 */
			if (skip_page_frags)
				goto rcd_done;

			new_page = alloc_page(GFP_ATOMIC);
			if (unlikely(new_page == NULL)) {
				/* Replacement page frag could not be allocated.
				 * Reuse this page. Drop the pkt and free the
				 * skb which contained this page as a frag. Skip
				 * processing all the following non-sop frags.
				 */
				rq->stats.rx_buf_alloc_failure++;
				dev_kfree_skb(ctx->skb);
				ctx->skb = NULL;
				skip_page_frags = true;
				goto rcd_done;
			}

			if (rcd->len) {
				pci_unmap_page(adapter->pdev,
					       rbi->dma_addr, rbi->len,
					       PCI_DMA_FROMDEVICE);

				vmxnet3_append_frag(ctx->skb, rcd, rbi);
			}

			/* Immediate refill */
			rbi->page = new_page;
			rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
						     0, PAGE_SIZE,
						     PCI_DMA_FROMDEVICE);
			rxd->addr = cpu_to_le64(rbi->dma_addr);
			rxd->len = rbi->len;
		}


		skb = ctx->skb;
		if (rcd->eop) {
			skb->len += skb->data_len;

			vmxnet3_rx_csum(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
			skb->protocol = eth_type_trans(skb, adapter->netdev);

			if (unlikely(rcd->ts))
				__vlan_hwaccel_put_tag(skb, rcd->tci);

			if (adapter->netdev->features & NETIF_F_LRO)
				netif_receive_skb(skb);
			else
				napi_gro_receive(&rq->napi, skb);

			ctx->skb = NULL;
		}

rcd_done:
		/* device may have skipped some rx descs */
		ring->next2comp = idx;
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
		ring = rq->rx_ring + ring_idx;
		while (num_to_alloc) {
			vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
					  &rxCmdDesc);
			BUG_ON(!rxd->addr);

			/* Recv desc is ready to be used by the device */
			rxd->gen = ring->gen;
			vmxnet3_cmd_ring_adv_next2fill(ring);
			num_to_alloc--;
		}

		/* if needed, update the register */
		if (unlikely(rq->shared->updateRxProd)) {
			VMXNET3_WRITE_BAR0_REG(adapter,
					       rxprod_reg[ring_idx] + rq->qid * 8,
					       ring->next2fill);
		}

		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
		vmxnet3_getRxComp(rcd,
				  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
	}

	return num_rxd;
}


static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter)
{
	u32 i, ring_idx;
	struct Vmxnet3_RxDesc *rxd;

	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
			struct Vmxnet3_RxDesc rxDesc;
#endif
			vmxnet3_getRxDesc(rxd,
					  &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);

			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
			    rq->buf_info[ring_idx][i].skb) {
				pci_unmap_single(adapter->pdev, rxd->addr,
						 rxd->len, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
				rq->buf_info[ring_idx][i].skb = NULL;
			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
				   rq->buf_info[ring_idx][i].page) {
				pci_unmap_page(adapter->pdev, rxd->addr,
					       rxd->len, PCI_DMA_FROMDEVICE);
				put_page(rq->buf_info[ring_idx][i].page);
				rq->buf_info[ring_idx][i].page = NULL;
			}
		}

		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[ring_idx].next2fill =
			rq->rx_ring[ring_idx].next2comp = 0;
	}

	rq->comp_ring.gen = VMXNET3_INIT_GEN;
	rq->comp_ring.next2proc = 0;
}


static void
vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
}


void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
			struct vmxnet3_adapter *adapter)
{
	int i;
	int j;

	/* all rx buffers must have already been freed */
	for (i = 0; i < 2; i++) {
		if (rq->buf_info[i]) {
			for (j = 0; j < rq->rx_ring[i].size; j++)
				BUG_ON(rq->buf_info[i][j].page != NULL);
		}
	}


	kfree(rq->buf_info[0]);

	for (i = 0; i < 2; i++) {
		if (rq->rx_ring[i].base) {
			pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
					    * sizeof(struct Vmxnet3_RxDesc),
					    rq->rx_ring[i].base,
					    rq->rx_ring[i].basePA);
			rq->rx_ring[i].base = NULL;
		}
		rq->buf_info[i] = NULL;
	}

	if (rq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, rq->comp_ring.size *
				    sizeof(struct Vmxnet3_RxCompDesc),
				    rq->comp_ring.base, rq->comp_ring.basePA);
		rq->comp_ring.base = NULL;
	}
}


static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt are frags */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		/* at least has 1 rx buffer for the 1st ring */
		return -ENOMEM;
	}
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset rxctx */
	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}
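
/*
 * Layout example (added, based on the loops above): with
 * adapter->rx_buf_per_pkt == 3, ring 0 is typed SKB, PAGE, PAGE, SKB, ...
 * (one head buffer plus page buffers per packet), while every buffer of
 * ring 1 is a PAGE buffer.
 */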
1480
1481
1482static int
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001483vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
1484{
1485 int i, err = 0;
1486
1487 for (i = 0; i < adapter->num_rx_queues; i++) {
1488 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
1489 if (unlikely(err)) {
1490 dev_err(&adapter->netdev->dev, "%s: failed to "
1491 "initialize rx queue%i\n",
1492 adapter->netdev->name, i);
1493 break;
1494 }
1495 }
1496 return err;
1497
1498}
1499
1500
static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
	int i;
	size_t sz;
	struct vmxnet3_rx_buf_info *bi;

	for (i = 0; i < 2; i++) {

		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
		rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
							&rq->rx_ring[i].basePA);
		if (!rq->rx_ring[i].base) {
			printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
			       adapter->netdev->name, i);
			goto err;
		}
	}

	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
	rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
						  &rq->comp_ring.basePA);
	if (!rq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
						   rq->rx_ring[1].size);
	bi = kzalloc(sz, GFP_KERNEL);
	if (!bi)
		goto err;

	rq->buf_info[0] = bi;
	rq->buf_info[1] = bi + rq->rx_ring[0].size;

	return 0;

err:
	vmxnet3_rq_destroy(rq, adapter);
	return -ENOMEM;
}

static int
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev,
				"%s: failed to create rx queue %i\n",
				adapter->netdev->name, i);
			goto err_out;
		}
	}
	return err;
err_out:
	vmxnet3_rq_destroy_all(adapter);
	return err;
}

/* Multiple queue aware polling function for tx and rx */

static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
	int rcd_done = 0, i;

	if (unlikely(adapter->shared->ecr))
		vmxnet3_process_events(adapter);
	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
						   adapter, budget);
	return rcd_done;
}

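/*
 * NAPI polling function used when all queues share a single interrupt
 * vector (INTx/MSI, or a shared MSI-X vector): services every tx and rx
 * queue and re-enables all interrupts once under budget.
 */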
static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
					  struct vmxnet3_rx_queue, napi);
	int rxd_done;

	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_all_intrs(rx_queue->adapter);
	}
	return rxd_done;
}

/*
 * NAPI polling function for MSI-X mode with multiple rx queues
 * Returns the # of NAPI credits consumed (# of rx descriptors processed)
 */

static int
vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rq = container_of(napi,
						struct vmxnet3_rx_queue, napi);
	struct vmxnet3_adapter *adapter = rq->adapter;
	int rxd_done;

	/* When sharing interrupt with corresponding tx queue, process
	 * tx completions in that queue as well
	 */
	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
		struct vmxnet3_tx_queue *tq =
				&adapter->tx_queue[rq - adapter->rx_queue];
		vmxnet3_tq_tx_complete(tq, adapter);
	}

	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
	}
	return rxd_done;
}

#ifdef CONFIG_PCI_MSI

/*
 * Handle completion interrupts on tx queues
 * Returns whether or not the intr is handled
 */

static irqreturn_t
vmxnet3_msix_tx(int irq, void *data)
{
	struct vmxnet3_tx_queue *tq = data;
	struct vmxnet3_adapter *adapter = tq->adapter;

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);

	/* Handle the case where only one irq is allocated for all tx queues */
	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
		int i;
		for (i = 0; i < adapter->num_tx_queues; i++) {
			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
			vmxnet3_tq_tx_complete(txq, adapter);
		}
	} else {
		vmxnet3_tq_tx_complete(tq, adapter);
	}
	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);

	return IRQ_HANDLED;
}


/*
 * Handle completion interrupts on rx queues. Returns whether or not the
 * intr is handled
 */

static irqreturn_t
vmxnet3_msix_rx(int irq, void *data)
{
	struct vmxnet3_rx_queue *rq = data;
	struct vmxnet3_adapter *adapter = rq->adapter;

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
	napi_schedule(&rq->napi);

	return IRQ_HANDLED;
}

/*
 * Handle the event interrupt: process pending device events and re-enable
 * the event vector. Returns whether or not the intr is handled.
 */

static irqreturn_t
vmxnet3_msix_event(int irq, void *data)
{
	struct net_device *dev = data;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);

	if (adapter->shared->ecr)
		vmxnet3_process_events(adapter);

	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);

	return IRQ_HANDLED;
}

#endif /* CONFIG_PCI_MSI */

/* Interrupt handler for vmxnet3 */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	if (adapter->intr.type == VMXNET3_IT_INTX) {
		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
		if (unlikely(icr == 0))
			/* not ours */
			return IRQ_NONE;
	}

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	napi_schedule(&adapter->rx_queue[0].napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER

/* netpoll callback. */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
	vmxnet3_enable_all_intrs(adapter);
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */

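/*
 * Request one irq per allocated vector.  For MSI-X this binds tx queues,
 * rx queues and the event interrupt to their vectors according to the
 * configured sharing mode; MSI and INTx fall back to a single handler.
 * Also finalizes the number of rx queues and each queue's intr index.
 */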
static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	int err = 0, i;
	int vector = 0;

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
					adapter->netdev->name, vector);
				err = request_irq(
					      intr->msix_entries[vector].vector,
					      vmxnet3_msix_tx, 0,
					      adapter->tx_queue[i].name,
					      &adapter->tx_queue[i]);
			} else {
				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			}
			if (err) {
				dev_err(&adapter->netdev->dev,
					"Failed to request irq for MSIX, %s, "
					"error %d\n",
					adapter->tx_queue[i].name, err);
				return err;
			}

			/* Handle the case where only 1 MSI-X vector was
			 * allocated for all tx queues */
			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
				for (; i < adapter->num_tx_queues; i++)
					adapter->tx_queue[i].comp_ring.intr_idx
								= vector;
				vector++;
				break;
			} else {
				adapter->tx_queue[i].comp_ring.intr_idx
								= vector++;
			}
		}
		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
			vector = 0;

		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
					adapter->netdev->name, vector);
			else
				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			err = request_irq(intr->msix_entries[vector].vector,
					  vmxnet3_msix_rx, 0,
					  adapter->rx_queue[i].name,
					  &(adapter->rx_queue[i]));
			if (err) {
				printk(KERN_ERR "Failed to request irq for MSIX"
				       ", %s, error %d\n",
				       adapter->rx_queue[i].name, err);
				return err;
			}

			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
		}

		sprintf(intr->event_msi_vector_name, "%s-event-%d",
			adapter->netdev->name, vector);
		err = request_irq(intr->msix_entries[vector].vector,
				  vmxnet3_msix_event, 0,
				  intr->event_msi_vector_name, adapter->netdev);
		intr->event_intr_idx = vector;

	} else if (intr->type == VMXNET3_IT_MSI) {
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
				  adapter->netdev->name, adapter->netdev);
	} else {
#endif
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
				  IRQF_SHARED, adapter->netdev->name,
				  adapter->netdev);
#ifdef CONFIG_PCI_MSI
	}
#endif
	intr->num_intrs = vector + 1;
	if (err) {
		printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
		       ":%d\n", adapter->netdev->name, intr->type, err);
	} else {
		/* Number of rx queues will not change after this */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
			rq->qid = i;
			rq->qid2 = i + adapter->num_rx_queues;
		}

		/* init our intr settings */
		for (i = 0; i < intr->num_intrs; i++)
			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
		if (adapter->intr.type != VMXNET3_IT_MSIX) {
			adapter->intr.event_intr_idx = 0;
			for (i = 0; i < adapter->num_tx_queues; i++)
				adapter->tx_queue[i].comp_ring.intr_idx = 0;
			adapter->rx_queue[0].comp_ring.intr_idx = 0;
		}

		printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
		       "allocated\n", adapter->netdev->name, intr->type,
		       intr->mask_mode, intr->num_intrs);
	}

	return err;
}

static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);

	switch (intr->type) {
#ifdef CONFIG_PCI_MSI
	case VMXNET3_IT_MSIX:
	{
		int i, vector = 0;

		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
			for (i = 0; i < adapter->num_tx_queues; i++) {
				free_irq(intr->msix_entries[vector++].vector,
					 &(adapter->tx_queue[i]));
				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
					break;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			free_irq(intr->msix_entries[vector++].vector,
				 &(adapter->rx_queue[i]));
		}

		free_irq(intr->msix_entries[vector].vector,
			 adapter->netdev);
		BUG_ON(vector >= intr->num_intrs);
		break;
	}
#endif
	case VMXNET3_IT_MSI:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	case VMXNET3_IT_INTX:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	default:
		BUG();
	}
}

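/*
 * Rewrite the device's VLAN filter table from the driver's active_vlans
 * bitmap, always allowing untagged frames through.
 */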
static void
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
	u16 vid;

	/* allow untagged pkts */
	VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
}

static int
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!(netdev->flags & IFF_PROMISC)) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		unsigned long flags;

		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	set_bit(vid, adapter->active_vlans);

	return 0;
}


static int
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!(netdev->flags & IFF_PROMISC)) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		unsigned long flags;

		VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	clear_bit(vid, adapter->active_vlans);

	return 0;
}

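/*
 * Copy the multicast list into a freshly allocated buffer laid out as the
 * device expects for its multicast filter table.  Returns NULL when the
 * list would overflow the 16-bit table length or allocation fails.
 */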
static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
	u8 *buf = NULL;
	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;

	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
	if (sz <= 0xffff) {
		/* We may be called with BH disabled */
		buf = kmalloc(sz, GFP_ATOMIC);
		if (buf) {
			struct netdev_hw_addr *ha;
			int i = 0;

			netdev_for_each_mc_addr(ha, netdev)
				memcpy(buf + i++ * ETH_ALEN, ha->addr,
				       ETH_ALEN);
		}
	}
	return buf;
}


static void
vmxnet3_set_mc(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	struct Vmxnet3_RxFilterConf *rxConf =
					&adapter->shared->devRead.rxFilterConf;
	u8 *new_table = NULL;
	u32 new_mode = VMXNET3_RXM_UCAST;

	if (netdev->flags & IFF_PROMISC) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));

		new_mode |= VMXNET3_RXM_PROMISC;
	} else {
		vmxnet3_restore_vlan(adapter);
	}

	if (netdev->flags & IFF_BROADCAST)
		new_mode |= VMXNET3_RXM_BCAST;

	if (netdev->flags & IFF_ALLMULTI)
		new_mode |= VMXNET3_RXM_ALL_MULTI;
	else if (!netdev_mc_empty(netdev)) {
		new_table = vmxnet3_copy_mc(netdev);
		if (new_table) {
			new_mode |= VMXNET3_RXM_MCAST;
			rxConf->mfTableLen = cpu_to_le16(
				netdev_mc_count(netdev) * ETH_ALEN);
			rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
					    new_table));
		} else {
			printk(KERN_INFO "%s: failed to copy mcast list"
			       ", setting ALL_MULTI\n", netdev->name);
			new_mode |= VMXNET3_RXM_ALL_MULTI;
		}
	}

	if (!(new_mode & VMXNET3_RXM_MCAST)) {
		rxConf->mfTableLen = 0;
		rxConf->mfTablePA = 0;
	}

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	if (new_mode != rxConf->rxMode) {
		rxConf->rxMode = cpu_to_le32(new_mode);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_RX_MODE);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	}

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	kfree(new_table);
}

void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
}

/*
 * Set up driver_shared based on settings in adapter.
 */

static void
vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
	struct Vmxnet3_TxQueueConf *tqc;
	struct Vmxnet3_RxQueueConf *rqc;
	int i;

	memset(shared, 0, sizeof(*shared));

	/* driver settings */
	shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
	devRead->misc.driverInfo.version = cpu_to_le32(
						VMXNET3_DRIVER_VERSION_NUM);
	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
				*((u32 *)&devRead->misc.driverInfo.gos));
	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);

	devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));

	/* set up feature flags */
	if (adapter->netdev->features & NETIF_F_RXCSUM)
		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;

	if (adapter->netdev->features & NETIF_F_LRO) {
		devRead->misc.uptFeatures |= UPT1_F_LRO;
		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
	}
	if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;

	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
	devRead->misc.queueDescLen = cpu_to_le32(
		adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
		adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));

	/* tx queue settings */
	devRead->misc.numTxQueues = adapter->num_tx_queues;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
		BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
		tqc = &adapter->tqd_start[i].conf;
		tqc->txRingBasePA   = cpu_to_le64(tq->tx_ring.basePA);
		tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
		tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
		tqc->ddPA           = cpu_to_le64(virt_to_phys(tq->buf_info));
		tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
		tqc->dataRingSize   = cpu_to_le32(tq->data_ring.size);
		tqc->compRingSize   = cpu_to_le32(tq->comp_ring.size);
		tqc->ddLen          = cpu_to_le32(
					sizeof(struct vmxnet3_tx_buf_info) *
					tqc->txRingSize);
		tqc->intrIdx        = tq->comp_ring.intr_idx;
	}

	/* rx queue settings */
	devRead->misc.numRxQueues = adapter->num_rx_queues;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[i];
		rqc = &adapter->rqd_start[i].conf;
		rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
		rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
		rqc->compRingBasePA  = cpu_to_le64(rq->comp_ring.basePA);
		rqc->ddPA            = cpu_to_le64(virt_to_phys(
							rq->buf_info));
		rqc->rxRingSize[0]   = cpu_to_le32(rq->rx_ring[0].size);
		rqc->rxRingSize[1]   = cpu_to_le32(rq->rx_ring[1].size);
		rqc->compRingSize    = cpu_to_le32(rq->comp_ring.size);
		rqc->ddLen           = cpu_to_le32(
					sizeof(struct vmxnet3_rx_buf_info) *
					(rqc->rxRingSize[0] +
					 rqc->rxRingSize[1]));
		rqc->intrIdx         = rq->comp_ring.intr_idx;
	}

#ifdef VMXNET3_RSS
	memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));

	if (adapter->rss) {
		struct UPT1_RSSConf *rssConf = adapter->rss_conf;
		devRead->misc.uptFeatures |= UPT1_F_RSS;
		devRead->misc.numRxQueues = adapter->num_rx_queues;
		rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
				    UPT1_RSS_HASH_TYPE_IPV4 |
				    UPT1_RSS_HASH_TYPE_TCP_IPV6 |
				    UPT1_RSS_HASH_TYPE_IPV6;
		rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
		rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
		rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
		get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
		for (i = 0; i < rssConf->indTableSize; i++)
			rssConf->indTable[i] = ethtool_rxfh_indir_default(
				i, adapter->num_rx_queues);

		devRead->rssConfDesc.confVer = 1;
		devRead->rssConfDesc.confLen = sizeof(*rssConf);
		devRead->rssConfDesc.confPA  = virt_to_phys(rssConf);
	}

#endif /* VMXNET3_RSS */

	/* intr settings */
	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
				     VMXNET3_IMM_AUTO;
	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
	for (i = 0; i < adapter->intr.num_intrs; i++)
		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];

	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
	devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);

	/* rx filter settings */
	devRead->rxFilterConf.rxMode = 0;
	vmxnet3_restore_vlan(adapter);
	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);

	/* the rest are already zeroed */
}

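/*
 * Activate the device: initialize tx/rx queues, request irqs, publish the
 * shared area, and issue VMXNET3_CMD_ACTIVATE_DEV.  On success the rx rings
 * are primed, NAPI is enabled and interrupts are unmasked.
 */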
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
	int err, i;
	u32 ret;
	unsigned long flags;

	dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
		" ring sizes %u %u %u\n", adapter->netdev->name,
		adapter->skb_buf_size, adapter->rx_buf_per_pkt,
		adapter->tx_queue[0].tx_ring.size,
		adapter->rx_queue[0].rx_ring[0].size,
		adapter->rx_queue[0].rx_ring[1].size);

	vmxnet3_tq_init_all(adapter);
	err = vmxnet3_rq_init_all(adapter);
	if (err) {
		printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
		       adapter->netdev->name, err);
		goto rq_err;
	}

	err = vmxnet3_request_irqs(adapter);
	if (err) {
		printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
		       adapter->netdev->name, err);
		goto irq_err;
	}

	vmxnet3_setup_driver_shared(adapter);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
			       adapter->shared_pa));
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
			       adapter->shared_pa));
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	if (ret != 0) {
		printk(KERN_ERR "Failed to activate dev %s: error %u\n",
		       adapter->netdev->name, ret);
		err = -EINVAL;
		goto activate_err;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		VMXNET3_WRITE_BAR0_REG(adapter,
				VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
				adapter->rx_queue[i].rx_ring[0].next2fill);
		VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
				(i * VMXNET3_REG_ALIGN)),
				adapter->rx_queue[i].rx_ring[1].next2fill);
	}

	/* Apply the rx filter settings last. */
	vmxnet3_set_mc(adapter->netdev);

	/*
	 * Check link state when first activating device. It will start the
	 * tx queue if the link is up.
	 */
	vmxnet3_check_link(adapter, true);
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);
	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	return 0;

activate_err:
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
	vmxnet3_free_irqs(adapter);
irq_err:
rq_err:
	/* free up buffers we allocated */
	vmxnet3_rq_cleanup_all(adapter);
	return err;
}

void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
	unsigned long flags;
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}


int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
		return 0;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_disable_all_intrs(adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);
	netif_tx_disable(adapter->netdev);
	adapter->link_speed = 0;
	netif_carrier_off(adapter->netdev);

	vmxnet3_tq_cleanup_all(adapter);
	vmxnet3_rq_cleanup_all(adapter);
	vmxnet3_free_irqs(adapter);
	return 0;
}

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = *(u32 *)mac;
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);

	tmp = (mac[5] << 8) | mac[4];
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}


static int
vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	vmxnet3_write_mac_addr(adapter, addr->sa_data);

	return 0;
}

/* ==================== initialization and cleanup routines ============ */

static int
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
{
	int err;
	unsigned long mmio_start, mmio_len;
	struct pci_dev *pdev = adapter->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
		       pci_name(pdev), err);
		return err;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR "pci_set_consistent_dma_mask failed "
			       "for adapter %s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = true;
	} else {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			printk(KERN_ERR "pci_set_dma_mask failed for adapter "
			       "%s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = false;
	}

	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
					   vmxnet3_driver_name);
	if (err) {
		printk(KERN_ERR "Failed to request region for adapter %s: "
		       "error %d\n", pci_name(pdev), err);
		goto err_set_mask;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr0) {
		printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_ioremap;
	}

	mmio_start = pci_resource_start(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);
	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr1) {
		printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_bar1;
	}
	return 0;

err_bar1:
	iounmap(adapter->hw_addr0);
err_ioremap:
	pci_release_selected_regions(pdev, (1 << 2) - 1);
err_set_mask:
	pci_disable_device(pdev);
	return err;
}


static void
vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
{
	BUG_ON(!adapter->pdev);

	iounmap(adapter->hw_addr0);
	iounmap(adapter->hw_addr1);
	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
	pci_disable_device(adapter->pdev);
}

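/*
 * Size the rx rings from the current MTU: packets that fit in a single skb
 * buffer use one buffer each; larger MTUs add page-sized fragment buffers.
 * Ring0 is rounded so it holds a whole number of packets.
 */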
static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
	size_t sz, i, ring0_size, ring1_size, comp_size;
	struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[0];

	if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
				    VMXNET3_MAX_ETH_HDR_SIZE) {
		adapter->skb_buf_size = adapter->netdev->mtu +
					VMXNET3_MAX_ETH_HDR_SIZE;
		if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
			adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;

		adapter->rx_buf_per_pkt = 1;
	} else {
		adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
		sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
					    VMXNET3_MAX_ETH_HDR_SIZE;
		adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	/*
	 * for simplicity, force the ring0 size to be a multiple of
	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
	 */
	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
	ring0_size = (ring0_size + sz - 1) / sz * sz;
	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
			   sz * sz);
	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
	comp_size = ring0_size + ring1_size;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rq = &adapter->rx_queue[i];
		rq->rx_ring[0].size = ring0_size;
		rq->rx_ring[1].size = ring1_size;
		rq->comp_ring.size = comp_size;
	}
}

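/*
 * Create all tx and rx queues with the requested ring sizes.  Tx queue
 * creation must fully succeed; rx queue creation may fall back to fewer
 * queues than requested, as long as at least one can be allocated.
 */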
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
		      u32 rx_ring_size, u32 rx_ring2_size)
{
	int err = 0, i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
		tq->tx_ring.size   = tx_ring_size;
		tq->data_ring.size = tx_ring_size;
		tq->comp_ring.size = tx_ring_size;
		tq->shared = &adapter->tqd_start[i].ctrl;
		tq->stopped = true;
		tq->adapter = adapter;
		tq->qid = i;
		err = vmxnet3_tq_create(tq, adapter);
		/*
		 * Too late to change num_tx_queues. We cannot make do with
		 * fewer queues than we asked for
		 */
		if (err)
			goto queue_err;
	}

	adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
	adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
	vmxnet3_adjust_rx_ring_size(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
		/* qid and qid2 for rx queues will be assigned later when num
		 * of rx queues is finalized after allocating intrs */
		rq->shared = &adapter->rqd_start[i].ctrl;
		rq->adapter = adapter;
		err = vmxnet3_rq_create(rq, adapter);
		if (err) {
			if (i == 0) {
				printk(KERN_ERR "Could not allocate any rx "
				       "queues. Aborting.\n");
				goto queue_err;
			} else {
				printk(KERN_INFO "Number of rx queues changed "
				       "to : %d.\n", i);
				adapter->num_rx_queues = i;
				err = 0;
				break;
			}
		}
	}
	return err;
queue_err:
	vmxnet3_tq_destroy_all(adapter);
	return err;
}

static int
vmxnet3_open(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter;
	int err, i;

	adapter = netdev_priv(netdev);

	for (i = 0; i < adapter->num_tx_queues; i++)
		spin_lock_init(&adapter->tx_queue[i].tx_lock);

	err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE);
	if (err)
		goto queue_err;

	err = vmxnet3_activate_dev(adapter);
	if (err)
		goto activate_err;

	return 0;

activate_err:
	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_tq_destroy_all(adapter);
queue_err:
	return err;
}

static int
vmxnet3_close(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	vmxnet3_quiesce_dev(adapter);

	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_tq_destroy_all(adapter);

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);

	return 0;
}

void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
	int i;

	/*
	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
	 * vmxnet3_close() will deadlock.
	 */
	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));

	/* we need to enable NAPI, otherwise dev_close will deadlock */
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	dev_close(adapter->netdev);
}

static int
vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
		return -EINVAL;

	netdev->mtu = new_mtu;

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	if (netif_running(netdev)) {
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);

		/* we need to re-create the rx queue based on the new mtu */
		vmxnet3_rq_destroy_all(adapter);
		vmxnet3_adjust_rx_ring_size(adapter);
		err = vmxnet3_rq_create_all(adapter);
		if (err) {
			printk(KERN_ERR "%s: failed to re-create rx queues,"
			       " error %d. Closing it.\n", netdev->name, err);
			goto out;
		}

		err = vmxnet3_activate_dev(adapter);
		if (err) {
			printk(KERN_ERR "%s: failed to re-activate, error %d. "
			       "Closing it\n", netdev->name, err);
			goto out;
		}
	}

out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
	if (err)
		vmxnet3_force_close(adapter);

	return err;
}

static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
{
	struct net_device *netdev = adapter->netdev;

	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX |
		NETIF_F_HW_VLAN_RX | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_LRO;
	if (dma64)
		netdev->hw_features |= NETIF_F_HIGHDMA;
	netdev->vlan_features = netdev->hw_features &
				~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER;
}


static void
vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
	*(u32 *)mac = tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
	mac[4] = tmp & 0xff;
	mac[5] = (tmp >> 8) & 0xff;
}

#ifdef CONFIG_PCI_MSI

/*
 * Enable MSI-X vectors.
 * Returns:
 *	0 when the requested number of vectors could be enabled,
 *	VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of vectors
 *	required could be enabled, or
 *	the number of vectors which can be enabled otherwise (this number is
 *	smaller than VMXNET3_LINUX_MIN_MSIX_VECT)
 */

static int
vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
			     int vectors)
{
	int err = 0, vector_threshold;
	vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;

	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
				      vectors);
		if (!err) {
			adapter->intr.num_intrs = vectors;
			return 0;
		} else if (err < 0) {
			dev_err(&adapter->netdev->dev,
				"Failed to enable MSI-X, error: %d\n", err);
			vectors = 0;
		} else if (err < vector_threshold) {
			break;
		} else {
			/* If enabling the required number of MSI-X vectors
			 * fails, try enabling the minimum number required.
			 */
			dev_err(&adapter->netdev->dev,
				"Failed to enable %d MSI-X, trying %d instead\n",
				vectors, vector_threshold);
			vectors = vector_threshold;
		}
	}

	dev_info(&adapter->pdev->dev,
		 "Number of MSI-X interrupts which can be allocated "
		 "is lower than min threshold required.\n");
	return err;
}


#endif /* CONFIG_PCI_MSI */

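/*
 * Read the interrupt configuration from the device and allocate interrupt
 * vectors: prefer MSI-X (one vector per queue plus one for events), fall
 * back to MSI and then INTx, shrinking the number of rx queues to match.
 */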
static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
{
	u32 cfg;
	unsigned long flags;

	/* intr settings */
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_CONF_INTR);
	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	adapter->intr.type = cfg & 0x3;
	adapter->intr.mask_mode = (cfg >> 2) & 0x3;

	if (adapter->intr.type == VMXNET3_IT_AUTO) {
		adapter->intr.type = VMXNET3_IT_MSIX;
	}

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int vector, err = 0;

		adapter->intr.num_intrs = (adapter->share_intr ==
					   VMXNET3_INTR_TXSHARE) ? 1 :
					   adapter->num_tx_queues;
		adapter->intr.num_intrs += (adapter->share_intr ==
					   VMXNET3_INTR_BUDDYSHARE) ? 0 :
					   adapter->num_rx_queues;
		adapter->intr.num_intrs += 1;		/* for link event */

		adapter->intr.num_intrs = (adapter->intr.num_intrs >
					   VMXNET3_LINUX_MIN_MSIX_VECT
					   ? adapter->intr.num_intrs :
					   VMXNET3_LINUX_MIN_MSIX_VECT);

		for (vector = 0; vector < adapter->intr.num_intrs; vector++)
			adapter->intr.msix_entries[vector].entry = vector;

		err = vmxnet3_acquire_msix_vectors(adapter,
						   adapter->intr.num_intrs);
		/* If we cannot allocate one MSI-X vector per queue
		 * then limit the number of rx queues to 1
		 */
		if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
			    || adapter->num_rx_queues != 1) {
				adapter->share_intr = VMXNET3_INTR_TXSHARE;
				printk(KERN_ERR "Number of rx queues : 1\n");
				adapter->num_rx_queues = 1;
				adapter->intr.num_intrs =
						VMXNET3_LINUX_MIN_MSIX_VECT;
			}
			return;
		}
		if (!err)
			return;

		/* If we cannot allocate MSI-X vectors use only one rx queue */
		dev_info(&adapter->pdev->dev,
			 "Failed to enable MSI-X, error %d. "
			 "Limiting #rx queues to 1, try MSI.\n", err);

		adapter->intr.type = VMXNET3_IT_MSI;
	}

	if (adapter->intr.type == VMXNET3_IT_MSI) {
		int err;
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->num_rx_queues = 1;
			adapter->intr.num_intrs = 1;
			return;
		}
	}
#endif /* CONFIG_PCI_MSI */

	adapter->num_rx_queues = 1;
	printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n");
	adapter->intr.type = VMXNET3_IT_INTX;

	/* INT-X related setting */
	adapter->intr.num_intrs = 1;
}

static void
vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
{
	if (adapter->intr.type == VMXNET3_IT_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->intr.type == VMXNET3_IT_MSI)
		pci_disable_msi(adapter->pdev);
	else
		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
}


static void
vmxnet3_tx_timeout(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	adapter->tx_timeout_count++;

	printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
	schedule_work(&adapter->work);
	netif_wake_queue(adapter->netdev);
}

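/*
 * Reset worker: quiesce, reset and re-activate the device.  Runs from the
 * adapter's work item; skipped when another thread is already resetting or
 * when the device has been closed in the meantime.
 */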
static void
vmxnet3_reset_work(struct work_struct *data)
{
	struct vmxnet3_adapter *adapter;

	adapter = container_of(data, struct vmxnet3_adapter, work);

	/* if another thread is resetting the device, no need to proceed */
	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		return;

	/* if the device is closed, we must leave it alone */
	rtnl_lock();
	if (netif_running(adapter->netdev)) {
		printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);
		vmxnet3_activate_dev(adapter);
	} else {
		printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
	}
	rtnl_unlock();

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}

Bill Pemberton3a4751a2012-12-03 09:24:16 -05002878static int
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002879vmxnet3_probe_device(struct pci_dev *pdev,
2880 const struct pci_device_id *id)
2881{
2882 static const struct net_device_ops vmxnet3_netdev_ops = {
2883 .ndo_open = vmxnet3_open,
2884 .ndo_stop = vmxnet3_close,
2885 .ndo_start_xmit = vmxnet3_xmit_frame,
2886 .ndo_set_mac_address = vmxnet3_set_mac_addr,
2887 .ndo_change_mtu = vmxnet3_change_mtu,
Michał Mirosława0d27302011-04-18 13:31:21 +00002888 .ndo_set_features = vmxnet3_set_features,
stephen hemminger95305f62011-06-08 14:53:57 +00002889 .ndo_get_stats64 = vmxnet3_get_stats64,
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002890 .ndo_tx_timeout = vmxnet3_tx_timeout,
Jiri Pirkoafc4b132011-08-16 06:29:01 +00002891 .ndo_set_rx_mode = vmxnet3_set_mc,
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002892 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
2893 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
2894#ifdef CONFIG_NET_POLL_CONTROLLER
2895 .ndo_poll_controller = vmxnet3_netpoll,
2896#endif
2897 };
2898 int err;
2899 bool dma64 = false; /* stupid gcc */
2900 u32 ver;
2901 struct net_device *netdev;
2902 struct vmxnet3_adapter *adapter;
2903 u8 mac[ETH_ALEN];
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002904 int size;
2905 int num_tx_queues;
2906 int num_rx_queues;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002907
Shreyas Bhatewarae154b632011-05-10 06:13:56 +00002908 if (!pci_msi_enabled())
2909 enable_mq = 0;
2910
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002911#ifdef VMXNET3_RSS
2912 if (enable_mq)
2913 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
2914 (int)num_online_cpus());
2915 else
2916#endif
2917 num_rx_queues = 1;
Shreyas Bhatewaraeebb02b2011-07-07 00:25:52 -07002918 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002919
2920 if (enable_mq)
2921 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
2922 (int)num_online_cpus());
2923 else
2924 num_tx_queues = 1;
2925
Shreyas Bhatewaraeebb02b2011-07-07 00:25:52 -07002926 num_tx_queues = rounddown_pow_of_two(num_tx_queues);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002927 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
2928 max(num_tx_queues, num_rx_queues));
2929 printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
2930 num_tx_queues, num_rx_queues);
2931
Joe Perches41de8d42012-01-29 13:47:52 +00002932 if (!netdev)
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002933 return -ENOMEM;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002934
2935 pci_set_drvdata(pdev, netdev);
2936 adapter = netdev_priv(netdev);
2937 adapter->netdev = netdev;
2938 adapter->pdev = pdev;
2939
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002940 spin_lock_init(&adapter->cmd_lock);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002941 adapter->shared = pci_alloc_consistent(adapter->pdev,
stephen hemminger96800ee2012-11-13 13:53:28 +00002942 sizeof(struct Vmxnet3_DriverShared),
2943 &adapter->shared_pa);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002944 if (!adapter->shared) {
2945 printk(KERN_ERR "Failed to allocate memory for %s\n",
stephen hemminger96800ee2012-11-13 13:53:28 +00002946 pci_name(pdev));
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002947 err = -ENOMEM;
2948 goto err_alloc_shared;
2949 }
2950
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002951 adapter->num_rx_queues = num_rx_queues;
2952 adapter->num_tx_queues = num_tx_queues;
2953
	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
	adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
						  &adapter->queue_desc_pa);

	if (!adapter->tqd_start) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_queue_desc;
	}
	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
							    adapter->num_tx_queues);

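	/* pm_conf carries the wake-on-LAN filters handed to the device at suspend. */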
	adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
	if (adapter->pm_conf == NULL) {
		err = -ENOMEM;
		goto err_alloc_pm;
	}

#ifdef VMXNET3_RSS

	adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
	if (adapter->rss_conf == NULL) {
		err = -ENOMEM;
		goto err_alloc_rss;
	}
#endif /* VMXNET3_RSS */

	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
	if (err < 0)
		goto err_alloc_pci;

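	/*
	 * Version handshake: each register reports a bitmap of revisions
	 * the device supports; the driver acks revision 1 of the device
	 * and UPT interfaces by writing it back, or gives up.
	 */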
	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
	} else {
		printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter %s\n",
		       ver, pci_name(pdev));
		err = -EBUSY;
		goto err_ver;
	}

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
	} else {
		printk(KERN_ERR "Incompatible upt version (0x%x) for adapter %s\n",
		       ver, pci_name(pdev));
		err = -EBUSY;
		goto err_ver;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	vmxnet3_declare_features(adapter, dma64);

	adapter->dev_number = atomic_read(&devices_found);

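	/*
	 * Buddy sharing pairs Tx queue N with Rx queue N on one interrupt
	 * vector, which only works when the queue counts match.
	 */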
	adapter->share_intr = irq_share_mode;
	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
	    adapter->num_tx_queues != adapter->num_rx_queues)
		adapter->share_intr = VMXNET3_INTR_DONTSHARE;

	vmxnet3_alloc_intr_resources(adapter);

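	/* Enable RSS only when every Rx queue can own an MSI-X vector. */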
#ifdef VMXNET3_RSS
	if (adapter->num_rx_queues > 1 &&
	    adapter->intr.type == VMXNET3_IT_MSIX) {
		adapter->rss = true;
		printk(KERN_INFO "RSS is enabled.\n");
	} else {
		adapter->rss = false;
	}
#endif

	vmxnet3_read_mac_addr(adapter, mac);
	memcpy(netdev->dev_addr, mac, netdev->addr_len);

	netdev->netdev_ops = &vmxnet3_netdev_ops;
	vmxnet3_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

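	/*
	 * The reset worker recovers the device after errors; the adapter
	 * starts in the quiesced state until the interface is opened.
	 */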
	INIT_WORK(&adapter->work, vmxnet3_reset_work);
	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);

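	/*
	 * With MSI-X, each Rx queue polls on its own NAPI context;
	 * otherwise one NAPI context services the whole device.
	 */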
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int i;
		for (i = 0; i < adapter->num_rx_queues; i++) {
			netif_napi_add(adapter->netdev,
				       &adapter->rx_queue[i].napi,
				       vmxnet3_poll_rx_only, 64);
		}
	} else {
		netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
			       vmxnet3_poll, 64);
	}

	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);

	err = register_netdev(netdev);

	if (err) {
		printk(KERN_ERR "Failed to register adapter %s\n",
		       pci_name(pdev));
		goto err_register;
	}

	vmxnet3_check_link(adapter, false);
	atomic_inc(&devices_found);
	return 0;

err_register:
	vmxnet3_free_intr_resources(adapter);
err_ver:
	vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
#ifdef VMXNET3_RSS
	kfree(adapter->rss_conf);
err_alloc_rss:
#endif
	kfree(adapter->pm_conf);
err_alloc_pm:
	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
			    adapter->queue_desc_pa);
err_alloc_queue_desc:
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
err_alloc_shared:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}


static void
vmxnet3_remove_device(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int size = 0;
	int num_rx_queues;

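	/*
	 * Recompute the Rx queue count exactly as probe did, so the
	 * queue-descriptor area is freed with the size it was allocated.
	 */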
#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;
	num_rx_queues = rounddown_pow_of_two(num_rx_queues);

	cancel_work_sync(&adapter->work);

	unregister_netdev(netdev);

	vmxnet3_free_intr_resources(adapter);
	vmxnet3_free_pci_resources(adapter);
#ifdef VMXNET3_RSS
	kfree(adapter->rss_conf);
#endif
	kfree(adapter->pm_conf);

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
			    adapter->queue_desc_pa);
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
	free_netdev(netdev);
}


#ifdef CONFIG_PM

static int
vmxnet3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;
	u8 *arpreq;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	unsigned long flags;
	int i = 0;

	if (!netif_running(netdev))
		return 0;

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);

	vmxnet3_disable_all_intrs(adapter);
	vmxnet3_free_irqs(adapter);
	vmxnet3_free_intr_resources(adapter);

	netif_device_detach(netdev);
	netif_tx_stop_all_queues(netdev);

	/* Create wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

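	/*
	 * Each bit in a filter's mask selects one byte of its pattern;
	 * 0x3F below covers the six bytes of the station MAC address.
	 */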
	if (adapter->wol & WAKE_UCAST) {
		pmConf->filters[i].patternSize = ETH_ALEN;
		pmConf->filters[i].maskSize = 1;
		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

	if (adapter->wol & WAKE_ARP) {
		in_dev = in_dev_get(netdev);
		if (!in_dev)
			goto skip_arp;

		ifa = (struct in_ifaddr *)in_dev->ifa_list;
		if (!ifa)
			goto skip_arp;

		pmConf->filters[i].patternSize = ETH_HLEN +	/* Ethernet header */
			sizeof(struct arphdr) +		/* ARP header */
			2 * ETH_ALEN +			/* 2 Ethernet addresses */
			2 * sizeof(u32);		/* 2 IPv4 addresses */
		pmConf->filters[i].maskSize =
			(pmConf->filters[i].patternSize - 1) / 8 + 1;

		/* ETH_P_ARP in Ethernet header. */
		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
		ehdr->h_proto = htons(ETH_P_ARP);

		/* ARPOP_REQUEST in ARP header. */
		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
		ahdr->ar_op = htons(ARPOP_REQUEST);
		arpreq = (u8 *)(ahdr + 1);

		/* The Unicast IPv4 address in 'tip' field. */
		arpreq += 2 * ETH_ALEN + sizeof(u32);
		*(u32 *)arpreq = ifa->ifa_address;

		/* The mask for the relevant bits. */
		pmConf->filters[i].mask[0] = 0x00;
		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
		pmConf->filters[i].mask[3] = 0x00;
		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
		in_dev_put(in_dev);

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

skip_arp:
	if (adapter->wol & WAKE_MAGIC)
		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;

	pmConf->numFilters = i;

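	/*
	 * Publish the filter table to the device and ask it to reload
	 * its power-management configuration.
	 */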
	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen =
		cpu_to_le32(sizeof(*pmConf));
	adapter->shared->devRead.pmConfDesc.confPA =
		cpu_to_le64(virt_to_phys(pmConf));

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
			adapter->wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));

	return 0;
}


static int
vmxnet3_resume(struct device *device)
{
	int err, i = 0;
	unsigned long flags;
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;

	if (!netif_running(netdev))
		return 0;

	/* Destroy wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen =
		cpu_to_le32(sizeof(*pmConf));
	adapter->shared->devRead.pmConfDesc.confPA =
		cpu_to_le64(virt_to_phys(pmConf));

	netif_device_attach(netdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err != 0)
		return err;

	pci_enable_wake(pdev, PCI_D0, 0);

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_alloc_intr_resources(adapter);
	vmxnet3_request_irqs(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);

	return 0;
}

static const struct dev_pm_ops vmxnet3_pm_ops = {
	.suspend = vmxnet3_suspend,
	.resume = vmxnet3_resume,
};
#endif

static struct pci_driver vmxnet3_driver = {
	.name		= vmxnet3_driver_name,
	.id_table	= vmxnet3_pciid_table,
	.probe		= vmxnet3_probe_device,
	.remove		= vmxnet3_remove_device,
#ifdef CONFIG_PM
	.driver.pm	= &vmxnet3_pm_ops,
#endif
};


static int __init
vmxnet3_init_module(void)
{
	printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
	       VMXNET3_DRIVER_VERSION_REPORT);
	return pci_register_driver(&vmxnet3_driver);
}

module_init(vmxnet3_init_module);


static void
vmxnet3_exit_module(void)
{
	pci_unregister_driver(&vmxnet3_driver);
}

module_exit(vmxnet3_exit_module);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);