/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: pv-drivers@vmware.com
 *
 */

#include <linux/module.h>
#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static const struct pci_device_id vmxnet3_pciid_table[] = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static int enable_mq = 1;

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);

/*
 * Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}


/*
 * Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
				cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
				cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}


static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}


static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
			    adapter->link_speed);
		netif_carrier_on(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_start(&adapter->tx_queue[i],
						 adapter);
		}
	} else {
		netdev_info(adapter->netdev, "NIC Link is Down\n");
		netif_carrier_off(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
		}
	}
}

static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	u32 events = le32_to_cpu(adapter->shared->ecr);
	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (adapter->tqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: tq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->tqd_start[i].status.error));
		for (i = 0; i < adapter->num_rx_queues; i++)
			if (adapter->rqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: rq[%d] error 0x%x\n",
					adapter->netdev->name, i,
					adapter->rqd_start[i].status.error);

		schedule_work(&adapter->work);
	}
}

#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When the CPU is big endian, the following routines are used
 * to correctly read and write into the ABI.
 * The general technique used here is: double word bitfields are defined in
 * opposite order for the big endian architecture. Then, before the driver
 * reads them, the complete double word is translated using le32_to_cpu.
 * Similarly, after the driver writes into bitfields, cpu_to_le32 is used to
 * translate the double words into the required format.
 * In order to avoid touching bits in the shared structure more than once,
 * temporary descriptors are used. These are passed as srcDesc to the
 * following functions.
 */
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;
	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}


static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i = 0;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;
	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}


/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;
	temp &= mask;
	temp >>= pos;
	return temp;
}
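
/*
 * Illustrative sketch (annotation, not part of the upstream driver): on a
 * big-endian CPU, reading e.g. the gen bit of a Tx descriptor through the
 * VMXNET3_TXDESC_GET_GEN() macro below expands to roughly the following.
 * The helper name is hypothetical; the macros it uses are the real ones.
 */
static u32 __maybe_unused
vmxnet3_example_read_txd_gen(const struct Vmxnet3_TxDesc *txd)
{
	/* pick the dword holding the field and fix its endianness once */
	u32 dword = le32_to_cpu(*((const __le32 *)txd +
				  VMXNET3_TXD_GEN_DWORD_SHIFT));

	/* mask the field out and shift it down to bit 0 */
	return (dword & (((1 << VMXNET3_TXD_GEN_SIZE) - 1) <<
			 VMXNET3_TXD_GEN_SHIFT)) >> VMXNET3_TXD_GEN_SHIFT;
}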



#endif /* __BIG_ENDIAN_BITFIELD */

#ifdef __BIG_ENDIAN_BITFIELD

#  define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#  define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
#  define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

#  define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#  define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#  define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD */


static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}


static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}


static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
			struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}


static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;

		tbi = tq->buf_info + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}


static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
				  sizeof(struct Vmxnet3_TxDesc),
				  tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		dma_free_coherent(&adapter->pdev->dev,
				  tq->data_ring.size * tq->txdata_desc_size,
				  tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
				  sizeof(struct Vmxnet3_TxCompDesc),
				  tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	if (tq->buf_info) {
		dma_free_coherent(&adapter->pdev->dev,
				  tq->tx_ring.size * sizeof(tq->buf_info[0]),
				  tq->buf_info, tq->buf_info_pa);
		tq->buf_info = NULL;
	}
}


/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}


static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0,
	       tq->data_ring.size * tq->txdata_desc_size);

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}


static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	size_t sz;

	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
			&tq->tx_ring.basePA, GFP_KERNEL);
	if (!tq->tx_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx ring\n");
		goto err;
	}

	tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->data_ring.size * tq->txdata_desc_size,
			&tq->data_ring.basePA, GFP_KERNEL);
	if (!tq->data_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
		goto err;
	}

	tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
			&tq->comp_ring.basePA, GFP_KERNEL);
	if (!tq->comp_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
		goto err;
	}

	sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
	tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
					   &tq->buf_info_pa, GFP_KERNEL);
	if (!tq->buf_info)
		goto err;

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}

static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}

/*
 * starting from ring->next2fill, allocate rx buffers for the given ring
 * of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 * are allocated or allocation fails
 */

static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated <= num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
								       rbi->len,
								       GFP_KERNEL);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}

				rbi->dma_addr = dma_map_single(
						&adapter->pdev->dev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
				if (dma_mapping_error(&adapter->pdev->dev,
						      rbi->dma_addr)) {
					dev_kfree_skb_any(rbi->skb);
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);

			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = dma_map_page(
						&adapter->pdev->dev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
				if (dma_mapping_error(&adapter->pdev->dev,
						      rbi->dma_addr)) {
					put_page(rbi->page);
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		/* Fill the last buffer but don't mark it ready, or else the
		 * device will think that the queue is full */
		if (num_allocated == num_to_alloc)
			break;

		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}

	netdev_dbg(adapter->netdev,
		   "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
		   num_allocated, ring->next2fill, ring->next2comp);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}
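
/*
 * Annotation (illustrative sketch, not upstream code): the early break above
 * fills the last buffer but withholds its gen bit because the ring tracks
 * fullness purely by indices. If every slot were handed to the device,
 * next2fill would wrap onto next2comp and a full ring would be
 * indistinguishable from an empty one -- exactly what the BUG_ON checks.
 * The hypothetical helper below states the resulting invariant.
 */
static bool __maybe_unused
vmxnet3_example_ring_empty(const struct vmxnet3_cmd_ring *ring)
{
	/* true only when the device owns no buffers; a "full" ring keeps
	 * one slot back, so it shows next2fill == next2comp - 1 (mod size)
	 */
	return ring->next2fill == ring->next2comp;
}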


static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
		skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	__skb_frag_set_page(frag, rbi->page);
	frag->page_offset = 0;
	skb_frag_size_set(frag, rcd->len);
	skb->data_len += rcd->len;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
}


static int
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					tq->txdata_desc_size);
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		netdev_dbg(adapter->netdev,
			   "txd[%u]: 0x%Lx 0x%x 0x%x\n",
			   tq->tx_ring.next2fill,
			   le64_to_cpu(ctx->sop_txd->txd.addr),
			   ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);
		if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
			return -EFAULT;

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		netdev_dbg(adapter->netdev,
			   "txd[%u]: 0x%Lx 0x%x 0x%x\n",
			   tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			   le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 buf_size;

		buf_offset = 0;
		len = skb_frag_size(frag);
		while (len) {
			tbi = tq->buf_info + tq->tx_ring.next2fill;
			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
				buf_size = len;
				dw2 |= len;
			} else {
				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
				/* spec says that for TxDesc.len, 0 == 2^14 */
			}
			tbi->map_type = VMXNET3_MAP_PAGE;
			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
							 buf_offset, buf_size,
							 DMA_TO_DEVICE);
			if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
				return -EFAULT;

			tbi->len = buf_size;

			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
			gdesc->dword[2] = cpu_to_le32(dw2);
			gdesc->dword[3] = 0;

			netdev_dbg(adapter->netdev,
				   "txd[%u]: 0x%llx %u %u\n",
				   tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
				   le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

			len -= buf_size;
			buf_offset += buf_size;
		}
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;

	return 0;
}


/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}


/*
 * parse relevant protocol headers:
 *    For a tso pkt, relevant headers are L2/3/4 including options
 *    For a pkt requesting csum offloading, they are L2/3 and may include L4
 *    if it's a TCP/UDP pkt
 *
 * Returns:
 *    -1:  error happens during parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion to be copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_tx_ctx *ctx,
		  struct vmxnet3_adapter *adapter)
{
	u8 protocol = 0;

	if (ctx->mss) {	/* TSO */
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = tcp_hdrlen(skb);
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);

			if (ctx->ipv4) {
				const struct iphdr *iph = ip_hdr(skb);

				protocol = iph->protocol;
			} else if (ctx->ipv6) {
				const struct ipv6hdr *ipv6h = ipv6_hdr(skb);

				protocol = ipv6h->nexthdr;
			}

			switch (protocol) {
			case IPPROTO_TCP:
				ctx->l4_hdr_size = tcp_hdrlen(skb);
				break;
			case IPPROTO_UDP:
				ctx->l4_hdr_size = sizeof(struct udphdr);
				break;
			default:
				ctx->l4_hdr_size = 0;
				break;
			}

			ctx->copy_size = min(ctx->eth_ip_hdr_size +
					 ctx->l4_hdr_size, skb->len);
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min_t(unsigned int,
					       tq->txdata_desc_size,
					       skb_headlen(skb));
		}

		if (skb->len <= VMXNET3_HDR_COPY_SIZE)
			ctx->copy_size = skb->len;

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	return 1;
err:
	return -1;
}
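
/*
 * Worked example (annotation, not upstream code): for a TSO IPv4/TCP skb
 * with no IP or TCP options, vmxnet3_parse_hdr() leaves
 *
 *	ctx->eth_ip_hdr_size = 14 + 20 = 34	(Ethernet + IPv4)
 *	ctx->l4_hdr_size     = 20		(TCP, doff == 5)
 *	ctx->copy_size       = 54
 *
 * and returns 1, since 54 fits within txdata_desc_size (128 bytes unless
 * reconfigured); vmxnet3_copy_hdr() then copies those 54 bytes inline into
 * the tx data ring.
 */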

/*
 * copy relevant protocol headers to the transmit ring:
 *    For a tso pkt, relevant headers are L2/3/4 including options
 *    For a pkt requesting csum offloading, they are L2/3 and may include L4
 *    if it's a TCP/UDP pkt
 *
 * Note that this requires that vmxnet3_parse_hdr be called first to set the
 * appropriate bits in ctx.
 */
static void
vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		 struct vmxnet3_tx_ctx *ctx,
		 struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
					    tq->tx_ring.next2fill *
					    tq->txdata_desc_size);

	memcpy(tdd->data, skb->data, ctx->copy_size);
	netdev_dbg(adapter->netdev,
		   "copy %u bytes to dataRing[%u]\n",
		   ctx->copy_size, tq->tx_ring.next2fill);
}


static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = tcp_hdr(skb);

	if (ctx->ipv4) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else if (ctx->ipv6) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}

static int txd_estimate(const struct sk_buff *skb)
{
	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
	}
	return count;
}
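
/*
 * Worked example (annotation, not upstream code): VMXNET3_TXD_NEEDED()
 * divides a length by VMXNET3_MAX_TX_BUF_SIZE (16 KB), rounding up. For a
 * hypothetical skb with a 200-byte linear part and two 32 KB frags:
 *
 *	count = VMXNET3_TXD_NEEDED(200) + 1	= 2
 *	      + VMXNET3_TXD_NEEDED(32K) * 2	= 4
 *						------
 *						  6 descriptors
 *
 * The +1 reserves a slot for the SOP descriptor that points at the headers
 * copied into the tx data ring.
 */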

/*
 * Transmits a pkt thru a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are setup successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */

static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
	/* Use temporary descriptor to avoid touching bits multiple times */
	union Vmxnet3_GenericDesc tempTxDesc;
#endif

	count = txd_estimate(skb);

	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
	ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				tq->stats.drop_oversized_hdr++;
				goto drop_pkt;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					tq->stats.drop_oversized_hdr++;
					goto drop_pkt;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto drop_pkt;
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		netdev_dbg(adapter->netdev,
			   "tx queue stopped on %s, next2comp %u"
			   " next2fill %u\n", adapter->netdev->name,
			   tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}


	vmxnet3_copy_hdr(skb, tq, &ctx, adapter);

	/* fill tx descs related to addr & len */
	if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
		goto unlock_drop_pkt;

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;
	gdesc->dword[2] = ctx.sop_txd->dword[2];
	gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
	gdesc = ctx.sop_txd;
#endif
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		le32_add_cpu(&tq->shared->txNumDeferred, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = skb_vlan_tag_get(skb);
	}

	/* finally flips the GEN bit of the SOP desc. */
	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
						  VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
	/* Finished updating in bitfields of Tx Desc, so write them in original
	 * place.
	 */
	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
	gdesc = ctx.sop_txd;
#endif
	netdev_dbg(adapter->netdev,
		   "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		   (u32)(ctx.sop_txd -
		   tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
		   le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (le32_to_cpu(tq->shared->txNumDeferred) >=
					le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter,
				       VMXNET3_REG_TXPROD + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}

	return NETDEV_TX_OK;

unlock_drop_pkt:
	spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
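
/*
 * Worked example (annotation, not upstream code): the txNumDeferred logic
 * above batches doorbell writes. For a TSO skb with skb->len = 64234,
 * hlen = 54 and mss = 1460, the device will produce
 *
 *	(64234 - 54 + 1460 - 1) / 1460 = 44 segments,
 *
 * so txNumDeferred grows by 44; only once it crosses txThreshold does the
 * driver write next2fill to the VMXNET3_REG_TXPROD doorbell -- one MMIO
 * write covering many packets.
 */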


static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
	return vmxnet3_tq_xmit(skb,
			       &adapter->tx_queue[skb->queue_mapping],
			       adapter, netdev);
}


static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		struct sk_buff *skb,
		union Vmxnet3_GenericDesc *gdesc)
{
	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
		if (gdesc->rcd.v4 &&
		    (le32_to_cpu(gdesc->dword[3]) &
		     VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(gdesc->rcd.frg);
		} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
					     (1 << VMXNET3_RCD_TUC_SHIFT))) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(gdesc->rcd.frg);
		} else {
			if (gdesc->rcd.csum) {
				skb->csum = htons(gdesc->rcd.csum);
				skb->ip_summed = CHECKSUM_PARTIAL;
			} else {
				skb_checksum_none_assert(skb);
			}
		}
	} else {
		skb_checksum_none_assert(skb);
	}
}


static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
	rq->stats.drop_err++;
	if (!rcd->fcs)
		rq->stats.drop_fcs++;

	rq->stats.drop_total++;

	/*
	 * We do not unmap and chain the rx buffer to the skb.
	 * We basically pretend this buffer is not used and will be recycled
	 * by vmxnet3_rq_alloc_rx_buf()
	 */

	/*
	 * ctx->skb may be NULL if this is the first and the only one
	 * desc for the pkt
	 */
	if (ctx->skb)
		dev_kfree_skb_irq(ctx->skb);

	ctx->skb = NULL;
}


static u32
vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
		    union Vmxnet3_GenericDesc *gdesc)
{
	u32 hlen, maplen;
	union {
		void *ptr;
		struct ethhdr *eth;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
		struct tcphdr *tcp;
	} hdr;
	BUG_ON(gdesc->rcd.tcp == 0);

	maplen = skb_headlen(skb);
	if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
		return 0;

	hdr.eth = eth_hdr(skb);
	if (gdesc->rcd.v4) {
		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP));
		hdr.ptr += sizeof(struct ethhdr);
		BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
		hlen = hdr.ipv4->ihl << 2;
		hdr.ptr += hdr.ipv4->ihl << 2;
	} else if (gdesc->rcd.v6) {
		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6));
		hdr.ptr += sizeof(struct ethhdr);
		/* Use an estimated value, since we also need to handle
		 * TSO case.
		 */
		if (hdr.ipv6->nexthdr != IPPROTO_TCP)
			return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
		hlen = sizeof(struct ipv6hdr);
		hdr.ptr += sizeof(struct ipv6hdr);
	} else {
		/* Non-IP pkt, don't estimate header length */
		return 0;
	}

	if (hlen + sizeof(struct tcphdr) > maplen)
		return 0;

	return (hlen + (hdr.tcp->doff << 2));
}
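
/*
 * Worked example (annotation, not upstream code): for an LRO'd IPv4/TCP
 * packet with no IP options and a 12-byte TCP timestamp option, the walk
 * above computes hlen = ihl << 2 = 20, then returns
 *
 *	20 + (doff << 2) = 20 + (8 * 4) = 52,
 *
 * i.e. the L3 + L4 header length; the Ethernet header is stepped over via
 * hdr.ptr but deliberately not counted in the return value.
 */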
1255
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001256static int
1257vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1258 struct vmxnet3_adapter *adapter, int quota)
1259{
Joe Perches215faf92010-12-21 02:16:10 -08001260 static const u32 rxprod_reg[2] = {
1261 VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
1262 };
Neil Horman07696362015-07-07 14:02:18 -04001263 u32 num_pkts = 0;
Shreyas Bhatewara5318d802011-07-05 14:34:05 +00001264 bool skip_page_frags = false;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001265 struct Vmxnet3_RxCompDesc *rcd;
1266 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
Shreyas Bhatewara45dac1d2015-06-19 13:38:29 -07001267 u16 segCnt = 0, mss = 0;
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00001268#ifdef __BIG_ENDIAN_BITFIELD
1269 struct Vmxnet3_RxDesc rxCmdDesc;
1270 struct Vmxnet3_RxCompDesc rxComp;
1271#endif
1272 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1273 &rxComp);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001274 while (rcd->gen == rq->comp_ring.gen) {
1275 struct vmxnet3_rx_buf_info *rbi;
Shreyas Bhatewara5318d802011-07-05 14:34:05 +00001276 struct sk_buff *skb, *new_skb = NULL;
1277 struct page *new_page = NULL;
Alexey Khoroshilov5738a092015-11-28 01:29:30 +03001278 dma_addr_t new_dma_addr;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001279 int num_to_alloc;
1280 struct Vmxnet3_RxDesc *rxd;
1281 u32 idx, ring_idx;
Shreyas Bhatewara5318d802011-07-05 14:34:05 +00001282 struct vmxnet3_cmd_ring *ring = NULL;
Neil Horman07696362015-07-07 14:02:18 -04001283 if (num_pkts >= quota) {
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001284 /* we may stop even before we see the EOP desc of
1285 * the current pkt
1286 */
1287 break;
1288 }
Shrikrishna Khare50a5ce32016-06-16 10:51:56 -07001289 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
1290 rcd->rqID != rq->dataRingQid);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001291 idx = rcd->rxdIdx;
Shrikrishna Khare50a5ce32016-06-16 10:51:56 -07001292 ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
Shreyas Bhatewara5318d802011-07-05 14:34:05 +00001293 ring = rq->rx_ring + ring_idx;
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00001294 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1295 &rxCmdDesc);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001296 rbi = rq->buf_info[ring_idx] + idx;
1297
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00001298 BUG_ON(rxd->addr != rbi->dma_addr ||
1299 rxd->len != rbi->len);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001300
1301 if (unlikely(rcd->eop && rcd->err)) {
1302 vmxnet3_rx_error(rq, rcd, ctx, adapter);
1303 goto rcd_done;
1304 }
1305
1306 if (rcd->sop) { /* first buf of the pkt */
Shrikrishna Khare50a5ce32016-06-16 10:51:56 -07001307 bool rxDataRingUsed;
1308 u16 len;
1309
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001310 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
Shrikrishna Khare50a5ce32016-06-16 10:51:56 -07001311 (rcd->rqID != rq->qid &&
1312 rcd->rqID != rq->dataRingQid));
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001313
1314 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
1315 BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
1316
1317 if (unlikely(rcd->len == 0)) {
1318 /* Pretend the rx buffer is skipped. */
1319 BUG_ON(!(rcd->sop && rcd->eop));
Stephen Hemmingerfdcd79b2013-01-15 07:28:29 +00001320 netdev_dbg(adapter->netdev,
Randy Dunlapf69655822009-10-16 17:54:34 -07001321 "rxRing[%u][%u] 0 length\n",
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001322 ring_idx, idx);
1323 goto rcd_done;
1324 }
1325
Shreyas Bhatewara5318d802011-07-05 14:34:05 +00001326 skip_page_frags = false;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001327 ctx->skb = rbi->skb;
Shrikrishna Khare50a5ce32016-06-16 10:51:56 -07001328
1329 rxDataRingUsed =
1330 VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
1331 len = rxDataRingUsed ? rcd->len : rbi->len;
Stephen Hemminger0d735f12013-01-15 07:28:26 +00001332 new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
Shrikrishna Khare50a5ce32016-06-16 10:51:56 -07001333 len);
Shreyas Bhatewara5318d802011-07-05 14:34:05 +00001334 if (new_skb == NULL) {
1335 /* Skb allocation failed, do not handover this
1336 * skb to stack. Reuse it. Drop the existing pkt
1337 */
1338 rq->stats.rx_buf_alloc_failure++;
1339 ctx->skb = NULL;
1340 rq->stats.drop_total++;
1341 skip_page_frags = true;
1342 goto rcd_done;
1343 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001344
Shrikrishna Khare50a5ce32016-06-16 10:51:56 -07001345 if (rxDataRingUsed) {
1346 size_t sz;
1347
1348 BUG_ON(rcd->len > rq->data_ring.desc_size);
1349
1350 ctx->skb = new_skb;
1351 sz = rcd->rxdIdx * rq->data_ring.desc_size;
1352 memcpy(new_skb->data,
1353 &rq->data_ring.base[sz], rcd->len);
1354 } else {
1355 ctx->skb = rbi->skb;
1356
1357 new_dma_addr =
1358 dma_map_single(&adapter->pdev->dev,
1359 new_skb->data, rbi->len,
1360 PCI_DMA_FROMDEVICE);
1361 if (dma_mapping_error(&adapter->pdev->dev,
1362 new_dma_addr)) {
1363 dev_kfree_skb(new_skb);
1364 /* Skb allocation failed, do not
1365 * handover this skb to stack. Reuse
1366 * it. Drop the existing pkt.
1367 */
1368 rq->stats.rx_buf_alloc_failure++;
1369 ctx->skb = NULL;
1370 rq->stats.drop_total++;
1371 skip_page_frags = true;
1372 goto rcd_done;
1373 }
1374
1375 dma_unmap_single(&adapter->pdev->dev,
1376 rbi->dma_addr,
1377 rbi->len,
1378 PCI_DMA_FROMDEVICE);
1379
1380 /* Immediate refill */
1381 rbi->skb = new_skb;
1382 rbi->dma_addr = new_dma_addr;
1383 rxd->addr = cpu_to_le64(rbi->dma_addr);
1384 rxd->len = rbi->len;
1385 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001386
Stephen Hemminger7db11f72013-01-15 07:28:35 +00001387#ifdef VMXNET3_RSS
1388 if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
1389 (adapter->netdev->features & NETIF_F_RXHASH))
Michal Schmidt2c15a152013-12-20 13:16:57 +01001390 skb_set_hash(ctx->skb,
1391 le32_to_cpu(rcd->rssHash),
Tom Herbert0b680702013-12-17 23:32:08 -08001392 PKT_HASH_TYPE_L3);
Stephen Hemminger7db11f72013-01-15 07:28:35 +00001393#endif
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001394 skb_put(ctx->skb, rcd->len);
Shreyas Bhatewara5318d802011-07-05 14:34:05 +00001395
Shrikrishna Khare190af102016-06-16 10:51:53 -07001396 if (VMXNET3_VERSION_GE_2(adapter) &&
Shreyas Bhatewara45dac1d2015-06-19 13:38:29 -07001397 rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
1398 struct Vmxnet3_RxCompDescExt *rcdlro;
1399 rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
Shreyas Bhatewara5318d802011-07-05 14:34:05 +00001400
Shreyas Bhatewara45dac1d2015-06-19 13:38:29 -07001401 segCnt = rcdlro->segCnt;
Shrikrishna Khare50219532016-06-08 07:40:53 -07001402 WARN_ON_ONCE(segCnt == 0);
Shreyas Bhatewara45dac1d2015-06-19 13:38:29 -07001403 mss = rcdlro->mss;
1404 if (unlikely(segCnt <= 1))
1405 segCnt = 0;
1406 } else {
1407 segCnt = 0;
1408 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001409 } else {
Shreyas Bhatewara5318d802011-07-05 14:34:05 +00001410 BUG_ON(ctx->skb == NULL && !skip_page_frags);
1411
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001412			/* a non-SOP buffer must be type 1 (body) in most cases */
Shreyas Bhatewara5318d802011-07-05 14:34:05 +00001413 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
1414 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001415
Shreyas Bhatewara5318d802011-07-05 14:34:05 +00001416			/* If an SOP buffer was dropped, skip all
 1417			 * following non-SOP fragments. They will be reused.
 1418			 */
1419 if (skip_page_frags)
1420 goto rcd_done;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001421
Shreyas Bhatewarac41fcce2015-06-19 13:37:03 -07001422 if (rcd->len) {
1423 new_page = alloc_page(GFP_ATOMIC);
Shreyas Bhatewara5318d802011-07-05 14:34:05 +00001424 /* Replacement page frag could not be allocated.
1425 * Reuse this page. Drop the pkt and free the
1426 * skb which contained this page as a frag. Skip
 1427				 * processing all the following non-SOP frags.
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001428 */
Shreyas Bhatewarac41fcce2015-06-19 13:37:03 -07001429 if (unlikely(!new_page)) {
1430 rq->stats.rx_buf_alloc_failure++;
1431 dev_kfree_skb(ctx->skb);
1432 ctx->skb = NULL;
1433 skip_page_frags = true;
1434 goto rcd_done;
1435 }
Shrikrishna Khare58caf632016-01-06 10:44:27 -08001436 new_dma_addr = dma_map_page(&adapter->pdev->dev,
1437 new_page,
1438 0, PAGE_SIZE,
1439 PCI_DMA_FROMDEVICE);
Alexey Khoroshilov5738a092015-11-28 01:29:30 +03001440 if (dma_mapping_error(&adapter->pdev->dev,
1441 new_dma_addr)) {
1442 put_page(new_page);
1443 rq->stats.rx_buf_alloc_failure++;
1444 dev_kfree_skb(ctx->skb);
1445 ctx->skb = NULL;
1446 skip_page_frags = true;
1447 goto rcd_done;
1448 }
Shreyas Bhatewara5318d802011-07-05 14:34:05 +00001449
Andy Kingb0eb57c2013-08-23 09:33:49 -07001450 dma_unmap_page(&adapter->pdev->dev,
Shreyas Bhatewara5318d802011-07-05 14:34:05 +00001451 rbi->dma_addr, rbi->len,
1452 PCI_DMA_FROMDEVICE);
1453
1454 vmxnet3_append_frag(ctx->skb, rcd, rbi);
Shreyas Bhatewara5318d802011-07-05 14:34:05 +00001455
Shreyas Bhatewarac41fcce2015-06-19 13:37:03 -07001456 /* Immediate refill */
1457 rbi->page = new_page;
Alexey Khoroshilov5738a092015-11-28 01:29:30 +03001458 rbi->dma_addr = new_dma_addr;
Shreyas Bhatewarac41fcce2015-06-19 13:37:03 -07001459 rxd->addr = cpu_to_le64(rbi->dma_addr);
1460 rxd->len = rbi->len;
1461 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001462 }
1463
Shreyas Bhatewara5318d802011-07-05 14:34:05 +00001464
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001465 skb = ctx->skb;
1466 if (rcd->eop) {
Shreyas Bhatewara45dac1d2015-06-19 13:38:29 -07001467 u32 mtu = adapter->netdev->mtu;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001468 skb->len += skb->data_len;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001469
1470 vmxnet3_rx_csum(adapter, skb,
1471 (union Vmxnet3_GenericDesc *)rcd);
1472 skb->protocol = eth_type_trans(skb, adapter->netdev);
Shreyas Bhatewara45dac1d2015-06-19 13:38:29 -07001473 if (!rcd->tcp || !adapter->lro)
1474 goto not_lro;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001475
Shreyas Bhatewara45dac1d2015-06-19 13:38:29 -07001476 if (segCnt != 0 && mss != 0) {
1477 skb_shinfo(skb)->gso_type = rcd->v4 ?
1478 SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1479 skb_shinfo(skb)->gso_size = mss;
1480 skb_shinfo(skb)->gso_segs = segCnt;
1481 } else if (segCnt != 0 || skb->len > mtu) {
1482 u32 hlen;
1483
1484 hlen = vmxnet3_get_hdr_len(adapter, skb,
1485 (union Vmxnet3_GenericDesc *)rcd);
1486 if (hlen == 0)
1487 goto not_lro;
1488
1489 skb_shinfo(skb)->gso_type =
1490 rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1491 if (segCnt != 0) {
1492 skb_shinfo(skb)->gso_segs = segCnt;
1493 skb_shinfo(skb)->gso_size =
1494 DIV_ROUND_UP(skb->len -
1495 hlen, segCnt);
1496 } else {
1497 skb_shinfo(skb)->gso_size = mtu - hlen;
1498 }
1499 }
1500not_lro:
Jesse Gross72e85c42011-06-23 13:04:39 +00001501 if (unlikely(rcd->ts))
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001502 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
Jesse Gross72e85c42011-06-23 13:04:39 +00001503
Jesse Gross213ade82011-06-24 14:24:35 +00001504 if (adapter->netdev->features & NETIF_F_LRO)
1505 netif_receive_skb(skb);
1506 else
1507 napi_gro_receive(&rq->napi, skb);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001508
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001509 ctx->skb = NULL;
Neil Horman07696362015-07-07 14:02:18 -04001510 num_pkts++;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001511 }
1512
1513rcd_done:
Shreyas Bhatewara5318d802011-07-05 14:34:05 +00001514 /* device may have skipped some rx descs */
1515 ring->next2comp = idx;
1516 num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
1517 ring = rq->rx_ring + ring_idx;
1518 while (num_to_alloc) {
1519 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
1520 &rxCmdDesc);
1521 BUG_ON(!rxd->addr);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001522
Shreyas Bhatewara5318d802011-07-05 14:34:05 +00001523 /* Recv desc is ready to be used by the device */
1524 rxd->gen = ring->gen;
1525 vmxnet3_cmd_ring_adv_next2fill(ring);
1526 num_to_alloc--;
1527 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001528
Shreyas Bhatewara5318d802011-07-05 14:34:05 +00001529 /* if needed, update the register */
1530 if (unlikely(rq->shared->updateRxProd)) {
1531 VMXNET3_WRITE_BAR0_REG(adapter,
stephen hemminger96800ee2012-11-13 13:53:28 +00001532 rxprod_reg[ring_idx] + rq->qid * 8,
1533 ring->next2fill);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001534 }
1535
1536 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00001537 vmxnet3_getRxComp(rcd,
stephen hemminger96800ee2012-11-13 13:53:28 +00001538 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001539 }
1540
Neil Horman07696362015-07-07 14:02:18 -04001541 return num_pkts;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001542}
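
/*
 * Illustrative sketch, not part of the driver: the completion-ring
 * "generation" protocol that vmxnet3_rq_rx_complete() above relies on.
 * The device stamps each completion descriptor with the ring's current
 * generation bit; the driver consumes descriptors only while the
 * stamped bit matches its expected value, and flips that value every
 * time next2proc wraps.  The struct and helper below are hypothetical
 * simplifications, not driver API.
 */
#if 0	/* example only */
struct demo_comp_ring {
	u32 size;	/* number of descriptors in the ring */
	u32 next2proc;	/* next descriptor index to examine */
	u8 gen;		/* generation value we expect to see */
};

/* Advance past one consumed descriptor, flipping gen on wrap. */
static void demo_comp_ring_advance(struct demo_comp_ring *ring)
{
	if (++ring->next2proc == ring->size) {
		ring->next2proc = 0;
		ring->gen ^= 1;
	}
}
#endif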
1543
1544
1545static void
1546vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1547 struct vmxnet3_adapter *adapter)
1548{
1549 u32 i, ring_idx;
1550 struct Vmxnet3_RxDesc *rxd;
1551
1552 for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1553 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00001554#ifdef __BIG_ENDIAN_BITFIELD
1555 struct Vmxnet3_RxDesc rxDesc;
1556#endif
1557 vmxnet3_getRxDesc(rxd,
1558 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001559
1560 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1561 rq->buf_info[ring_idx][i].skb) {
Andy Kingb0eb57c2013-08-23 09:33:49 -07001562 dma_unmap_single(&adapter->pdev->dev, rxd->addr,
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001563 rxd->len, PCI_DMA_FROMDEVICE);
1564 dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
1565 rq->buf_info[ring_idx][i].skb = NULL;
1566 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
1567 rq->buf_info[ring_idx][i].page) {
Andy Kingb0eb57c2013-08-23 09:33:49 -07001568 dma_unmap_page(&adapter->pdev->dev, rxd->addr,
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001569 rxd->len, PCI_DMA_FROMDEVICE);
1570 put_page(rq->buf_info[ring_idx][i].page);
1571 rq->buf_info[ring_idx][i].page = NULL;
1572 }
1573 }
1574
1575 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1576 rq->rx_ring[ring_idx].next2fill =
1577 rq->rx_ring[ring_idx].next2comp = 0;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001578 }
1579
1580 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1581 rq->comp_ring.next2proc = 0;
1582}
1583
1584
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001585static void
1586vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
1587{
1588 int i;
1589
1590 for (i = 0; i < adapter->num_rx_queues; i++)
1591 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
1592}
1593
1594
stephen hemminger280b74f2013-02-22 08:26:29 +00001595static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1596 struct vmxnet3_adapter *adapter)
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001597{
1598 int i;
1599 int j;
1600
1601 /* all rx buffers must have already been freed */
1602 for (i = 0; i < 2; i++) {
1603 if (rq->buf_info[i]) {
1604 for (j = 0; j < rq->rx_ring[i].size; j++)
1605 BUG_ON(rq->buf_info[i][j].page != NULL);
1606 }
1607 }
1608
1609
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001610 for (i = 0; i < 2; i++) {
1611 if (rq->rx_ring[i].base) {
Andy Kingb0eb57c2013-08-23 09:33:49 -07001612 dma_free_coherent(&adapter->pdev->dev,
1613 rq->rx_ring[i].size
1614 * sizeof(struct Vmxnet3_RxDesc),
1615 rq->rx_ring[i].base,
1616 rq->rx_ring[i].basePA);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001617 rq->rx_ring[i].base = NULL;
1618 }
1619 rq->buf_info[i] = NULL;
1620 }
1621
Shrikrishna Khare50a5ce32016-06-16 10:51:56 -07001622 if (rq->data_ring.base) {
1623 dma_free_coherent(&adapter->pdev->dev,
1624 rq->rx_ring[0].size * rq->data_ring.desc_size,
1625 rq->data_ring.base, rq->data_ring.basePA);
1626 rq->data_ring.base = NULL;
1627 }
1628
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001629 if (rq->comp_ring.base) {
Andy Kingb0eb57c2013-08-23 09:33:49 -07001630 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
1631 * sizeof(struct Vmxnet3_RxCompDesc),
1632 rq->comp_ring.base, rq->comp_ring.basePA);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001633 rq->comp_ring.base = NULL;
1634 }
Andy Kingb0eb57c2013-08-23 09:33:49 -07001635
1636 if (rq->buf_info[0]) {
1637 size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
1638 (rq->rx_ring[0].size + rq->rx_ring[1].size);
1639 dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
1640 rq->buf_info_pa);
1641 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001642}
1643
Shrikrishna Khare50a5ce32016-06-16 10:51:56 -07001644void
1645vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
1646{
1647 int i;
1648
1649 for (i = 0; i < adapter->num_rx_queues; i++) {
1650 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1651
1652 if (rq->data_ring.base) {
1653 dma_free_coherent(&adapter->pdev->dev,
1654 (rq->rx_ring[0].size *
1655 rq->data_ring.desc_size),
1656 rq->data_ring.base,
1657 rq->data_ring.basePA);
1658 rq->data_ring.base = NULL;
1659 rq->data_ring.desc_size = 0;
1660 }
1661 }
1662}
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001663
1664static int
1665vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1666 struct vmxnet3_adapter *adapter)
1667{
1668 int i;
1669
1670 /* initialize buf_info */
1671 for (i = 0; i < rq->rx_ring[0].size; i++) {
1672
 1673		/* the 1st buf for a pkt is an skbuff */
1674 if (i % adapter->rx_buf_per_pkt == 0) {
1675 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
1676 rq->buf_info[0][i].len = adapter->skb_buf_size;
 1677		} else { /* subsequent bufs for a pkt are frags */
1678 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
1679 rq->buf_info[0][i].len = PAGE_SIZE;
1680 }
1681 }
1682 for (i = 0; i < rq->rx_ring[1].size; i++) {
1683 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
1684 rq->buf_info[1][i].len = PAGE_SIZE;
1685 }
1686
1687 /* reset internal state and allocate buffers for both rings */
1688 for (i = 0; i < 2; i++) {
1689 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001690
1691 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
1692 sizeof(struct Vmxnet3_RxDesc));
1693 rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
1694 }
1695 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
1696 adapter) == 0) {
 1697		/* the 1st ring must have at least 1 rx buffer */
1698 return -ENOMEM;
1699 }
1700 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
1701
1702 /* reset the comp ring */
1703 rq->comp_ring.next2proc = 0;
1704 memset(rq->comp_ring.base, 0, rq->comp_ring.size *
1705 sizeof(struct Vmxnet3_RxCompDesc));
1706 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1707
1708 /* reset rxctx */
1709 rq->rx_ctx.skb = NULL;
1710
1711 /* stats are not reset */
1712 return 0;
1713}
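
/*
 * Illustrative sketch, not part of the driver: the ring-0 layout that
 * vmxnet3_rq_init() above sets up.  Slot i holds an skb buffer when
 * i % rx_buf_per_pkt == 0 and a page fragment otherwise, so every
 * packet of up to rx_buf_per_pkt buffers starts on an skb slot.
 * demo_slot_is_skb() is a hypothetical helper.
 */
#if 0	/* example only */
static bool demo_slot_is_skb(u32 i, u32 rx_buf_per_pkt)
{
	return (i % rx_buf_per_pkt) == 0;
}
/* e.g. rx_buf_per_pkt == 3 yields the pattern: skb, page, page, skb, ... */
#endif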
1714
1715
1716static int
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001717vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
1718{
1719 int i, err = 0;
1720
1721 for (i = 0; i < adapter->num_rx_queues; i++) {
1722 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
1723 if (unlikely(err)) {
1724 dev_err(&adapter->netdev->dev, "%s: failed to "
1725 "initialize rx queue%i\n",
1726 adapter->netdev->name, i);
1727 break;
1728 }
1729 }
1730 return err;
1731
1732}
1733
1734
1735static int
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001736vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1737{
1738 int i;
1739 size_t sz;
1740 struct vmxnet3_rx_buf_info *bi;
1741
1742 for (i = 0; i < 2; i++) {
1743
1744 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
Andy Kingb0eb57c2013-08-23 09:33:49 -07001745 rq->rx_ring[i].base = dma_alloc_coherent(
1746 &adapter->pdev->dev, sz,
1747 &rq->rx_ring[i].basePA,
1748 GFP_KERNEL);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001749 if (!rq->rx_ring[i].base) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00001750 netdev_err(adapter->netdev,
1751 "failed to allocate rx ring %d\n", i);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001752 goto err;
1753 }
1754 }
1755
Shrikrishna Khare50a5ce32016-06-16 10:51:56 -07001756 if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
1757 sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
1758 rq->data_ring.base =
1759 dma_alloc_coherent(&adapter->pdev->dev, sz,
1760 &rq->data_ring.basePA,
1761 GFP_KERNEL);
1762 if (!rq->data_ring.base) {
1763 netdev_err(adapter->netdev,
1764 "rx data ring will be disabled\n");
1765 adapter->rxdataring_enabled = false;
1766 }
1767 } else {
1768 rq->data_ring.base = NULL;
1769 rq->data_ring.desc_size = 0;
1770 }
1771
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001772 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
Andy Kingb0eb57c2013-08-23 09:33:49 -07001773 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
1774 &rq->comp_ring.basePA,
1775 GFP_KERNEL);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001776 if (!rq->comp_ring.base) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00001777 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001778 goto err;
1779 }
1780
1781 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1782 rq->rx_ring[1].size);
Andy Kingb0eb57c2013-08-23 09:33:49 -07001783 bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
1784 GFP_KERNEL);
Joe Perchese404dec2012-01-29 12:56:23 +00001785 if (!bi)
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001786 goto err;
Joe Perchese404dec2012-01-29 12:56:23 +00001787
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001788 rq->buf_info[0] = bi;
1789 rq->buf_info[1] = bi + rq->rx_ring[0].size;
1790
1791 return 0;
1792
1793err:
1794 vmxnet3_rq_destroy(rq, adapter);
1795 return -ENOMEM;
1796}
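
/*
 * Illustrative sketch, not part of the driver: the coherent DMA memory
 * a single rx queue ends up with after vmxnet3_rq_create() above,
 * folded into one arithmetic helper.  It assumes the comp ring holds
 * ring0 + ring1 entries, which is what vmxnet3_adjust_rx_ring_size()
 * arranges; the function and parameter names are hypothetical.
 */
#if 0	/* example only */
static size_t demo_rq_coherent_bytes(u32 ring0, u32 ring1,
				     u16 rx_data_desc_size)
{
	return ring0 * sizeof(struct Vmxnet3_RxDesc) +		/* ring 0 */
	       ring1 * sizeof(struct Vmxnet3_RxDesc) +		/* ring 1 */
	       ring0 * rx_data_desc_size +			/* data ring */
	       (ring0 + ring1) * sizeof(struct Vmxnet3_RxCompDesc) +
	       (ring0 + ring1) * sizeof(struct vmxnet3_rx_buf_info);
}
#endif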
1797
1798
1799static int
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001800vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
1801{
1802 int i, err = 0;
1803
Shrikrishna Khare50a5ce32016-06-16 10:51:56 -07001804 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
1805
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001806 for (i = 0; i < adapter->num_rx_queues; i++) {
1807 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
1808 if (unlikely(err)) {
1809 dev_err(&adapter->netdev->dev,
1810 "%s: failed to create rx queue%i\n",
1811 adapter->netdev->name, i);
1812 goto err_out;
1813 }
1814 }
Shrikrishna Khare50a5ce32016-06-16 10:51:56 -07001815
1816 if (!adapter->rxdataring_enabled)
1817 vmxnet3_rq_destroy_all_rxdataring(adapter);
1818
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001819 return err;
1820err_out:
1821 vmxnet3_rq_destroy_all(adapter);
1822 return err;
1823
1824}
1825
 1826/* Multiple-queue-aware polling function for tx and rx */
1827
1828static int
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001829vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
1830{
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001831 int rcd_done = 0, i;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001832 if (unlikely(adapter->shared->ecr))
1833 vmxnet3_process_events(adapter);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001834 for (i = 0; i < adapter->num_tx_queues; i++)
1835 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001836
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001837 for (i = 0; i < adapter->num_rx_queues; i++)
1838 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
1839 adapter, budget);
1840 return rcd_done;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001841}
1842
1843
1844static int
1845vmxnet3_poll(struct napi_struct *napi, int budget)
1846{
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001847 struct vmxnet3_rx_queue *rx_queue = container_of(napi,
1848 struct vmxnet3_rx_queue, napi);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001849 int rxd_done;
1850
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001851 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001852
1853 if (rxd_done < budget) {
1854 napi_complete(napi);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001855 vmxnet3_enable_all_intrs(rx_queue->adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001856 }
1857 return rxd_done;
1858}
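
/*
 * Illustrative sketch, not part of the driver: the NAPI contract that
 * vmxnet3_poll() above and vmxnet3_poll_rx_only() below both follow.
 * A poll handler may consume at most 'budget' rx descriptors, and only
 * when it consumes fewer may it call napi_complete() and re-enable its
 * interrupt.  demo_poll() is a hypothetical skeleton.
 */
#if 0	/* example only */
static int demo_poll(struct napi_struct *napi, int budget)
{
	int done;

	done = 0;	/* ... consume up to 'budget' rx descriptors ... */

	if (done < budget) {
		napi_complete(napi);
		/* ... re-enable this queue's interrupt here ... */
	}
	return done;	/* NAPI credit consumed; never exceeds budget */
}
#endif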
1859
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001860/*
1861 * NAPI polling function for MSI-X mode with multiple Rx queues
1862 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
1863 */
1864
1865static int
1866vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
1867{
1868 struct vmxnet3_rx_queue *rq = container_of(napi,
1869 struct vmxnet3_rx_queue, napi);
1870 struct vmxnet3_adapter *adapter = rq->adapter;
1871 int rxd_done;
1872
 1873	/* When sharing an interrupt with the corresponding tx queue,
 1874	 * process tx completions in that queue as well.
 1875	 */
1876 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
1877 struct vmxnet3_tx_queue *tq =
1878 &adapter->tx_queue[rq - adapter->rx_queue];
1879 vmxnet3_tq_tx_complete(tq, adapter);
1880 }
1881
1882 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
1883
1884 if (rxd_done < budget) {
1885 napi_complete(napi);
1886 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
1887 }
1888 return rxd_done;
1889}
1890
1891
1892#ifdef CONFIG_PCI_MSI
1893
1894/*
1895 * Handle completion interrupts on tx queues
1896 * Returns whether or not the intr is handled
1897 */
1898
1899static irqreturn_t
1900vmxnet3_msix_tx(int irq, void *data)
1901{
1902 struct vmxnet3_tx_queue *tq = data;
1903 struct vmxnet3_adapter *adapter = tq->adapter;
1904
1905 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1906 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
1907
 1908	/* Handle the case where only one irq is allocated for all tx queues */
1909 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
1910 int i;
1911 for (i = 0; i < adapter->num_tx_queues; i++) {
1912 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
1913 vmxnet3_tq_tx_complete(txq, adapter);
1914 }
1915 } else {
1916 vmxnet3_tq_tx_complete(tq, adapter);
1917 }
1918 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
1919
1920 return IRQ_HANDLED;
1921}
1922
1923
1924/*
1925 * Handle completion interrupts on rx queues. Returns whether or not the
1926 * intr is handled
1927 */
1928
1929static irqreturn_t
1930vmxnet3_msix_rx(int irq, void *data)
1931{
1932 struct vmxnet3_rx_queue *rq = data;
1933 struct vmxnet3_adapter *adapter = rq->adapter;
1934
1935 /* disable intr if needed */
1936 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1937 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
1938 napi_schedule(&rq->napi);
1939
1940 return IRQ_HANDLED;
1941}
1942
1943/*
1944 *----------------------------------------------------------------------------
1945 *
1946 * vmxnet3_msix_event --
1947 *
1948 * vmxnet3 msix event intr handler
1949 *
1950 * Result:
1951 * whether or not the intr is handled
1952 *
1953 *----------------------------------------------------------------------------
1954 */
1955
1956static irqreturn_t
1957vmxnet3_msix_event(int irq, void *data)
1958{
1959 struct net_device *dev = data;
1960 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1961
1962 /* disable intr if needed */
1963 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1964 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
1965
1966 if (adapter->shared->ecr)
1967 vmxnet3_process_events(adapter);
1968
1969 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
1970
1971 return IRQ_HANDLED;
1972}
1973
1974#endif /* CONFIG_PCI_MSI */
1975
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001976
1977/* Interrupt handler for vmxnet3 */
1978static irqreturn_t
1979vmxnet3_intr(int irq, void *dev_id)
1980{
1981 struct net_device *dev = dev_id;
1982 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1983
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001984 if (adapter->intr.type == VMXNET3_IT_INTX) {
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001985 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
1986 if (unlikely(icr == 0))
1987 /* not ours */
1988 return IRQ_NONE;
1989 }
1990
1991
1992 /* disable intr if needed */
1993 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001994 vmxnet3_disable_all_intrs(adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001995
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001996 napi_schedule(&adapter->rx_queue[0].napi);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001997
1998 return IRQ_HANDLED;
1999}
2000
2001#ifdef CONFIG_NET_POLL_CONTROLLER
2002
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002003/* netpoll callback. */
2004static void
2005vmxnet3_netpoll(struct net_device *netdev)
2006{
2007 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002008
Neil Hormand25f06e2014-03-10 06:55:55 -04002009 switch (adapter->intr.type) {
Arnd Bergmann0a8d8c42014-03-13 10:44:34 +01002010#ifdef CONFIG_PCI_MSI
2011 case VMXNET3_IT_MSIX: {
2012 int i;
Neil Hormand25f06e2014-03-10 06:55:55 -04002013 for (i = 0; i < adapter->num_rx_queues; i++)
2014 vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2015 break;
Arnd Bergmann0a8d8c42014-03-13 10:44:34 +01002016 }
2017#endif
Neil Hormand25f06e2014-03-10 06:55:55 -04002018 case VMXNET3_IT_MSI:
2019 default:
2020 vmxnet3_intr(0, adapter->netdev);
2021 break;
2022 }
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002023
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002024}
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002025#endif /* CONFIG_NET_POLL_CONTROLLER */
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002026
2027static int
2028vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
2029{
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002030 struct vmxnet3_intr *intr = &adapter->intr;
2031 int err = 0, i;
2032 int vector = 0;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002033
Randy Dunlap8f7e5242009-10-14 20:38:58 -07002034#ifdef CONFIG_PCI_MSI
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002035 if (adapter->intr.type == VMXNET3_IT_MSIX) {
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002036 for (i = 0; i < adapter->num_tx_queues; i++) {
2037 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2038 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
2039 adapter->netdev->name, vector);
2040 err = request_irq(
2041 intr->msix_entries[vector].vector,
2042 vmxnet3_msix_tx, 0,
2043 adapter->tx_queue[i].name,
2044 &adapter->tx_queue[i]);
2045 } else {
2046 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
2047 adapter->netdev->name, vector);
2048 }
2049 if (err) {
2050 dev_err(&adapter->netdev->dev,
2051 "Failed to request irq for MSIX, %s, "
2052 "error %d\n",
2053 adapter->tx_queue[i].name, err);
2054 return err;
2055 }
2056
 2057			/* Handle the case where only 1 MSI-X vector was
 2058			 * allocated for all tx queues */
2059 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2060 for (; i < adapter->num_tx_queues; i++)
2061 adapter->tx_queue[i].comp_ring.intr_idx
2062 = vector;
2063 vector++;
2064 break;
2065 } else {
2066 adapter->tx_queue[i].comp_ring.intr_idx
2067 = vector++;
2068 }
2069 }
2070 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
2071 vector = 0;
2072
2073 for (i = 0; i < adapter->num_rx_queues; i++) {
2074 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
2075 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
2076 adapter->netdev->name, vector);
2077 else
2078 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
2079 adapter->netdev->name, vector);
2080 err = request_irq(intr->msix_entries[vector].vector,
2081 vmxnet3_msix_rx, 0,
2082 adapter->rx_queue[i].name,
2083 &(adapter->rx_queue[i]));
2084 if (err) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002085 netdev_err(adapter->netdev,
2086 "Failed to request irq for MSIX, "
2087 "%s, error %d\n",
2088 adapter->rx_queue[i].name, err);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002089 return err;
2090 }
2091
2092 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
2093 }
2094
2095 sprintf(intr->event_msi_vector_name, "%s-event-%d",
2096 adapter->netdev->name, vector);
2097 err = request_irq(intr->msix_entries[vector].vector,
2098 vmxnet3_msix_event, 0,
2099 intr->event_msi_vector_name, adapter->netdev);
2100 intr->event_intr_idx = vector;
2101
2102 } else if (intr->type == VMXNET3_IT_MSI) {
2103 adapter->num_rx_queues = 1;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002104 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
2105 adapter->netdev->name, adapter->netdev);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002106 } else {
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00002107#endif
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002108 adapter->num_rx_queues = 1;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002109 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
2110 IRQF_SHARED, adapter->netdev->name,
2111 adapter->netdev);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002112#ifdef CONFIG_PCI_MSI
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002113 }
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002114#endif
2115 intr->num_intrs = vector + 1;
2116 if (err) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002117 netdev_err(adapter->netdev,
2118 "Failed to request irq (intr type:%d), error %d\n",
2119 intr->type, err);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002120 } else {
2121 /* Number of rx queues will not change after this */
2122 for (i = 0; i < adapter->num_rx_queues; i++) {
2123 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2124 rq->qid = i;
2125 rq->qid2 = i + adapter->num_rx_queues;
Shrikrishna Khare50a5ce32016-06-16 10:51:56 -07002126 rq->dataRingQid = i + 2 * adapter->num_rx_queues;
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002127 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002128
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002129 /* init our intr settings */
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002130 for (i = 0; i < intr->num_intrs; i++)
2131 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
2132 if (adapter->intr.type != VMXNET3_IT_MSIX) {
2133 adapter->intr.event_intr_idx = 0;
2134 for (i = 0; i < adapter->num_tx_queues; i++)
2135 adapter->tx_queue[i].comp_ring.intr_idx = 0;
2136 adapter->rx_queue[0].comp_ring.intr_idx = 0;
2137 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002138
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002139 netdev_info(adapter->netdev,
2140 "intr type %u, mode %u, %u vectors allocated\n",
2141 intr->type, intr->mask_mode, intr->num_intrs);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002142 }
2143
2144 return err;
2145}
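
/*
 * Illustrative sketch, not part of the driver: the queue-to-vector
 * mapping produced by vmxnet3_request_irqs() above.  With no sharing,
 * tx queue i uses vector i and rx queue i uses vector num_tx_queues + i;
 * VMXNET3_INTR_TXSHARE folds every tx queue onto vector 0; and
 * VMXNET3_INTR_BUDDYSHARE gives tx queue i the same vector as rx
 * queue i.  demo_tx_vector() is a hypothetical helper.
 */
#if 0	/* example only */
static int demo_tx_vector(int share_intr, int txq_idx)
{
	if (share_intr == VMXNET3_INTR_TXSHARE)
		return 0;	/* all tx queues share one vector */
	return txq_idx;		/* own vector, or rx buddy's vector */
}
#endif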
2146
2147
2148static void
2149vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
2150{
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002151 struct vmxnet3_intr *intr = &adapter->intr;
2152 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002153
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002154 switch (intr->type) {
Randy Dunlap8f7e5242009-10-14 20:38:58 -07002155#ifdef CONFIG_PCI_MSI
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002156 case VMXNET3_IT_MSIX:
2157 {
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002158 int i, vector = 0;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002159
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002160 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2161 for (i = 0; i < adapter->num_tx_queues; i++) {
2162 free_irq(intr->msix_entries[vector++].vector,
2163 &(adapter->tx_queue[i]));
2164 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
2165 break;
2166 }
2167 }
2168
2169 for (i = 0; i < adapter->num_rx_queues; i++) {
2170 free_irq(intr->msix_entries[vector++].vector,
2171 &(adapter->rx_queue[i]));
2172 }
2173
2174 free_irq(intr->msix_entries[vector].vector,
2175 adapter->netdev);
2176 BUG_ON(vector >= intr->num_intrs);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002177 break;
2178 }
Randy Dunlap8f7e5242009-10-14 20:38:58 -07002179#endif
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002180 case VMXNET3_IT_MSI:
2181 free_irq(adapter->pdev->irq, adapter->netdev);
2182 break;
2183 case VMXNET3_IT_INTX:
2184 free_irq(adapter->pdev->irq, adapter->netdev);
2185 break;
2186 default:
Sasha Levinc068e772012-11-08 10:23:03 +00002187 BUG();
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002188 }
2189}
2190
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002191
2192static void
2193vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
2194{
Jesse Gross72e85c42011-06-23 13:04:39 +00002195 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2196 u16 vid;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002197
Jesse Gross72e85c42011-06-23 13:04:39 +00002198 /* allow untagged pkts */
2199 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
2200
2201 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2202 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002203}
2204
2205
Jiri Pirko8e586132011-12-08 19:52:37 -05002206static int
Patrick McHardy80d5c362013-04-19 02:04:28 +00002207vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002208{
2209 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002210
Jesse Grossf6957f82011-08-07 23:15:47 +00002211 if (!(netdev->flags & IFF_PROMISC)) {
2212 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2213 unsigned long flags;
2214
2215 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2216 spin_lock_irqsave(&adapter->cmd_lock, flags);
2217 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2218 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2219 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2220 }
Jesse Gross72e85c42011-06-23 13:04:39 +00002221
2222 set_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05002223
2224 return 0;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002225}
2226
2227
Jiri Pirko8e586132011-12-08 19:52:37 -05002228static int
Patrick McHardy80d5c362013-04-19 02:04:28 +00002229vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002230{
2231 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002232
Jesse Grossf6957f82011-08-07 23:15:47 +00002233 if (!(netdev->flags & IFF_PROMISC)) {
2234 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2235 unsigned long flags;
2236
2237 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
2238 spin_lock_irqsave(&adapter->cmd_lock, flags);
2239 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2240 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2241 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2242 }
Jesse Gross72e85c42011-06-23 13:04:39 +00002243
2244 clear_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05002245
2246 return 0;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002247}
2248
2249
2250static u8 *
2251vmxnet3_copy_mc(struct net_device *netdev)
2252{
2253 u8 *buf = NULL;
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002254 u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002255
2256 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
2257 if (sz <= 0xffff) {
2258 /* We may be called with BH disabled */
2259 buf = kmalloc(sz, GFP_ATOMIC);
2260 if (buf) {
Jiri Pirko22bedad32010-04-01 21:22:57 +00002261 struct netdev_hw_addr *ha;
Jiri Pirko567ec872010-02-23 23:17:07 +00002262 int i = 0;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002263
Jiri Pirko22bedad32010-04-01 21:22:57 +00002264 netdev_for_each_mc_addr(ha, netdev)
2265 memcpy(buf + i++ * ETH_ALEN, ha->addr,
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002266 ETH_ALEN);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002267 }
2268 }
2269 return buf;
2270}
2271
2272
2273static void
2274vmxnet3_set_mc(struct net_device *netdev)
2275{
2276 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002277 unsigned long flags;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002278 struct Vmxnet3_RxFilterConf *rxConf =
2279 &adapter->shared->devRead.rxFilterConf;
2280 u8 *new_table = NULL;
Andy Kingb0eb57c2013-08-23 09:33:49 -07002281 dma_addr_t new_table_pa = 0;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002282 u32 new_mode = VMXNET3_RXM_UCAST;
2283
Jesse Gross72e85c42011-06-23 13:04:39 +00002284 if (netdev->flags & IFF_PROMISC) {
2285 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2286 memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
2287
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002288 new_mode |= VMXNET3_RXM_PROMISC;
Jesse Gross72e85c42011-06-23 13:04:39 +00002289 } else {
2290 vmxnet3_restore_vlan(adapter);
2291 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002292
2293 if (netdev->flags & IFF_BROADCAST)
2294 new_mode |= VMXNET3_RXM_BCAST;
2295
2296 if (netdev->flags & IFF_ALLMULTI)
2297 new_mode |= VMXNET3_RXM_ALL_MULTI;
2298 else
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002299 if (!netdev_mc_empty(netdev)) {
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002300 new_table = vmxnet3_copy_mc(netdev);
2301 if (new_table) {
Shrikrishna Khared37d5ec2015-11-13 15:42:10 -08002302 size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
2303
2304 rxConf->mfTableLen = cpu_to_le16(sz);
Andy Kingb0eb57c2013-08-23 09:33:49 -07002305 new_table_pa = dma_map_single(
2306 &adapter->pdev->dev,
2307 new_table,
Shrikrishna Khared37d5ec2015-11-13 15:42:10 -08002308 sz,
Andy Kingb0eb57c2013-08-23 09:33:49 -07002309 PCI_DMA_TODEVICE);
Andy King4ad9a642014-09-02 13:13:44 -07002310 }
2311
Alexey Khoroshilov5738a092015-11-28 01:29:30 +03002312 if (!dma_mapping_error(&adapter->pdev->dev,
2313 new_table_pa)) {
Andy King4ad9a642014-09-02 13:13:44 -07002314 new_mode |= VMXNET3_RXM_MCAST;
Andy Kingb0eb57c2013-08-23 09:33:49 -07002315 rxConf->mfTablePA = cpu_to_le64(new_table_pa);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002316 } else {
Andy King4ad9a642014-09-02 13:13:44 -07002317 netdev_info(netdev,
2318 "failed to copy mcast list, setting ALL_MULTI\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002319 new_mode |= VMXNET3_RXM_ALL_MULTI;
2320 }
2321 }
2322
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002323 if (!(new_mode & VMXNET3_RXM_MCAST)) {
2324 rxConf->mfTableLen = 0;
2325 rxConf->mfTablePA = 0;
2326 }
2327
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002328 spin_lock_irqsave(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002329 if (new_mode != rxConf->rxMode) {
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00002330 rxConf->rxMode = cpu_to_le32(new_mode);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002331 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2332 VMXNET3_CMD_UPDATE_RX_MODE);
Jesse Gross72e85c42011-06-23 13:04:39 +00002333 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2334 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002335 }
2336
2337 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2338 VMXNET3_CMD_UPDATE_MAC_FILTERS);
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002339 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002340
Andy King4ad9a642014-09-02 13:13:44 -07002341 if (new_table_pa)
Andy Kingb0eb57c2013-08-23 09:33:49 -07002342 dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2343 rxConf->mfTableLen, PCI_DMA_TODEVICE);
Andy King4ad9a642014-09-02 13:13:44 -07002344 kfree(new_table);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002345}
2346
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002347void
2348vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2349{
2350 int i;
2351
2352 for (i = 0; i < adapter->num_rx_queues; i++)
2353 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2354}
2355
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002356
2357/*
2358 * Set up driver_shared based on settings in adapter.
2359 */
2360
2361static void
2362vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2363{
2364 struct Vmxnet3_DriverShared *shared = adapter->shared;
2365 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2366 struct Vmxnet3_TxQueueConf *tqc;
2367 struct Vmxnet3_RxQueueConf *rqc;
2368 int i;
2369
2370 memset(shared, 0, sizeof(*shared));
2371
2372 /* driver settings */
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00002373 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2374 devRead->misc.driverInfo.version = cpu_to_le32(
2375 VMXNET3_DRIVER_VERSION_NUM);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002376 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2377 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2378 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00002379 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2380 *((u32 *)&devRead->misc.driverInfo.gos));
2381 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2382 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002383
Andy Kingb0eb57c2013-08-23 09:33:49 -07002384 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00002385 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002386
2387 /* set up feature flags */
Michał Mirosława0d27302011-04-18 13:31:21 +00002388 if (adapter->netdev->features & NETIF_F_RXCSUM)
Harvey Harrison3843e512010-10-21 18:05:32 +00002389 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002390
Michał Mirosława0d27302011-04-18 13:31:21 +00002391 if (adapter->netdev->features & NETIF_F_LRO) {
Harvey Harrison3843e512010-10-21 18:05:32 +00002392 devRead->misc.uptFeatures |= UPT1_F_LRO;
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00002393 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002394 }
Patrick McHardyf6469682013-04-19 02:04:27 +00002395 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
Harvey Harrison3843e512010-10-21 18:05:32 +00002396 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002397
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00002398 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2399 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2400 devRead->misc.queueDescLen = cpu_to_le32(
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002401 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2402 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002403
2404 /* tx queue settings */
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002405 devRead->misc.numTxQueues = adapter->num_tx_queues;
2406 for (i = 0; i < adapter->num_tx_queues; i++) {
2407 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2408 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2409 tqc = &adapter->tqd_start[i].conf;
2410 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2411 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2412 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
Andy Kingb0eb57c2013-08-23 09:33:49 -07002413 tqc->ddPA = cpu_to_le64(tq->buf_info_pa);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002414 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2415 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
Shrikrishna Khare3c8b3ef2016-06-16 10:51:55 -07002416 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002417 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
2418 tqc->ddLen = cpu_to_le32(
2419 sizeof(struct vmxnet3_tx_buf_info) *
2420 tqc->txRingSize);
2421 tqc->intrIdx = tq->comp_ring.intr_idx;
2422 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002423
2424 /* rx queue settings */
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002425 devRead->misc.numRxQueues = adapter->num_rx_queues;
2426 for (i = 0; i < adapter->num_rx_queues; i++) {
2427 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2428 rqc = &adapter->rqd_start[i].conf;
2429 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2430 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2431 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
Andy Kingb0eb57c2013-08-23 09:33:49 -07002432 rqc->ddPA = cpu_to_le64(rq->buf_info_pa);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002433 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2434 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2435 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
2436 rqc->ddLen = cpu_to_le32(
2437 sizeof(struct vmxnet3_rx_buf_info) *
2438 (rqc->rxRingSize[0] +
2439 rqc->rxRingSize[1]));
2440 rqc->intrIdx = rq->comp_ring.intr_idx;
Shrikrishna Khare50a5ce32016-06-16 10:51:56 -07002441 if (VMXNET3_VERSION_GE_3(adapter)) {
2442 rqc->rxDataRingBasePA =
2443 cpu_to_le64(rq->data_ring.basePA);
2444 rqc->rxDataRingDescSize =
2445 cpu_to_le16(rq->data_ring.desc_size);
2446 }
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002447 }
2448
2449#ifdef VMXNET3_RSS
2450 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2451
2452 if (adapter->rss) {
2453 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
Stephen Hemminger66d35912013-01-15 07:28:34 +00002454
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002455 devRead->misc.uptFeatures |= UPT1_F_RSS;
2456 devRead->misc.numRxQueues = adapter->num_rx_queues;
2457 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2458 UPT1_RSS_HASH_TYPE_IPV4 |
2459 UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2460 UPT1_RSS_HASH_TYPE_IPV6;
2461 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2462 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2463 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
Eric Dumazet6bf79cd2014-11-16 06:23:18 -08002464 netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
Stephen Hemminger66d35912013-01-15 07:28:34 +00002465
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002466 for (i = 0; i < rssConf->indTableSize; i++)
Ben Hutchings278bc422011-12-15 13:56:49 +00002467 rssConf->indTable[i] = ethtool_rxfh_indir_default(
2468 i, adapter->num_rx_queues);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002469
2470 devRead->rssConfDesc.confVer = 1;
Andy Kingb0eb57c2013-08-23 09:33:49 -07002471 devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
2472 devRead->rssConfDesc.confPA =
2473 cpu_to_le64(adapter->rss_conf_pa);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002474 }
2475
2476#endif /* VMXNET3_RSS */
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002477
2478 /* intr settings */
2479 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2480 VMXNET3_IMM_AUTO;
2481 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2482 for (i = 0; i < adapter->intr.num_intrs; i++)
2483 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2484
2485 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
Ronghua Zang6929fe82010-07-15 22:18:47 -07002486 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002487
2488 /* rx filter settings */
2489 devRead->rxFilterConf.rxMode = 0;
2490 vmxnet3_restore_vlan(adapter);
Shreyas Bhatewaraf9f25022011-01-14 14:59:31 +00002491 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2492
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002493 /* the rest are already zeroed */
2494}
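
/*
 * Illustrative sketch, not part of the driver: every multi-byte field
 * written into the shared area by vmxnet3_setup_driver_shared() above
 * goes through cpu_to_le16/32/64 because the device defines the layout
 * as little-endian regardless of guest CPU byte order.
 * demo_write_shared_u32() is a hypothetical wrapper.
 */
#if 0	/* example only */
static void demo_write_shared_u32(__le32 *field, u32 value)
{
	*field = cpu_to_le32(value);	/* no-op on LE, byteswap on BE */
}
#endif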
2495
Shrikrishna Khare4edef402016-06-16 10:51:57 -07002496static void
2497vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
2498{
2499 struct Vmxnet3_DriverShared *shared = adapter->shared;
2500 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2501 unsigned long flags;
2502
2503 if (!VMXNET3_VERSION_GE_3(adapter))
2504 return;
2505
2506 spin_lock_irqsave(&adapter->cmd_lock, flags);
2507 cmdInfo->varConf.confVer = 1;
2508 cmdInfo->varConf.confLen =
2509 cpu_to_le32(sizeof(*adapter->coal_conf));
2510 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
2511
2512 if (adapter->default_coal_mode) {
2513 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2514 VMXNET3_CMD_GET_COALESCE);
2515 } else {
2516 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2517 VMXNET3_CMD_SET_COALESCE);
2518 }
2519
2520 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2521}
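
/*
 * Illustrative sketch, not part of the driver: the command protocol
 * used by vmxnet3_init_coalesce() above and vmxnet3_activate_dev()
 * below.  A command is issued by writing its code to the BAR1 CMD
 * register under cmd_lock; commands that return a value are followed
 * by a read of the same register.  demo_issue_cmd() is a hypothetical
 * wrapper around the driver's real register macros.
 */
#if 0	/* example only */
static u32 demo_issue_cmd(struct vmxnet3_adapter *adapter, u32 cmd)
{
	unsigned long flags;
	u32 ret;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, cmd);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	return ret;
}
#endif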
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002522
2523int
2524vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2525{
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002526 int err, i;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002527 u32 ret;
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002528 unsigned long flags;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002529
Stephen Hemmingerfdcd79b2013-01-15 07:28:29 +00002530 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002531 " ring sizes %u %u %u\n", adapter->netdev->name,
2532 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
2533 adapter->tx_queue[0].tx_ring.size,
2534 adapter->rx_queue[0].rx_ring[0].size,
2535 adapter->rx_queue[0].rx_ring[1].size);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002536
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002537 vmxnet3_tq_init_all(adapter);
2538 err = vmxnet3_rq_init_all(adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002539 if (err) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002540 netdev_err(adapter->netdev,
2541 "Failed to init rx queue error %d\n", err);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002542 goto rq_err;
2543 }
2544
2545 err = vmxnet3_request_irqs(adapter);
2546 if (err) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002547 netdev_err(adapter->netdev,
2548 "Failed to setup irq for error %d\n", err);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002549 goto irq_err;
2550 }
2551
2552 vmxnet3_setup_driver_shared(adapter);
2553
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00002554 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
2555 adapter->shared_pa));
2556 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2557 adapter->shared_pa));
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002558 spin_lock_irqsave(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002559 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2560 VMXNET3_CMD_ACTIVATE_DEV);
2561 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002562 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002563
2564 if (ret != 0) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002565 netdev_err(adapter->netdev,
2566 "Failed to activate dev: error %u\n", ret);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002567 err = -EINVAL;
2568 goto activate_err;
2569 }
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002570
Shrikrishna Khare4edef402016-06-16 10:51:57 -07002571 vmxnet3_init_coalesce(adapter);
2572
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002573 for (i = 0; i < adapter->num_rx_queues; i++) {
2574 VMXNET3_WRITE_BAR0_REG(adapter,
2575 VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
2576 adapter->rx_queue[i].rx_ring[0].next2fill);
2577 VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
2578 (i * VMXNET3_REG_ALIGN)),
2579 adapter->rx_queue[i].rx_ring[1].next2fill);
2580 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002581
 2582	/* Apply the rx filter settings last. */
2583 vmxnet3_set_mc(adapter->netdev);
2584
2585 /*
2586 * Check link state when first activating device. It will start the
2587 * tx queue if the link is up.
2588 */
Shreyas Bhatewara4a1745fc2010-07-15 21:51:14 +00002589 vmxnet3_check_link(adapter, true);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002590 for (i = 0; i < adapter->num_rx_queues; i++)
2591 napi_enable(&adapter->rx_queue[i].napi);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002592 vmxnet3_enable_all_intrs(adapter);
2593 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2594 return 0;
2595
2596activate_err:
2597 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
2598 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
2599 vmxnet3_free_irqs(adapter);
2600irq_err:
2601rq_err:
2602 /* free up buffers we allocated */
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002603 vmxnet3_rq_cleanup_all(adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002604 return err;
2605}
2606
2607
2608void
2609vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2610{
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002611 unsigned long flags;
2612 spin_lock_irqsave(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002613 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002614 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002615}
2616
2617
2618int
2619vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2620{
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002621 int i;
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002622 unsigned long flags;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002623 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2624 return 0;
2625
2626
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002627 spin_lock_irqsave(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002628 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2629 VMXNET3_CMD_QUIESCE_DEV);
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002630 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002631 vmxnet3_disable_all_intrs(adapter);
2632
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002633 for (i = 0; i < adapter->num_rx_queues; i++)
2634 napi_disable(&adapter->rx_queue[i].napi);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002635 netif_tx_disable(adapter->netdev);
2636 adapter->link_speed = 0;
2637 netif_carrier_off(adapter->netdev);
2638
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002639 vmxnet3_tq_cleanup_all(adapter);
2640 vmxnet3_rq_cleanup_all(adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002641 vmxnet3_free_irqs(adapter);
2642 return 0;
2643}
2644
2645
2646static void
2647vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2648{
2649 u32 tmp;
2650
2651 tmp = *(u32 *)mac;
2652 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
2653
2654 tmp = (mac[5] << 8) | mac[4];
2655 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
2656}
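
/*
 * Illustrative sketch, not part of the driver: the register layout
 * vmxnet3_write_mac_addr() above produces on a little-endian guest.
 * MACL carries bytes 0-3 of the address and MACH carries bytes 4-5 in
 * its low 16 bits; e.g. 00:11:22:33:44:55 becomes MACL = 0x33221100
 * and MACH = 0x00005544.  demo_pack_mac() is a hypothetical,
 * endian-independent equivalent.
 */
#if 0	/* example only */
static void demo_pack_mac(const u8 mac[6], u32 *macl, u32 *mach)
{
	*macl = mac[0] | (mac[1] << 8) | (mac[2] << 16) |
		((u32)mac[3] << 24);
	*mach = mac[4] | (mac[5] << 8);
}
#endif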
2657
2658
2659static int
2660vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2661{
2662 struct sockaddr *addr = p;
2663 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2664
2665 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2666 vmxnet3_write_mac_addr(adapter, addr->sa_data);
2667
2668 return 0;
2669}
2670
2671
2672/* ==================== initialization and cleanup routines ============ */
2673
2674static int
2675vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
2676{
2677 int err;
2678 unsigned long mmio_start, mmio_len;
2679 struct pci_dev *pdev = adapter->pdev;
2680
2681 err = pci_enable_device(pdev);
2682 if (err) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002683 dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002684 return err;
2685 }
2686
2687 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
2688 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002689 dev_err(&pdev->dev,
2690 "pci_set_consistent_dma_mask failed\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002691 err = -EIO;
2692 goto err_set_mask;
2693 }
2694 *dma64 = true;
2695 } else {
2696 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002697 dev_err(&pdev->dev,
2698 "pci_set_dma_mask failed\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002699 err = -EIO;
2700 goto err_set_mask;
2701 }
2702 *dma64 = false;
2703 }
2704
2705 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2706 vmxnet3_driver_name);
2707 if (err) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002708 dev_err(&pdev->dev,
2709 "Failed to request region for adapter: error %d\n", err);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002710 goto err_set_mask;
2711 }
2712
2713 pci_set_master(pdev);
2714
2715 mmio_start = pci_resource_start(pdev, 0);
2716 mmio_len = pci_resource_len(pdev, 0);
2717 adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
2718 if (!adapter->hw_addr0) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002719 dev_err(&pdev->dev, "Failed to map bar0\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002720 err = -EIO;
2721 goto err_ioremap;
2722 }
2723
2724 mmio_start = pci_resource_start(pdev, 1);
2725 mmio_len = pci_resource_len(pdev, 1);
2726 adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
2727 if (!adapter->hw_addr1) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002728 dev_err(&pdev->dev, "Failed to map bar1\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002729 err = -EIO;
2730 goto err_bar1;
2731 }
2732 return 0;
2733
2734err_bar1:
2735 iounmap(adapter->hw_addr0);
2736err_ioremap:
2737 pci_release_selected_regions(pdev, (1 << 2) - 1);
2738err_set_mask:
2739 pci_disable_device(pdev);
2740 return err;
2741}
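
/*
 * Illustrative sketch, not part of the driver: the DMA mask
 * negotiation in vmxnet3_alloc_pci_resources() above, condensed into
 * one helper with the same fallback behaviour (64-bit first, then
 * 32-bit; fail if the 64-bit coherent mask cannot follow the streaming
 * mask).  demo_set_dma_mask() is a hypothetical helper.
 */
#if 0	/* example only */
static int demo_set_dma_mask(struct pci_dev *pdev, bool *dma64)
{
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
			return -EIO;	/* coherent mask must match */
		*dma64 = true;
	} else {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
			return -EIO;
		*dma64 = false;
	}

	return 0;
}
#endif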
2742
2743
2744static void
2745vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2746{
2747 BUG_ON(!adapter->pdev);
2748
2749 iounmap(adapter->hw_addr0);
2750 iounmap(adapter->hw_addr1);
2751 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
2752 pci_disable_device(adapter->pdev);
2753}
2754
2755
2756static void
2757vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2758{
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002759 size_t sz, i, ring0_size, ring1_size, comp_size;
2760 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];
2761
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002762
2763 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2764 VMXNET3_MAX_ETH_HDR_SIZE) {
2765 adapter->skb_buf_size = adapter->netdev->mtu +
2766 VMXNET3_MAX_ETH_HDR_SIZE;
2767 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
2768 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
2769
2770 adapter->rx_buf_per_pkt = 1;
2771 } else {
2772 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
2773 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
2774 VMXNET3_MAX_ETH_HDR_SIZE;
2775 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
2776 }
2777
2778 /*
2779 * for simplicity, force the ring0 size to be a multiple of
2780 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2781 */
2782 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002783 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
2784 ring0_size = (ring0_size + sz - 1) / sz * sz;
Shreyas Bhatewaraa53255d2011-01-14 14:59:25 +00002785 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002786 sz * sz);
2787 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
Shrikrishna Khare53831aa2015-01-06 09:20:15 -08002788 ring1_size = (ring1_size + sz - 1) / sz * sz;
2789 ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
2790 sz * sz);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002791 comp_size = ring0_size + ring1_size;
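	/*
	 * Illustrative example (values assumed, not taken from the
	 * headers): with rx_buf_per_pkt == 3 and a ring alignment of 32,
	 * sz == 96, so a requested ring0 size of 1024 rounds up to
	 * (1024 + 95) / 96 * 96 == 1056 before being clamped to the ring
	 * maximum. The completion ring needs one entry per descriptor in
	 * both fill rings, hence comp_size = ring0_size + ring1_size.
	 */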
2792
2793 for (i = 0; i < adapter->num_rx_queues; i++) {
2794 rq = &adapter->rx_queue[i];
2795 rq->rx_ring[0].size = ring0_size;
2796 rq->rx_ring[1].size = ring1_size;
2797 rq->comp_ring.size = comp_size;
2798 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002799}
2800
2801
2802int
2803vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
Shrikrishna Khare3c8b3ef2016-06-16 10:51:55 -07002804 u32 rx_ring_size, u32 rx_ring2_size,
Shrikrishna Khare50a5ce32016-06-16 10:51:56 -07002805 u16 txdata_desc_size, u16 rxdata_desc_size)
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002806{
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002807 int err = 0, i;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002808
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002809 for (i = 0; i < adapter->num_tx_queues; i++) {
2810 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2811 tq->tx_ring.size = tx_ring_size;
2812 tq->data_ring.size = tx_ring_size;
2813 tq->comp_ring.size = tx_ring_size;
Shrikrishna Khare3c8b3ef2016-06-16 10:51:55 -07002814 tq->txdata_desc_size = txdata_desc_size;
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002815 tq->shared = &adapter->tqd_start[i].ctrl;
2816 tq->stopped = true;
2817 tq->adapter = adapter;
2818 tq->qid = i;
2819 err = vmxnet3_tq_create(tq, adapter);
2820 /*
2821 * Too late to change num_tx_queues. We cannot make do with
2822 * fewer queues than we asked for, so fail the setup.
2823 */
2824 if (err)
2825 goto queue_err;
2826 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002827
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002828 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
2829 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002830 vmxnet3_adjust_rx_ring_size(adapter);
Shrikrishna Khare50a5ce32016-06-16 10:51:56 -07002831
2832 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002833 for (i = 0; i < adapter->num_rx_queues; i++) {
2834 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2835 /* qid and qid2 for rx queues are assigned later, once the
2836 * number of rx queues is finalized after allocating intrs */
2837 rq->shared = &adapter->rqd_start[i].ctrl;
2838 rq->adapter = adapter;
Shrikrishna Khare50a5ce32016-06-16 10:51:56 -07002839 rq->data_ring.desc_size = rxdata_desc_size;
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002840 err = vmxnet3_rq_create(rq, adapter);
2841 if (err) {
2842 if (i == 0) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002843 netdev_err(adapter->netdev,
2844 "Could not allocate any rx queues. "
2845 "Aborting.\n");
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002846 goto queue_err;
2847 } else {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002848 netdev_info(adapter->netdev,
2849 "Number of rx queues changed "
2850 "to : %d.\n", i);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002851 adapter->num_rx_queues = i;
2852 err = 0;
2853 break;
2854 }
2855 }
2856 }
Shrikrishna Khare50a5ce32016-06-16 10:51:56 -07002857
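	/* rq_create is expected to clear rxdataring_enabled when a queue
	 * fails to set up its rx data ring; in that case drop the data
	 * rings that earlier queues may already have created, so every
	 * queue runs in the same mode.
	 */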
2858 if (!adapter->rxdataring_enabled)
2859 vmxnet3_rq_destroy_all_rxdataring(adapter);
2860
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002861 return err;
2862queue_err:
2863 vmxnet3_tq_destroy_all(adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002864 return err;
2865}
2866
2867static int
2868vmxnet3_open(struct net_device *netdev)
2869{
2870 struct vmxnet3_adapter *adapter;
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002871 int err, i;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002872
2873 adapter = netdev_priv(netdev);
2874
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002875 for (i = 0; i < adapter->num_tx_queues; i++)
2876 spin_lock_init(&adapter->tx_queue[i].tx_lock);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002877
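	/*
	 * Version-3 devices report their preferred tx data-ring
	 * descriptor size; values outside the supported min/max or
	 * violating the alignment mask are rejected below in favor of
	 * the default sizeof(struct Vmxnet3_TxDataDesc).
	 */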
Shrikrishna Khare3c8b3ef2016-06-16 10:51:55 -07002878 if (VMXNET3_VERSION_GE_3(adapter)) {
2879 unsigned long flags;
2880 u16 txdata_desc_size;
2881
2882 spin_lock_irqsave(&adapter->cmd_lock, flags);
2883 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2884 VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
2885 txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
2886 VMXNET3_REG_CMD);
2887 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2888
2889 if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
2890 (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
2891 (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
2892 adapter->txdata_desc_size =
2893 sizeof(struct Vmxnet3_TxDataDesc);
2894 } else {
2895 adapter->txdata_desc_size = txdata_desc_size;
2896 }
2897 } else {
2898 adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
2899 }
2900
2901 err = vmxnet3_create_queues(adapter,
2902 adapter->tx_ring_size,
Neil Hormanf00e2b02014-06-13 10:03:21 -04002903 adapter->rx_ring_size,
Shrikrishna Khare3c8b3ef2016-06-16 10:51:55 -07002904 adapter->rx_ring2_size,
Shrikrishna Khare50a5ce32016-06-16 10:51:56 -07002905 adapter->txdata_desc_size,
2906 adapter->rxdata_desc_size);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002907 if (err)
2908 goto queue_err;
2909
2910 err = vmxnet3_activate_dev(adapter);
2911 if (err)
2912 goto activate_err;
2913
2914 return 0;
2915
2916activate_err:
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002917 vmxnet3_rq_destroy_all(adapter);
2918 vmxnet3_tq_destroy_all(adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002919queue_err:
2920 return err;
2921}
2922
2923
2924static int
2925vmxnet3_close(struct net_device *netdev)
2926{
2927 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2928
2929 /*
2930 * Reset_work may be in the middle of resetting the device; wait for its
2931 * completion.
2932 */
2933 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2934 msleep(1);
2935
2936 vmxnet3_quiesce_dev(adapter);
2937
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002938 vmxnet3_rq_destroy_all(adapter);
2939 vmxnet3_tq_destroy_all(adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002940
2941 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2942
2943
2944 return 0;
2945}
2946
2947
2948void
2949vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2950{
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002951 int i;
2952
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002953 /*
2954 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
2955 * vmxnet3_close() will deadlock.
2956 */
2957 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
2958
2959 /* we need to enable NAPI, otherwise dev_close will deadlock */
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002960 for (i = 0; i < adapter->num_rx_queues; i++)
2961 napi_enable(&adapter->rx_queue[i].napi);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002962 dev_close(adapter->netdev);
2963}
2964
2965
2966static int
2967vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2968{
2969 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2970 int err = 0;
2971
2972 if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
2973 return -EINVAL;
2974
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002975 netdev->mtu = new_mtu;
2976
2977 /*
2978 * Reset_work may be in the middle of resetting the device; wait for its
2979 * completion.
2980 */
2981 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2982 msleep(1);
2983
2984 if (netif_running(netdev)) {
2985 vmxnet3_quiesce_dev(adapter);
2986 vmxnet3_reset_dev(adapter);
2987
2988 /* we need to re-create the rx queue based on the new mtu */
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002989 vmxnet3_rq_destroy_all(adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002990 vmxnet3_adjust_rx_ring_size(adapter);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002991 err = vmxnet3_rq_create_all(adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002992 if (err) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002993 netdev_err(netdev,
2994 "failed to re-create rx queues, "
2995 " error %d. Closing it.\n", err);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002996 goto out;
2997 }
2998
2999 err = vmxnet3_activate_dev(adapter);
3000 if (err) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00003001 netdev_err(netdev,
3002 "failed to re-activate, error %d. "
3003 "Closing it\n", err);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003004 goto out;
3005 }
3006 }
3007
3008out:
3009 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3010 if (err)
3011 vmxnet3_force_close(adapter);
3012
3013 return err;
3014}
3015
3016
3017static void
3018vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
3019{
3020 struct net_device *netdev = adapter->netdev;
3021
Michał Mirosława0d27302011-04-18 13:31:21 +00003022 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00003023 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3024 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
Jesse Gross72e85c42011-06-23 13:04:39 +00003025 NETIF_F_LRO;
Michał Mirosława0d27302011-04-18 13:31:21 +00003026 if (dma64)
Shreyas Bhatewaraebbf9292011-07-20 17:21:51 +00003027 netdev->hw_features |= NETIF_F_HIGHDMA;
Jesse Gross72e85c42011-06-23 13:04:39 +00003028 netdev->vlan_features = netdev->hw_features &
Patrick McHardyf6469682013-04-19 02:04:27 +00003029 ~(NETIF_F_HW_VLAN_CTAG_TX |
3030 NETIF_F_HW_VLAN_CTAG_RX);
3031 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003032}
3033
3034
3035static void
3036vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
3037{
3038 u32 tmp;
3039
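	/* The MAC address spans two registers: MACL carries bytes 0-3,
	 * and the low 16 bits of MACH carry bytes 4 and 5.
	 */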
3040 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
3041 *(u32 *)mac = tmp;
3042
3043 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
3044 mac[4] = tmp & 0xff;
3045 mac[5] = (tmp >> 8) & 0xff;
3046}
3047
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003048#ifdef CONFIG_PCI_MSI
3049
3050/*
3051 * Enable MSI-X vectors.
3052 * Returns:
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003053 * VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of required
Alexander Gordeevb60b8692014-02-18 11:12:02 +01003054 * vectors could be enabled,
3055 * the number of vectors enabled otherwise (this number is greater
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003056 * than VMXNET3_LINUX_MIN_MSIX_VECT), or a negative error code.
3057 */
3058
3059static int
Alexander Gordeevb60b8692014-02-18 11:12:02 +01003060vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003061{
Alexander Gordeevc0a1be32014-02-18 11:12:03 +01003062 int ret = pci_enable_msix_range(adapter->pdev,
3063 adapter->intr.msix_entries, nvec, nvec);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003064
Alexander Gordeevc0a1be32014-02-18 11:12:03 +01003065 if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
3066 dev_err(&adapter->netdev->dev,
3067 "Failed to enable %d MSI-X, trying %d\n",
3068 nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
3069
3070 ret = pci_enable_msix_range(adapter->pdev,
3071 adapter->intr.msix_entries,
3072 VMXNET3_LINUX_MIN_MSIX_VECT,
3073 VMXNET3_LINUX_MIN_MSIX_VECT);
3074 }
3075
3076 if (ret < 0) {
3077 dev_err(&adapter->netdev->dev,
3078 "Failed to enable MSI-X, error: %d\n", ret);
3079 }
3080
3081 return ret;
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003082}
3083
3084
3085#endif /* CONFIG_PCI_MSI */
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003086
3087static void
3088vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
3089{
3090 u32 cfg;
Roland Dreiere328d412011-05-06 08:32:53 +00003091 unsigned long flags;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003092
3093 /* intr settings */
Roland Dreiere328d412011-05-06 08:32:53 +00003094 spin_lock_irqsave(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003095 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3096 VMXNET3_CMD_GET_CONF_INTR);
3097 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
Roland Dreiere328d412011-05-06 08:32:53 +00003098 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003099 adapter->intr.type = cfg & 0x3;
3100 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
3101
3102 if (adapter->intr.type == VMXNET3_IT_AUTO) {
Shreyas Bhatewara0bdc0d72010-07-15 15:21:27 +00003103 adapter->intr.type = VMXNET3_IT_MSIX;
3104 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003105
Randy Dunlap8f7e5242009-10-14 20:38:58 -07003106#ifdef CONFIG_PCI_MSI
Shreyas Bhatewara0bdc0d72010-07-15 15:21:27 +00003107 if (adapter->intr.type == VMXNET3_IT_MSIX) {
Alexander Gordeevb60b8692014-02-18 11:12:02 +01003108 int i, nvec;
Shreyas Bhatewara0bdc0d72010-07-15 15:21:27 +00003109
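		/* Vector budget: one per tx queue (or one shared tx
		 * vector), plus one per rx queue unless rx buddies with
		 * tx (VMXNET3_INTR_BUDDYSHARE), plus one for link events;
		 * never fewer than VMXNET3_LINUX_MIN_MSIX_VECT.
		 */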
Alexander Gordeevb60b8692014-02-18 11:12:02 +01003110 nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
3111 1 : adapter->num_tx_queues;
3112 nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
3113 0 : adapter->num_rx_queues;
3114 nvec += 1; /* for link event */
3115 nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
3116 nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003117
Alexander Gordeevb60b8692014-02-18 11:12:02 +01003118 for (i = 0; i < nvec; i++)
3119 adapter->intr.msix_entries[i].entry = i;
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003120
Alexander Gordeevb60b8692014-02-18 11:12:02 +01003121 nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
3122 if (nvec < 0)
3123 goto msix_err;
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003124
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003125 /* If we cannot allocate one MSI-X vector per queue,
3126 * limit the number of rx queues to 1.
3127 */
Alexander Gordeevb60b8692014-02-18 11:12:02 +01003128 if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003129 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
Shreyas Bhatewara7e96fbf2011-01-14 15:00:03 +00003130 || adapter->num_rx_queues != 1) {
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003131 adapter->share_intr = VMXNET3_INTR_TXSHARE;
Stephen Hemminger204a6e62013-01-15 07:28:30 +00003132 netdev_err(adapter->netdev,
3133 "Number of rx queues : 1\n");
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003134 adapter->num_rx_queues = 1;
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003135 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003136 }
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003137
Alexander Gordeevb60b8692014-02-18 11:12:02 +01003138 adapter->intr.num_intrs = nvec;
3139 return;
3140
3141msix_err:
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003142 /* If we cannot allocate MSI-X vectors, use only one rx queue */
Stephen Hemminger4bad25f2013-01-15 07:28:28 +00003143 dev_info(&adapter->pdev->dev,
3144 "Failed to enable MSI-X, error %d. "
Alexander Gordeevb60b8692014-02-18 11:12:02 +01003145 "Limiting #rx queues to 1, try MSI.\n", nvec);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003146
Shreyas Bhatewara0bdc0d72010-07-15 15:21:27 +00003147 adapter->intr.type = VMXNET3_IT_MSI;
3148 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003149
Shreyas Bhatewara0bdc0d72010-07-15 15:21:27 +00003150 if (adapter->intr.type == VMXNET3_IT_MSI) {
Alexander Gordeevb60b8692014-02-18 11:12:02 +01003151 if (!pci_enable_msi(adapter->pdev)) {
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003152 adapter->num_rx_queues = 1;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003153 adapter->intr.num_intrs = 1;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003154 return;
3155 }
3156 }
Shreyas Bhatewara0bdc0d72010-07-15 15:21:27 +00003157#endif /* CONFIG_PCI_MSI */
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003158
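	/* Last resort: legacy INTx with a single interrupt shared by all
	 * queues and events, used when MSI-X and MSI are unavailable.
	 */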
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003159 adapter->num_rx_queues = 1;
Stephen Hemminger204a6e62013-01-15 07:28:30 +00003160 dev_info(&adapter->netdev->dev,
3161 "Using INTx interrupt, #Rx queues: 1.\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003162 adapter->intr.type = VMXNET3_IT_INTX;
3163
3164 /* INT-X related setting */
3165 adapter->intr.num_intrs = 1;
3166}
3167
3168
3169static void
3170vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
3171{
3172 if (adapter->intr.type == VMXNET3_IT_MSIX)
3173 pci_disable_msix(adapter->pdev);
3174 else if (adapter->intr.type == VMXNET3_IT_MSI)
3175 pci_disable_msi(adapter->pdev);
3176 else
3177 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
3178}
3179
3180
3181static void
3182vmxnet3_tx_timeout(struct net_device *netdev)
3183{
3184 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3185 adapter->tx_timeout_count++;
3186
Stephen Hemminger204a6e62013-01-15 07:28:30 +00003187 netdev_err(adapter->netdev, "tx hang\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003188 schedule_work(&adapter->work);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003189 netif_wake_queue(adapter->netdev);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003190}
3191
3192
3193static void
3194vmxnet3_reset_work(struct work_struct *data)
3195{
3196 struct vmxnet3_adapter *adapter;
3197
3198 adapter = container_of(data, struct vmxnet3_adapter, work);
3199
3200 /* if another thread is resetting the device, no need to proceed */
3201 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3202 return;
3203
3204 /* if the device is closed, we must leave it alone */
Shreyas Bhatewarad9a5f212010-07-19 07:02:13 +00003205 rtnl_lock();
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003206 if (netif_running(adapter->netdev)) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00003207 netdev_notice(adapter->netdev, "resetting\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003208 vmxnet3_quiesce_dev(adapter);
3209 vmxnet3_reset_dev(adapter);
3210 vmxnet3_activate_dev(adapter);
3211 } else {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00003212 netdev_info(adapter->netdev, "already closed\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003213 }
Shreyas Bhatewarad9a5f212010-07-19 07:02:13 +00003214 rtnl_unlock();
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003215
3216 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3217}
3218
3219
Bill Pemberton3a4751a2012-12-03 09:24:16 -05003220static int
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003221vmxnet3_probe_device(struct pci_dev *pdev,
3222 const struct pci_device_id *id)
3223{
3224 static const struct net_device_ops vmxnet3_netdev_ops = {
3225 .ndo_open = vmxnet3_open,
3226 .ndo_stop = vmxnet3_close,
3227 .ndo_start_xmit = vmxnet3_xmit_frame,
3228 .ndo_set_mac_address = vmxnet3_set_mac_addr,
3229 .ndo_change_mtu = vmxnet3_change_mtu,
Michał Mirosława0d27302011-04-18 13:31:21 +00003230 .ndo_set_features = vmxnet3_set_features,
stephen hemminger95305f62011-06-08 14:53:57 +00003231 .ndo_get_stats64 = vmxnet3_get_stats64,
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003232 .ndo_tx_timeout = vmxnet3_tx_timeout,
Jiri Pirkoafc4b132011-08-16 06:29:01 +00003233 .ndo_set_rx_mode = vmxnet3_set_mc,
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003234 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
3235 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
3236#ifdef CONFIG_NET_POLL_CONTROLLER
3237 .ndo_poll_controller = vmxnet3_netpoll,
3238#endif
3239 };
3240 int err;
3241 bool dma64 = false; /* init only to silence a spurious gcc warning */
3242 u32 ver;
3243 struct net_device *netdev;
3244 struct vmxnet3_adapter *adapter;
3245 u8 mac[ETH_ALEN];
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003246 int size;
3247 int num_tx_queues;
3248 int num_rx_queues;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003249
Shreyas Bhatewarae154b632011-05-10 06:13:56 +00003250 if (!pci_msi_enabled())
3251 enable_mq = 0;
3252
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003253#ifdef VMXNET3_RSS
3254 if (enable_mq)
3255 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3256 (int)num_online_cpus());
3257 else
3258#endif
3259 num_rx_queues = 1;
Shreyas Bhatewaraeebb02b2011-07-07 00:25:52 -07003260 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003261
3262 if (enable_mq)
3263 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
3264 (int)num_online_cpus());
3265 else
3266 num_tx_queues = 1;
3267
Shreyas Bhatewaraeebb02b2011-07-07 00:25:52 -07003268 num_tx_queues = rounddown_pow_of_two(num_tx_queues);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003269 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
3270 max(num_tx_queues, num_rx_queues));
Stephen Hemminger204a6e62013-01-15 07:28:30 +00003271 dev_info(&pdev->dev,
3272 "# of Tx queues : %d, # of Rx queues : %d\n",
3273 num_tx_queues, num_rx_queues);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003274
Joe Perches41de8d42012-01-29 13:47:52 +00003275 if (!netdev)
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003276 return -ENOMEM;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003277
3278 pci_set_drvdata(pdev, netdev);
3279 adapter = netdev_priv(netdev);
3280 adapter->netdev = netdev;
3281 adapter->pdev = pdev;
3282
Neil Hormanf00e2b02014-06-13 10:03:21 -04003283 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
3284 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
Shrikrishna Khare53831aa2015-01-06 09:20:15 -08003285 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
Neil Hormanf00e2b02014-06-13 10:03:21 -04003286
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00003287 spin_lock_init(&adapter->cmd_lock);
Andy Kingb0eb57c2013-08-23 09:33:49 -07003288 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3289 sizeof(struct vmxnet3_adapter),
3290 PCI_DMA_TODEVICE);
Alexey Khoroshilov5738a092015-11-28 01:29:30 +03003291 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
3292 dev_err(&pdev->dev, "Failed to map dma\n");
3293 err = -EFAULT;
3294 goto err_dma_map;
3295 }
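	/* The DriverShared area (devRead and friends) is read by the
	 * device itself, so it lives in DMA-coherent memory.
	 */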
Andy Kingb0eb57c2013-08-23 09:33:49 -07003296 adapter->shared = dma_alloc_coherent(
3297 &adapter->pdev->dev,
3298 sizeof(struct Vmxnet3_DriverShared),
3299 &adapter->shared_pa, GFP_KERNEL);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003300 if (!adapter->shared) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00003301 dev_err(&pdev->dev, "Failed to allocate memory\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003302 err = -ENOMEM;
3303 goto err_alloc_shared;
3304 }
3305
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003306 adapter->num_rx_queues = num_rx_queues;
3307 adapter->num_tx_queues = num_tx_queues;
Bhavesh Davdae4fabf22013-03-06 12:04:53 +00003308 adapter->rx_buf_per_pkt = 1;
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003309
3310 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3311 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
Andy Kingb0eb57c2013-08-23 09:33:49 -07003312 adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
3313 &adapter->queue_desc_pa,
3314 GFP_KERNEL);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003315
3316 if (!adapter->tqd_start) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00003317 dev_err(&pdev->dev, "Failed to allocate memory\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003318 err = -ENOMEM;
3319 goto err_alloc_queue_desc;
3320 }
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003321 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
stephen hemminger96800ee2012-11-13 13:53:28 +00003322 adapter->num_tx_queues);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003323
Andy Kingb0eb57c2013-08-23 09:33:49 -07003324 adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
3325 sizeof(struct Vmxnet3_PMConf),
3326 &adapter->pm_conf_pa,
3327 GFP_KERNEL);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003328 if (adapter->pm_conf == NULL) {
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003329 err = -ENOMEM;
3330 goto err_alloc_pm;
3331 }
3332
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003333#ifdef VMXNET3_RSS
3334
Andy Kingb0eb57c2013-08-23 09:33:49 -07003335 adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
3336 sizeof(struct UPT1_RSSConf),
3337 &adapter->rss_conf_pa,
3338 GFP_KERNEL);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003339 if (adapter->rss_conf == NULL) {
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003340 err = -ENOMEM;
3341 goto err_alloc_rss;
3342 }
3343#endif /* VMXNET3_RSS */
3344
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003345 err = vmxnet3_alloc_pci_resources(adapter, &dma64);
3346 if (err < 0)
3347 goto err_alloc_pci;
3348
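	/* VRRS exposes the supported device revisions as a bit mask;
	 * negotiate the highest common revision by writing the chosen
	 * bit back, preferring rev 3, then 2, then 1.
	 */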
3349 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
Shrikrishna Khare6af9d782016-06-16 10:51:59 -07003350 if (ver & (1 << VMXNET3_REV_3)) {
3351 VMXNET3_WRITE_BAR1_REG(adapter,
3352 VMXNET3_REG_VRRS,
3353 1 << VMXNET3_REV_3);
3354 adapter->version = VMXNET3_REV_3 + 1;
3355 } else if (ver & (1 << VMXNET3_REV_2)) {
Shrikrishna Khare190af102016-06-16 10:51:53 -07003356 VMXNET3_WRITE_BAR1_REG(adapter,
3357 VMXNET3_REG_VRRS,
3358 1 << VMXNET3_REV_2);
3359 adapter->version = VMXNET3_REV_2 + 1;
3360 } else if (ver & (1 << VMXNET3_REV_1)) {
3361 VMXNET3_WRITE_BAR1_REG(adapter,
3362 VMXNET3_REG_VRRS,
3363 1 << VMXNET3_REV_1);
3364 adapter->version = VMXNET3_REV_1 + 1;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003365 } else {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00003366 dev_err(&pdev->dev,
3367 "Incompatible h/w version (0x%x) for adapter\n", ver);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003368 err = -EBUSY;
3369 goto err_ver;
3370 }
Shreyas Bhatewara45dac1d2015-06-19 13:38:29 -07003371 dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003372
3373 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
3374 if (ver & 1) {
3375 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
3376 } else {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00003377 dev_err(&pdev->dev,
3378 "Incompatible upt version (0x%x) for adapter\n", ver);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003379 err = -EBUSY;
3380 goto err_ver;
3381 }
3382
Shrikrishna Khare4edef402016-06-16 10:51:57 -07003383 if (VMXNET3_VERSION_GE_3(adapter)) {
3384 adapter->coal_conf =
3385 dma_alloc_coherent(&adapter->pdev->dev,
3386 sizeof(struct Vmxnet3_CoalesceScheme),
3388 &adapter->coal_conf_pa,
3389 GFP_KERNEL);
3390 if (!adapter->coal_conf) {
3391 err = -ENOMEM;
3392 goto err_ver;
3393 }
3394 memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
3395 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
3396 adapter->default_coal_mode = true;
3397 }
3398
Shreyas Bhatewarae101e7d2011-07-20 16:01:11 +00003399 SET_NETDEV_DEV(netdev, &pdev->dev);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003400 vmxnet3_declare_features(adapter, dma64);
3401
Shrikrishna Khare50a5ce32016-06-16 10:51:56 -07003402 adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
3403 VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
3404
Stephen Hemminger4db37a72013-01-15 07:28:33 +00003405 if (adapter->num_tx_queues == adapter->num_rx_queues)
3406 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
3407 else
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003408 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
3409
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003410 vmxnet3_alloc_intr_resources(adapter);
3411
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003412#ifdef VMXNET3_RSS
3413 if (adapter->num_rx_queues > 1 &&
3414 adapter->intr.type == VMXNET3_IT_MSIX) {
3415 adapter->rss = true;
Stephen Hemminger7db11f72013-01-15 07:28:35 +00003416 netdev->hw_features |= NETIF_F_RXHASH;
3417 netdev->features |= NETIF_F_RXHASH;
Stephen Hemminger204a6e62013-01-15 07:28:30 +00003418 dev_dbg(&pdev->dev, "RSS is enabled.\n");
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003419 } else {
3420 adapter->rss = false;
3421 }
3422#endif
3423
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003424 vmxnet3_read_mac_addr(adapter, mac);
3425 memcpy(netdev->dev_addr, mac, netdev->addr_len);
3426
3427 netdev->netdev_ops = &vmxnet3_netdev_ops;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003428 vmxnet3_set_ethtool_ops(netdev);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003429 netdev->watchdog_timeo = 5 * HZ;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003430
3431 INIT_WORK(&adapter->work, vmxnet3_reset_work);
Steve Hodgsone3bc4ff2012-08-14 17:13:36 +01003432 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003433
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003434 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3435 int i;
3436 for (i = 0; i < adapter->num_rx_queues; i++) {
3437 netif_napi_add(adapter->netdev,
3438 &adapter->rx_queue[i].napi,
3439 vmxnet3_poll_rx_only, 64);
3440 }
3441 } else {
3442 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
3443 vmxnet3_poll, 64);
3444 }
3445
3446 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
3447 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
3448
Neil Horman6cdd20c2013-01-29 16:15:45 -05003449 netif_carrier_off(netdev);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003450 err = register_netdev(netdev);
3451
3452 if (err) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00003453 dev_err(&pdev->dev, "Failed to register adapter\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003454 goto err_register;
3455 }
3456
Shreyas Bhatewara4a1745fc2010-07-15 21:51:14 +00003457 vmxnet3_check_link(adapter, false);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003458 return 0;
3459
3460err_register:
Shrikrishna Khare4edef402016-06-16 10:51:57 -07003461 if (VMXNET3_VERSION_GE_3(adapter)) {
3462 dma_free_coherent(&adapter->pdev->dev,
3463 sizeof(struct Vmxnet3_CoalesceScheme),
3464 adapter->coal_conf, adapter->coal_conf_pa);
3465 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003466 vmxnet3_free_intr_resources(adapter);
3467err_ver:
3468 vmxnet3_free_pci_resources(adapter);
3469err_alloc_pci:
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003470#ifdef VMXNET3_RSS
Andy Kingb0eb57c2013-08-23 09:33:49 -07003471 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3472 adapter->rss_conf, adapter->rss_conf_pa);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003473err_alloc_rss:
3474#endif
Andy Kingb0eb57c2013-08-23 09:33:49 -07003475 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3476 adapter->pm_conf, adapter->pm_conf_pa);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003477err_alloc_pm:
Andy Kingb0eb57c2013-08-23 09:33:49 -07003478 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
3479 adapter->queue_desc_pa);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003480err_alloc_queue_desc:
Andy Kingb0eb57c2013-08-23 09:33:49 -07003481 dma_free_coherent(&adapter->pdev->dev,
3482 sizeof(struct Vmxnet3_DriverShared),
3483 adapter->shared, adapter->shared_pa);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003484err_alloc_shared:
Andy Kingb0eb57c2013-08-23 09:33:49 -07003485 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3486 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
Alexey Khoroshilov5738a092015-11-28 01:29:30 +03003487err_dma_map:
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003488 free_netdev(netdev);
3489 return err;
3490}
3491
3492
Bill Pemberton3a4751a2012-12-03 09:24:16 -05003493static void
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003494vmxnet3_remove_device(struct pci_dev *pdev)
3495{
3496 struct net_device *netdev = pci_get_drvdata(pdev);
3497 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003498 int size = 0;
3499 int num_rx_queues;
3500
3501#ifdef VMXNET3_RSS
3502 if (enable_mq)
3503 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3504 (int)num_online_cpus());
3505 else
3506#endif
3507 num_rx_queues = 1;
Shreyas Bhatewaraeebb02b2011-07-07 00:25:52 -07003508 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003509
Tejun Heo23f333a2010-12-12 16:45:14 +01003510 cancel_work_sync(&adapter->work);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003511
3512 unregister_netdev(netdev);
3513
3514 vmxnet3_free_intr_resources(adapter);
3515 vmxnet3_free_pci_resources(adapter);
Shrikrishna Khare4edef402016-06-16 10:51:57 -07003516 if (VMXNET3_VERSION_GE_3(adapter)) {
3517 dma_free_coherent(&adapter->pdev->dev,
3518 sizeof(struct Vmxnet3_CoalesceScheme),
3519 adapter->coal_conf, adapter->coal_conf_pa);
3520 }
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003521#ifdef VMXNET3_RSS
Andy Kingb0eb57c2013-08-23 09:33:49 -07003522 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3523 adapter->rss_conf, adapter->rss_conf_pa);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003524#endif
Andy Kingb0eb57c2013-08-23 09:33:49 -07003525 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3526 adapter->pm_conf, adapter->pm_conf_pa);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003527
3528 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3529 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
Andy Kingb0eb57c2013-08-23 09:33:49 -07003530 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
3531 adapter->queue_desc_pa);
3532 dma_free_coherent(&adapter->pdev->dev,
3533 sizeof(struct Vmxnet3_DriverShared),
3534 adapter->shared, adapter->shared_pa);
3535 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3536 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003537 free_netdev(netdev);
3538}
3539
Shreyas Bhatewarae9ba47b2015-06-19 13:36:02 -07003540static void vmxnet3_shutdown_device(struct pci_dev *pdev)
3541{
3542 struct net_device *netdev = pci_get_drvdata(pdev);
3543 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3544 unsigned long flags;
3545
3546 /* Reset_work may be in the middle of resetting the device; wait for its
3547 * completion.
3548 */
3549 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3550 msleep(1);
3551
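	/* Try to take the QUIESCED bit; if it was already set the device
	 * is quiesced and we only need to release the RESETTING bit taken
	 * above.
	 */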
3552 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
3553 &adapter->state)) {
3554 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3555 return;
3556 }
3557 spin_lock_irqsave(&adapter->cmd_lock, flags);
3558 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3559 VMXNET3_CMD_QUIESCE_DEV);
3560 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3561 vmxnet3_disable_all_intrs(adapter);
3562
3563 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3564}
3565
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003566
3567#ifdef CONFIG_PM
3568
3569static int
3570vmxnet3_suspend(struct device *device)
3571{
3572 struct pci_dev *pdev = to_pci_dev(device);
3573 struct net_device *netdev = pci_get_drvdata(pdev);
3574 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3575 struct Vmxnet3_PMConf *pmConf;
3576 struct ethhdr *ehdr;
3577 struct arphdr *ahdr;
3578 u8 *arpreq;
3579 struct in_device *in_dev;
3580 struct in_ifaddr *ifa;
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00003581 unsigned long flags;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003582 int i = 0;
3583
3584 if (!netif_running(netdev))
3585 return 0;
3586
Shreyas Bhatewara51956cd2011-01-14 14:59:52 +00003587 for (i = 0; i < adapter->num_rx_queues; i++)
3588 napi_disable(&adapter->rx_queue[i].napi);
3589
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003590 vmxnet3_disable_all_intrs(adapter);
3591 vmxnet3_free_irqs(adapter);
3592 vmxnet3_free_intr_resources(adapter);
3593
3594 netif_device_detach(netdev);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003595 netif_tx_stop_all_queues(netdev);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003596
3597 /* Create wake-up filters. */
3598 pmConf = adapter->pm_conf;
3599 memset(pmConf, 0, sizeof(*pmConf));
3600
3601 if (adapter->wol & WAKE_UCAST) {
3602 pmConf->filters[i].patternSize = ETH_ALEN;
3603 pmConf->filters[i].maskSize = 1;
3604 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
3605 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
3606
Harvey Harrison3843e512010-10-21 18:05:32 +00003607 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003608 i++;
3609 }
3610
3611 if (adapter->wol & WAKE_ARP) {
3612 in_dev = in_dev_get(netdev);
3613 if (!in_dev)
3614 goto skip_arp;
3615
3616 ifa = (struct in_ifaddr *)in_dev->ifa_list;
3617 if (!ifa)
3618 goto skip_arp;
3619
3620 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
3621 sizeof(struct arphdr) + /* ARP header */
3622 2 * ETH_ALEN + /* 2 Ethernet addresses*/
3623 2 * sizeof(u32); /*2 IPv4 addresses */
3624 pmConf->filters[i].maskSize =
3625 (pmConf->filters[i].patternSize - 1) / 8 + 1;
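		/* One mask bit covers one pattern byte: bit b of mask[n]
		 * selects pattern byte n * 8 + b. E.g. mask[1] = 0x30
		 * matches bytes 12-13, the EtherType field set below.
		 */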
3626
3627 /* ETH_P_ARP in Ethernet header. */
3628 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
3629 ehdr->h_proto = htons(ETH_P_ARP);
3630
3631 /* ARPOP_REQUEST in ARP header. */
3632 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
3633 ahdr->ar_op = htons(ARPOP_REQUEST);
3634 arpreq = (u8 *)(ahdr + 1);
3635
3636 /* The Unicast IPv4 address in 'tip' field. */
3637 arpreq += 2 * ETH_ALEN + sizeof(u32);
3638 *(u32 *)arpreq = ifa->ifa_address;
3639
3640 /* The mask for the relevant bits. */
3641 pmConf->filters[i].mask[0] = 0x00;
3642 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
3643 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
3644 pmConf->filters[i].mask[3] = 0x00;
3645 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
3646 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
3647 in_dev_put(in_dev);
3648
Harvey Harrison3843e512010-10-21 18:05:32 +00003649 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003650 i++;
3651 }
3652
3653skip_arp:
3654 if (adapter->wol & WAKE_MAGIC)
Harvey Harrison3843e512010-10-21 18:05:32 +00003655 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003656
3657 pmConf->numFilters = i;
3658
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00003659 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3660 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3661 *pmConf));
Andy Kingb0eb57c2013-08-23 09:33:49 -07003662 adapter->shared->devRead.pmConfDesc.confPA =
3663 cpu_to_le64(adapter->pm_conf_pa);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003664
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00003665 spin_lock_irqsave(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003666 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3667 VMXNET3_CMD_UPDATE_PMCFG);
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00003668 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003669
3670 pci_save_state(pdev);
3671 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
3672 adapter->wol);
3673 pci_disable_device(pdev);
3674 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
3675
3676 return 0;
3677}
3678
3679
3680static int
3681vmxnet3_resume(struct device *device)
3682{
Shrikrishna Khare5ec82c12015-01-09 15:19:14 -08003683 int err;
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00003684 unsigned long flags;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003685 struct pci_dev *pdev = to_pci_dev(device);
3686 struct net_device *netdev = pci_get_drvdata(pdev);
3687 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003688
3689 if (!netif_running(netdev))
3690 return 0;
3691
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003692 pci_set_power_state(pdev, PCI_D0);
3693 pci_restore_state(pdev);
3694 err = pci_enable_device_mem(pdev);
3695 if (err != 0)
3696 return err;
3697
3698 pci_enable_wake(pdev, PCI_D0, 0);
3699
Shrikrishna Khare5ec82c12015-01-09 15:19:14 -08003700 vmxnet3_alloc_intr_resources(adapter);
3701
3702 /* Across hibernate and suspend, the device has to be reinitialized,
3703 * as its state may not have been preserved.
3704 */
3705
3706 /* Need not check adapter state as other reset tasks cannot run during
3707 * device resume.
3708 */
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00003709 spin_lock_irqsave(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003710 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
Shrikrishna Khare5ec82c12015-01-09 15:19:14 -08003711 VMXNET3_CMD_QUIESCE_DEV);
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00003712 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
Shrikrishna Khare5ec82c12015-01-09 15:19:14 -08003713 vmxnet3_tq_cleanup_all(adapter);
3714 vmxnet3_rq_cleanup_all(adapter);
3715
3716 vmxnet3_reset_dev(adapter);
3717 err = vmxnet3_activate_dev(adapter);
3718 if (err != 0) {
3719 netdev_err(netdev,
3720 "failed to re-activate on resume, error: %d", err);
3721 vmxnet3_force_close(adapter);
3722 return err;
3723 }
3724 netif_device_attach(netdev);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003725
3726 return 0;
3727}
3728
Alexey Dobriyan47145212009-12-14 18:00:08 -08003729static const struct dev_pm_ops vmxnet3_pm_ops = {
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003730 .suspend = vmxnet3_suspend,
3731 .resume = vmxnet3_resume,
Shrikrishna Khare5ec82c12015-01-09 15:19:14 -08003732 .freeze = vmxnet3_suspend,
3733 .restore = vmxnet3_resume,
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003734};
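/* Hibernation reuses the suspend/resume paths for freeze/restore; this
 * works because vmxnet3_resume() reinitializes the device from scratch
 * rather than relying on preserved state.
 */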
3735#endif
3736
3737static struct pci_driver vmxnet3_driver = {
3738 .name = vmxnet3_driver_name,
3739 .id_table = vmxnet3_pciid_table,
3740 .probe = vmxnet3_probe_device,
Bill Pemberton3a4751a2012-12-03 09:24:16 -05003741 .remove = vmxnet3_remove_device,
Shreyas Bhatewarae9ba47b2015-06-19 13:36:02 -07003742 .shutdown = vmxnet3_shutdown_device,
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003743#ifdef CONFIG_PM
3744 .driver.pm = &vmxnet3_pm_ops,
3745#endif
3746};
3747
3748
3749static int __init
3750vmxnet3_init_module(void)
3751{
Stephen Hemminger204a6e62013-01-15 07:28:30 +00003752 pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003753 VMXNET3_DRIVER_VERSION_REPORT);
3754 return pci_register_driver(&vmxnet3_driver);
3755}
3756
3757module_init(vmxnet3_init_module);
3758
3759
3760static void
3761vmxnet3_exit_module(void)
3762{
3763 pci_unregister_driver(&vmxnet3_driver);
3764}
3765
3766module_exit(vmxnet3_exit_module);
3767
3768MODULE_AUTHOR("VMware, Inc.");
3769MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
3770MODULE_LICENSE("GPL v2");
3771MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);